KVM: x86/mmu: remove "bool base_only" arguments
arch/x86/kvm/mmu/mmu.c  (linux-2.6-microblaze.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * MMU support
9  *
10  * Copyright (C) 2006 Qumranet, Inc.
11  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12  *
13  * Authors:
14  *   Yaniv Kamay  <yaniv@qumranet.com>
15  *   Avi Kivity   <avi@qumranet.com>
16  */
17
18 #include "irq.h"
19 #include "ioapic.h"
20 #include "mmu.h"
21 #include "mmu_internal.h"
22 #include "tdp_mmu.h"
23 #include "x86.h"
24 #include "kvm_cache_regs.h"
25 #include "kvm_emulate.h"
26 #include "cpuid.h"
27 #include "spte.h"
28
29 #include <linux/kvm_host.h>
30 #include <linux/types.h>
31 #include <linux/string.h>
32 #include <linux/mm.h>
33 #include <linux/highmem.h>
34 #include <linux/moduleparam.h>
35 #include <linux/export.h>
36 #include <linux/swap.h>
37 #include <linux/hugetlb.h>
38 #include <linux/compiler.h>
39 #include <linux/srcu.h>
40 #include <linux/slab.h>
41 #include <linux/sched/signal.h>
42 #include <linux/uaccess.h>
43 #include <linux/hash.h>
44 #include <linux/kern_levels.h>
45 #include <linux/kthread.h>
46
47 #include <asm/page.h>
48 #include <asm/memtype.h>
49 #include <asm/cmpxchg.h>
50 #include <asm/io.h>
51 #include <asm/set_memory.h>
52 #include <asm/vmx.h>
53 #include <asm/kvm_page_track.h>
54 #include "trace.h"
55
56 #include "paging.h"
57
58 extern bool itlb_multihit_kvm_mitigation;
59
60 int __read_mostly nx_huge_pages = -1;
61 static uint __read_mostly nx_huge_pages_recovery_period_ms;
62 #ifdef CONFIG_PREEMPT_RT
63 /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
64 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
65 #else
66 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
67 #endif
68
69 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
70 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
71
72 static const struct kernel_param_ops nx_huge_pages_ops = {
73         .set = set_nx_huge_pages,
74         .get = param_get_bool,
75 };
76
77 static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
78         .set = set_nx_huge_pages_recovery_param,
79         .get = param_get_uint,
80 };
81
82 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
83 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
84 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops,
85                 &nx_huge_pages_recovery_ratio, 0644);
86 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
87 module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops,
88                 &nx_huge_pages_recovery_period_ms, 0644);
89 __MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint");
90
91 static bool __read_mostly force_flush_and_sync_on_reuse;
92 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
93
94 /*
95  * When this variable is set to true, Two-Dimensional Paging is enabled and
96  * the hardware walks two page tables:
97  * 1. the guest-virtual to guest-physical translation
98  * 2. while doing 1., the guest-physical to host-physical translation
99  * If the hardware supports this, shadow paging is not needed.
100  */
101 bool tdp_enabled = false;
102
103 static int max_huge_page_level __read_mostly;
104 static int tdp_root_level __read_mostly;
105 static int max_tdp_level __read_mostly;
106
107 #ifdef MMU_DEBUG
108 bool dbg = 0;
109 module_param(dbg, bool, 0644);
110 #endif
111
112 #define PTE_PREFETCH_NUM                8
113
114 #define PT32_LEVEL_BITS 10
115
116 #define PT32_LEVEL_SHIFT(level) \
117                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
118
119 #define PT32_LVL_OFFSET_MASK(level) \
120         (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
121                                                 * PT32_LEVEL_BITS))) - 1))
122
123 #define PT32_INDEX(address, level)\
124         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
125
126
127 #define PT32_BASE_ADDR_MASK PAGE_MASK
128 #define PT32_DIR_BASE_ADDR_MASK \
129         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
130 #define PT32_LVL_ADDR_MASK(level) \
131         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
132                                             * PT32_LEVEL_BITS))) - 1))
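
/*
 * Editorial worked example (not part of the original source): with
 * PAGE_SHIFT == 12 and PT32_LEVEL_BITS == 10, PT32_LEVEL_SHIFT(1) == 12 and
 * PT32_LEVEL_SHIFT(2) == 22.  For a 32-bit address, PT32_INDEX(addr, 2)
 * therefore extracts bits 31:22 (the page-directory index) and
 * PT32_INDEX(addr, 1) extracts bits 21:12 (the page-table index), each a
 * 10-bit value selecting one of 1024 entries.
 */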
133
134 #include <trace/events/kvm.h>
135
136 /* make pte_list_desc fit well in cache lines */
137 #define PTE_LIST_EXT 14
138
139 /*
140  * Slight optimization of cacheline layout: put `more' and `spte_count' at
141  * the start so that checking a full descriptor (spte_count == PTE_LIST_EXT)
142  * or accessing up to 6 entries touches only a single cacheline.
143  */
144 struct pte_list_desc {
145         struct pte_list_desc *more;
146         /*
147          * The number of entries stored in this pte_list_desc.  Doesn't need
148          * to be a u64, but u64 keeps alignment simple.  PTE_LIST_EXT == full.
149          */
150         u64 spte_count;
151         u64 *sptes[PTE_LIST_EXT];
152 };
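
/*
 * Editorial sizing sketch (not part of the original source), assuming 8-byte
 * pointers and 64-byte cachelines: sizeof(struct pte_list_desc) is
 * 8 + 8 + 14 * 8 = 128 bytes, i.e. exactly two cachelines.  'more',
 * 'spte_count' and sptes[0..5] share the first cacheline, so checking
 * fullness or walking a mostly-empty descriptor touches only that line,
 * which is the layout optimization described above.
 */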
153
154 struct kvm_shadow_walk_iterator {
155         u64 addr;
156         hpa_t shadow_addr;
157         u64 *sptep;
158         int level;
159         unsigned index;
160 };
161
162 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
163         for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
164                                          (_root), (_addr));                \
165              shadow_walk_okay(&(_walker));                                 \
166              shadow_walk_next(&(_walker)))
167
168 #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
169         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
170              shadow_walk_okay(&(_walker));                      \
171              shadow_walk_next(&(_walker)))
172
173 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)     \
174         for (shadow_walk_init(&(_walker), _vcpu, _addr);                \
175              shadow_walk_okay(&(_walker)) &&                            \
176                 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });  \
177              __shadow_walk_next(&(_walker), spte))
178
179 static struct kmem_cache *pte_list_desc_cache;
180 struct kmem_cache *mmu_page_header_cache;
181 static struct percpu_counter kvm_total_used_mmu_pages;
182
183 static void mmu_spte_set(u64 *sptep, u64 spte);
184
185 struct kvm_mmu_role_regs {
186         const unsigned long cr0;
187         const unsigned long cr4;
188         const u64 efer;
189 };
190
191 #define CREATE_TRACE_POINTS
192 #include "mmutrace.h"
193
194 /*
195  * Yes, lots of underscores.  They're a hint that you probably shouldn't be
196  * reading from the role_regs.  Once the mmu_role is constructed, it becomes
197  * the single source of truth for the MMU's state.
198  */
199 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)                   \
200 static inline bool __maybe_unused                                       \
201 ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs)             \
202 {                                                                       \
203         return !!(regs->reg & flag);                                    \
204 }
205 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
206 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
207 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
208 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
209 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
210 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
211 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
212 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
213 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
214 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
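
/*
 * For illustration (editorial sketch, not part of the original source), the
 * invocation BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG) above expands
 * to roughly:
 *
 *	static inline bool __maybe_unused
 *	____is_cr0_pg(const struct kvm_mmu_role_regs *regs)
 *	{
 *		return !!(regs->cr0 & X86_CR0_PG);
 *	}
 *
 * i.e. each accessor simply tests one architectural bit in the snapshotted
 * registers.
 */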
215
216 /*
217  * The MMU itself (with a valid role) is the single source of truth for the
218  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
219  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
220  * and the vCPU may be incorrect/irrelevant.
221  */
222 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)         \
223 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)        \
224 {                                                               \
225         return !!(mmu->mmu_role. base_or_ext . reg##_##name);   \
226 }
227 BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
228 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
229 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
230 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
231 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
232 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
233 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
234 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
235 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
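
/*
 * Similarly (editorial sketch, not part of the original source),
 * BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp) generates an is_cr0_wp(mmu) helper
 * returning !!(mmu->mmu_role.base.cr0_wp), and the "ext" variants read from
 * mmu->mmu_role.ext.  These accessors consult the constructed role, not the
 * raw registers, per the comment above.
 */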
236
237 static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
238 {
239         struct kvm_mmu_role_regs regs = {
240                 .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
241                 .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
242                 .efer = vcpu->arch.efer,
243         };
244
245         return regs;
246 }
247
248 static int role_regs_to_root_level(const struct kvm_mmu_role_regs *regs)
249 {
250         if (!____is_cr0_pg(regs))
251                 return 0;
252         else if (____is_efer_lma(regs))
253                 return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
254                                                PT64_ROOT_4LEVEL;
255         else if (____is_cr4_pae(regs))
256                 return PT32E_ROOT_LEVEL;
257         else
258                 return PT32_ROOT_LEVEL;
259 }
260
261 static inline bool kvm_available_flush_tlb_with_range(void)
262 {
263         return kvm_x86_ops.tlb_remote_flush_with_range;
264 }
265
266 static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
267                 struct kvm_tlb_range *range)
268 {
269         int ret = -ENOTSUPP;
270
271         if (range && kvm_x86_ops.tlb_remote_flush_with_range)
272                 ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
273
274         if (ret)
275                 kvm_flush_remote_tlbs(kvm);
276 }
277
278 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
279                 u64 start_gfn, u64 pages)
280 {
281         struct kvm_tlb_range range;
282
283         range.start_gfn = start_gfn;
284         range.pages = pages;
285
286         kvm_flush_remote_tlbs_with_range(kvm, &range);
287 }
288
289 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
290                            unsigned int access)
291 {
292         u64 spte = make_mmio_spte(vcpu, gfn, access);
293
294         trace_mark_mmio_spte(sptep, gfn, spte);
295         mmu_spte_set(sptep, spte);
296 }
297
298 static gfn_t get_mmio_spte_gfn(u64 spte)
299 {
300         u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
301
302         gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
303                & shadow_nonpresent_or_rsvd_mask;
304
305         return gpa >> PAGE_SHIFT;
306 }
307
308 static unsigned get_mmio_spte_access(u64 spte)
309 {
310         return spte & shadow_mmio_access_mask;
311 }
312
313 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
314 {
315         u64 kvm_gen, spte_gen, gen;
316
317         gen = kvm_vcpu_memslots(vcpu)->generation;
318         if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
319                 return false;
320
321         kvm_gen = gen & MMIO_SPTE_GEN_MASK;
322         spte_gen = get_mmio_spte_generation(spte);
323
324         trace_check_mmio_spte(spte, kvm_gen, spte_gen);
325         return likely(kvm_gen == spte_gen);
326 }
327
328 static int is_cpuid_PSE36(void)
329 {
330         return 1;
331 }
332
333 static gfn_t pse36_gfn_delta(u32 gpte)
334 {
335         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
336
337         return (gpte & PT32_DIR_PSE36_MASK) << shift;
338 }
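
/*
 * Editorial note (not part of the original source): with the usual KVM
 * definitions of PT32_DIR_PSE36_SHIFT == 13 and a 4-bit PT32_DIR_PSE36_MASK,
 * the shift works out to 32 - 13 - 12 = 7, so PDE bits 16:13 become bits
 * 23:20 of the returned gfn, i.e. physical address bits 35:32, the extra
 * address bits that the PSE-36 scheme grafts onto 32-bit paging.
 */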
339
340 #ifdef CONFIG_X86_64
341 static void __set_spte(u64 *sptep, u64 spte)
342 {
343         WRITE_ONCE(*sptep, spte);
344 }
345
346 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
347 {
348         WRITE_ONCE(*sptep, spte);
349 }
350
351 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
352 {
353         return xchg(sptep, spte);
354 }
355
356 static u64 __get_spte_lockless(u64 *sptep)
357 {
358         return READ_ONCE(*sptep);
359 }
360 #else
361 union split_spte {
362         struct {
363                 u32 spte_low;
364                 u32 spte_high;
365         };
366         u64 spte;
367 };
368
369 static void count_spte_clear(u64 *sptep, u64 spte)
370 {
371         struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
372
373         if (is_shadow_present_pte(spte))
374                 return;
375
376         /* Ensure the spte is completely set before we increase the count */
377         smp_wmb();
378         sp->clear_spte_count++;
379 }
380
381 static void __set_spte(u64 *sptep, u64 spte)
382 {
383         union split_spte *ssptep, sspte;
384
385         ssptep = (union split_spte *)sptep;
386         sspte = (union split_spte)spte;
387
388         ssptep->spte_high = sspte.spte_high;
389
390         /*
391          * If we map the spte from nonpresent to present, we should store
392          * the high bits first and only then set the present bit, so the CPU
393          * cannot fetch this spte while we are setting it.
394          */
395         smp_wmb();
396
397         WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
398 }
399
400 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
401 {
402         union split_spte *ssptep, sspte;
403
404         ssptep = (union split_spte *)sptep;
405         sspte = (union split_spte)spte;
406
407         WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
408
409         /*
410          * If we map the spte from present to nonpresent, we should clear the
411          * present bit first to prevent a vCPU from fetching the old high bits.
412          */
413         smp_wmb();
414
415         ssptep->spte_high = sspte.spte_high;
416         count_spte_clear(sptep, spte);
417 }
418
419 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
420 {
421         union split_spte *ssptep, sspte, orig;
422
423         ssptep = (union split_spte *)sptep;
424         sspte = (union split_spte)spte;
425
426         /* xchg acts as a barrier before the setting of the high bits */
427         orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
428         orig.spte_high = ssptep->spte_high;
429         ssptep->spte_high = sspte.spte_high;
430         count_spte_clear(sptep, spte);
431
432         return orig.spte;
433 }
434
435 /*
436  * The idea of reading the spte locklessly on 32-bit hosts comes from
437  * gup_get_pte (mm/gup.c).
438  *
439  * An spte TLB flush may be pending, because kvm_set_pte_rmapp coalesces
440  * flushes and we are running outside of the MMU lock.  Therefore we need
441  * to protect against in-progress updates of the spte.
442  *
443  * Reading the spte while an update is in progress may get the old value
444  * for the high part of the spte.  The race is fine for a present->non-present
445  * change (because the high part of the spte is ignored for non-present spte),
446  * but for a present->present change we must reread the spte.
447  *
448  * All such changes are done in two steps (present->non-present and
449  * non-present->present), hence it is enough to count the number of
450  * present->non-present updates: if it changed while reading the spte,
451  * we might have hit the race.  This is done using clear_spte_count.
452  */
453 static u64 __get_spte_lockless(u64 *sptep)
454 {
455         struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
456         union split_spte spte, *orig = (union split_spte *)sptep;
457         int count;
458
459 retry:
460         count = sp->clear_spte_count;
461         smp_rmb();
462
463         spte.spte_low = orig->spte_low;
464         smp_rmb();
465
466         spte.spte_high = orig->spte_high;
467         smp_rmb();
468
469         if (unlikely(spte.spte_low != orig->spte_low ||
470               count != sp->clear_spte_count))
471                 goto retry;
472
473         return spte.spte;
474 }
475 #endif
476
477 static bool spte_has_volatile_bits(u64 spte)
478 {
479         if (!is_shadow_present_pte(spte))
480                 return false;
481
482         /*
483          * Always atomically update the spte if it can be updated outside of
484          * the mmu-lock.  This ensures the dirty bit is not lost and gives us
485          * a stable is_writable_pte() so that a needed TLB flush is not
486          * missed.
487          */
488         if (spte_can_locklessly_be_made_writable(spte) ||
489             is_access_track_spte(spte))
490                 return true;
491
492         if (spte_ad_enabled(spte)) {
493                 if ((spte & shadow_accessed_mask) == 0 ||
494                     (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
495                         return true;
496         }
497
498         return false;
499 }
500
501 /* Rules for using mmu_spte_set:
502  * Set the sptep from nonpresent to present.
503  * Note: the sptep being assigned *must* be either not present
504  * or in a state where the hardware will not attempt to update
505  * the spte.
506  */
507 static void mmu_spte_set(u64 *sptep, u64 new_spte)
508 {
509         WARN_ON(is_shadow_present_pte(*sptep));
510         __set_spte(sptep, new_spte);
511 }
512
513 /*
514  * Update the SPTE (excluding the PFN), but do not track changes in its
515  * accessed/dirty status.
516  */
517 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
518 {
519         u64 old_spte = *sptep;
520
521         WARN_ON(!is_shadow_present_pte(new_spte));
522         check_spte_writable_invariants(new_spte);
523
524         if (!is_shadow_present_pte(old_spte)) {
525                 mmu_spte_set(sptep, new_spte);
526                 return old_spte;
527         }
528
529         if (!spte_has_volatile_bits(old_spte))
530                 __update_clear_spte_fast(sptep, new_spte);
531         else
532                 old_spte = __update_clear_spte_slow(sptep, new_spte);
533
534         WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
535
536         return old_spte;
537 }
538
539 /* Rules for using mmu_spte_update:
540  * Update the state bits; the mapped pfn is not changed.
541  *
542  * Whenever an MMU-writable SPTE is overwritten with a read-only SPTE, remote
543  * TLBs must be flushed. Otherwise rmap_write_protect will find a read-only
544  * spte, even though the writable spte might be cached on a CPU's TLB.
545  *
546  * Returns true if the TLB needs to be flushed
547  */
548 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
549 {
550         bool flush = false;
551         u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
552
553         if (!is_shadow_present_pte(old_spte))
554                 return false;
555
556         /*
557          * Updating the spte outside of the mmu-lock is safe, since we
558          * always update it atomically; see the comments in
559          * spte_has_volatile_bits().
560          */
561         if (spte_can_locklessly_be_made_writable(old_spte) &&
562               !is_writable_pte(new_spte))
563                 flush = true;
564
565         /*
566          * Flush TLB when accessed/dirty states are changed in the page tables,
567          * to guarantee consistency between TLB and page tables.
568          */
569
570         if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
571                 flush = true;
572                 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
573         }
574
575         if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
576                 flush = true;
577                 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
578         }
579
580         return flush;
581 }
582
583 /*
584  * Rules for using mmu_spte_clear_track_bits:
585  * It sets the sptep from present to nonpresent and tracks the
586  * state bits; it is used to clear a last-level sptep.
587  * Returns the old PTE.
588  */
589 static int mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
590 {
591         kvm_pfn_t pfn;
592         u64 old_spte = *sptep;
593         int level = sptep_to_sp(sptep)->role.level;
594
595         if (!spte_has_volatile_bits(old_spte))
596                 __update_clear_spte_fast(sptep, 0ull);
597         else
598                 old_spte = __update_clear_spte_slow(sptep, 0ull);
599
600         if (!is_shadow_present_pte(old_spte))
601                 return old_spte;
602
603         kvm_update_page_stats(kvm, level, -1);
604
605         pfn = spte_to_pfn(old_spte);
606
607         /*
608          * KVM does not hold a refcount on the pages used by the KVM MMU,
609          * so before such a page is reclaimed it must first be unmapped
610          * from the MMU.
611          */
612         WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
613
614         if (is_accessed_spte(old_spte))
615                 kvm_set_pfn_accessed(pfn);
616
617         if (is_dirty_spte(old_spte))
618                 kvm_set_pfn_dirty(pfn);
619
620         return old_spte;
621 }
622
623 /*
624  * Rules for using mmu_spte_clear_no_track:
625  * Directly clear the spte without tracking its state bits;
626  * it is used when clearing an upper-level spte.
627  */
628 static void mmu_spte_clear_no_track(u64 *sptep)
629 {
630         __update_clear_spte_fast(sptep, 0ull);
631 }
632
633 static u64 mmu_spte_get_lockless(u64 *sptep)
634 {
635         return __get_spte_lockless(sptep);
636 }
637
638 /* Returns the Accessed status of the PTE and resets it at the same time. */
639 static bool mmu_spte_age(u64 *sptep)
640 {
641         u64 spte = mmu_spte_get_lockless(sptep);
642
643         if (!is_accessed_spte(spte))
644                 return false;
645
646         if (spte_ad_enabled(spte)) {
647                 clear_bit((ffs(shadow_accessed_mask) - 1),
648                           (unsigned long *)sptep);
649         } else {
650                 /*
651                  * Capture the dirty status of the page, so that it doesn't get
652                  * lost when the SPTE is marked for access tracking.
653                  */
654                 if (is_writable_pte(spte))
655                         kvm_set_pfn_dirty(spte_to_pfn(spte));
656
657                 spte = mark_spte_for_access_track(spte);
658                 mmu_spte_update_no_track(sptep, spte);
659         }
660
661         return true;
662 }
663
664 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
665 {
666         if (is_tdp_mmu(vcpu->arch.mmu)) {
667                 kvm_tdp_mmu_walk_lockless_begin();
668         } else {
669                 /*
670                  * Prevent page table teardown by making any freer wait for the
671                  * kvm_flush_remote_tlbs() IPI to all active vCPUs.
672                  */
673                 local_irq_disable();
674
675                 /*
676                  * Make sure a following spte read is not reordered ahead of the write
677                  * to vcpu->mode.
678                  */
679                 smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
680         }
681 }
682
683 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
684 {
685         if (is_tdp_mmu(vcpu->arch.mmu)) {
686                 kvm_tdp_mmu_walk_lockless_end();
687         } else {
688                 /*
689                  * Make sure the write to vcpu->mode is not reordered in front of
690                  * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
691                  * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
692                  */
693                 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
694                 local_irq_enable();
695         }
696 }
697
698 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
699 {
700         int r;
701
702         /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
703         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
704                                        1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
705         if (r)
706                 return r;
707         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
708                                        PT64_ROOT_MAX_LEVEL);
709         if (r)
710                 return r;
711         if (maybe_indirect) {
712                 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
713                                                PT64_ROOT_MAX_LEVEL);
714                 if (r)
715                         return r;
716         }
717         return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
718                                           PT64_ROOT_MAX_LEVEL);
719 }
720
721 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
722 {
723         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
724         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
725         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
726         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
727 }
728
729 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
730 {
731         return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
732 }
733
734 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
735 {
736         kmem_cache_free(pte_list_desc_cache, pte_list_desc);
737 }
738
739 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
740 {
741         if (!sp->role.direct)
742                 return sp->gfns[index];
743
744         return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
745 }
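
/*
 * Editorial example (not part of the original source): for a direct shadow
 * page each index covers a naturally aligned block, so with
 * PT64_LEVEL_BITS == 9 a level-2 (2M) direct sp maps sp->gfn + index * 512
 * at entry 'index', and a level-3 (1G) sp maps sp->gfn + index * 512 * 512,
 * matching the (level - 1) * PT64_LEVEL_BITS shift above.
 */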
746
747 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
748 {
749         if (!sp->role.direct) {
750                 sp->gfns[index] = gfn;
751                 return;
752         }
753
754         if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
755                 pr_err_ratelimited("gfn mismatch under direct page %llx "
756                                    "(expected %llx, got %llx)\n",
757                                    sp->gfn,
758                                    kvm_mmu_page_get_gfn(sp, index), gfn);
759 }
760
761 /*
762  * Return the pointer to the large page information for a given gfn,
763  * handling slots that are not large page aligned.
764  */
765 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
766                 const struct kvm_memory_slot *slot, int level)
767 {
768         unsigned long idx;
769
770         idx = gfn_to_index(gfn, slot->base_gfn, level);
771         return &slot->arch.lpage_info[level - 2][idx];
772 }
773
774 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
775                                             gfn_t gfn, int count)
776 {
777         struct kvm_lpage_info *linfo;
778         int i;
779
780         for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
781                 linfo = lpage_info_slot(gfn, slot, i);
782                 linfo->disallow_lpage += count;
783                 WARN_ON(linfo->disallow_lpage < 0);
784         }
785 }
786
787 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
788 {
789         update_gfn_disallow_lpage_count(slot, gfn, 1);
790 }
791
792 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
793 {
794         update_gfn_disallow_lpage_count(slot, gfn, -1);
795 }
796
797 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
798 {
799         struct kvm_memslots *slots;
800         struct kvm_memory_slot *slot;
801         gfn_t gfn;
802
803         kvm->arch.indirect_shadow_pages++;
804         gfn = sp->gfn;
805         slots = kvm_memslots_for_spte_role(kvm, sp->role);
806         slot = __gfn_to_memslot(slots, gfn);
807
808         /* Non-leaf shadow pages are kept write-protected. */
809         if (sp->role.level > PG_LEVEL_4K)
810                 return kvm_slot_page_track_add_page(kvm, slot, gfn,
811                                                     KVM_PAGE_TRACK_WRITE);
812
813         kvm_mmu_gfn_disallow_lpage(slot, gfn);
814 }
815
816 void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
817 {
818         if (sp->lpage_disallowed)
819                 return;
820
821         ++kvm->stat.nx_lpage_splits;
822         list_add_tail(&sp->lpage_disallowed_link,
823                       &kvm->arch.lpage_disallowed_mmu_pages);
824         sp->lpage_disallowed = true;
825 }
826
827 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
828 {
829         struct kvm_memslots *slots;
830         struct kvm_memory_slot *slot;
831         gfn_t gfn;
832
833         kvm->arch.indirect_shadow_pages--;
834         gfn = sp->gfn;
835         slots = kvm_memslots_for_spte_role(kvm, sp->role);
836         slot = __gfn_to_memslot(slots, gfn);
837         if (sp->role.level > PG_LEVEL_4K)
838                 return kvm_slot_page_track_remove_page(kvm, slot, gfn,
839                                                        KVM_PAGE_TRACK_WRITE);
840
841         kvm_mmu_gfn_allow_lpage(slot, gfn);
842 }
843
844 void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
845 {
846         --kvm->stat.nx_lpage_splits;
847         sp->lpage_disallowed = false;
848         list_del(&sp->lpage_disallowed_link);
849 }
850
851 static struct kvm_memory_slot *
852 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
853                             bool no_dirty_log)
854 {
855         struct kvm_memory_slot *slot;
856
857         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
858         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
859                 return NULL;
860         if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
861                 return NULL;
862
863         return slot;
864 }
865
866 /*
867  * About rmap_head encoding:
868  *
869  * If the bit zero of rmap_head->val is clear, then it points to the only spte
870  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
871  * pte_list_desc containing more mappings.
872  */
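
/*
 * Editorial sketch of the encoding described above (not part of the
 * original source):
 *
 *	if (!rmap_head->val)            -> the rmap is empty
 *	else if (!(rmap_head->val & 1)) -> val is the lone sptep
 *	else                            -> (val & ~1) points to a
 *	                                   struct pte_list_desc
 *
 * pte_list_add() and rmap_get_first() below implement exactly this decoding.
 */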
873
874 /*
875  * Returns the number of pointers in the rmap chain, not counting the new one.
876  */
877 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
878                         struct kvm_rmap_head *rmap_head)
879 {
880         struct pte_list_desc *desc;
881         int count = 0;
882
883         if (!rmap_head->val) {
884                 rmap_printk("%p %llx 0->1\n", spte, *spte);
885                 rmap_head->val = (unsigned long)spte;
886         } else if (!(rmap_head->val & 1)) {
887                 rmap_printk("%p %llx 1->many\n", spte, *spte);
888                 desc = mmu_alloc_pte_list_desc(vcpu);
889                 desc->sptes[0] = (u64 *)rmap_head->val;
890                 desc->sptes[1] = spte;
891                 desc->spte_count = 2;
892                 rmap_head->val = (unsigned long)desc | 1;
893                 ++count;
894         } else {
895                 rmap_printk("%p %llx many->many\n", spte, *spte);
896                 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
897                 while (desc->spte_count == PTE_LIST_EXT) {
898                         count += PTE_LIST_EXT;
899                         if (!desc->more) {
900                                 desc->more = mmu_alloc_pte_list_desc(vcpu);
901                                 desc = desc->more;
902                                 desc->spte_count = 0;
903                                 break;
904                         }
905                         desc = desc->more;
906                 }
907                 count += desc->spte_count;
908                 desc->sptes[desc->spte_count++] = spte;
909         }
910         return count;
911 }
912
913 static void
914 pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
915                            struct pte_list_desc *desc, int i,
916                            struct pte_list_desc *prev_desc)
917 {
918         int j = desc->spte_count - 1;
919
920         desc->sptes[i] = desc->sptes[j];
921         desc->sptes[j] = NULL;
922         desc->spte_count--;
923         if (desc->spte_count)
924                 return;
925         if (!prev_desc && !desc->more)
926                 rmap_head->val = 0;
927         else
928                 if (prev_desc)
929                         prev_desc->more = desc->more;
930                 else
931                         rmap_head->val = (unsigned long)desc->more | 1;
932         mmu_free_pte_list_desc(desc);
933 }
934
935 static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
936 {
937         struct pte_list_desc *desc;
938         struct pte_list_desc *prev_desc;
939         int i;
940
941         if (!rmap_head->val) {
942                 pr_err("%s: %p 0->BUG\n", __func__, spte);
943                 BUG();
944         } else if (!(rmap_head->val & 1)) {
945                 rmap_printk("%p 1->0\n", spte);
946                 if ((u64 *)rmap_head->val != spte) {
947                         pr_err("%s:  %p 1->BUG\n", __func__, spte);
948                         BUG();
949                 }
950                 rmap_head->val = 0;
951         } else {
952                 rmap_printk("%p many->many\n", spte);
953                 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
954                 prev_desc = NULL;
955                 while (desc) {
956                         for (i = 0; i < desc->spte_count; ++i) {
957                                 if (desc->sptes[i] == spte) {
958                                         pte_list_desc_remove_entry(rmap_head,
959                                                         desc, i, prev_desc);
960                                         return;
961                                 }
962                         }
963                         prev_desc = desc;
964                         desc = desc->more;
965                 }
966                 pr_err("%s: %p many->many\n", __func__, spte);
967                 BUG();
968         }
969 }
970
971 static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
972                             u64 *sptep)
973 {
974         mmu_spte_clear_track_bits(kvm, sptep);
975         __pte_list_remove(sptep, rmap_head);
976 }
977
978 /* Return true if rmap existed, false otherwise */
979 static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
980 {
981         struct pte_list_desc *desc, *next;
982         int i;
983
984         if (!rmap_head->val)
985                 return false;
986
987         if (!(rmap_head->val & 1)) {
988                 mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
989                 goto out;
990         }
991
992         desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
993
994         for (; desc; desc = next) {
995                 for (i = 0; i < desc->spte_count; i++)
996                         mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
997                 next = desc->more;
998                 mmu_free_pte_list_desc(desc);
999         }
1000 out:
1001         /* rmap_head is meaningless now, remember to reset it */
1002         rmap_head->val = 0;
1003         return true;
1004 }
1005
1006 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
1007 {
1008         struct pte_list_desc *desc;
1009         unsigned int count = 0;
1010
1011         if (!rmap_head->val)
1012                 return 0;
1013         else if (!(rmap_head->val & 1))
1014                 return 1;
1015
1016         desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1017
1018         while (desc) {
1019                 count += desc->spte_count;
1020                 desc = desc->more;
1021         }
1022
1023         return count;
1024 }
1025
1026 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1027                                          const struct kvm_memory_slot *slot)
1028 {
1029         unsigned long idx;
1030
1031         idx = gfn_to_index(gfn, slot->base_gfn, level);
1032         return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1033 }
1034
1035 static bool rmap_can_add(struct kvm_vcpu *vcpu)
1036 {
1037         struct kvm_mmu_memory_cache *mc;
1038
1039         mc = &vcpu->arch.mmu_pte_list_desc_cache;
1040         return kvm_mmu_memory_cache_nr_free_objects(mc);
1041 }
1042
1043 static void rmap_remove(struct kvm *kvm, u64 *spte)
1044 {
1045         struct kvm_memslots *slots;
1046         struct kvm_memory_slot *slot;
1047         struct kvm_mmu_page *sp;
1048         gfn_t gfn;
1049         struct kvm_rmap_head *rmap_head;
1050
1051         sp = sptep_to_sp(spte);
1052         gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1053
1054         /*
1055          * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
1056          * so we have to determine which memslots to use based on context
1057          * information in sp->role.
1058          */
1059         slots = kvm_memslots_for_spte_role(kvm, sp->role);
1060
1061         slot = __gfn_to_memslot(slots, gfn);
1062         rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1063
1064         __pte_list_remove(spte, rmap_head);
1065 }
1066
1067 /*
1068  * Used by the following functions to iterate through the sptes linked by a
1069  * rmap.  All fields are private and should not be used by callers.
1070  */
1071 struct rmap_iterator {
1072         /* private fields */
1073         struct pte_list_desc *desc;     /* holds the sptep if not NULL */
1074         int pos;                        /* index of the sptep */
1075 };
1076
1077 /*
1078  * Iteration must be started by this function.  This should also be used after
1079  * removing/dropping sptes from the rmap link because in such cases the
1080  * information in the iterator may not be valid.
1081  *
1082  * Returns sptep if found, NULL otherwise.
1083  */
1084 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1085                            struct rmap_iterator *iter)
1086 {
1087         u64 *sptep;
1088
1089         if (!rmap_head->val)
1090                 return NULL;
1091
1092         if (!(rmap_head->val & 1)) {
1093                 iter->desc = NULL;
1094                 sptep = (u64 *)rmap_head->val;
1095                 goto out;
1096         }
1097
1098         iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1099         iter->pos = 0;
1100         sptep = iter->desc->sptes[iter->pos];
1101 out:
1102         BUG_ON(!is_shadow_present_pte(*sptep));
1103         return sptep;
1104 }
1105
1106 /*
1107  * Must be used with a valid iterator: e.g. after rmap_get_first().
1108  *
1109  * Returns sptep if found, NULL otherwise.
1110  */
1111 static u64 *rmap_get_next(struct rmap_iterator *iter)
1112 {
1113         u64 *sptep;
1114
1115         if (iter->desc) {
1116                 if (iter->pos < PTE_LIST_EXT - 1) {
1117                         ++iter->pos;
1118                         sptep = iter->desc->sptes[iter->pos];
1119                         if (sptep)
1120                                 goto out;
1121                 }
1122
1123                 iter->desc = iter->desc->more;
1124
1125                 if (iter->desc) {
1126                         iter->pos = 0;
1127                         /* desc->sptes[0] cannot be NULL */
1128                         sptep = iter->desc->sptes[iter->pos];
1129                         goto out;
1130                 }
1131         }
1132
1133         return NULL;
1134 out:
1135         BUG_ON(!is_shadow_present_pte(*sptep));
1136         return sptep;
1137 }
1138
1139 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)                 \
1140         for (_spte_ = rmap_get_first(_rmap_head_, _iter_);              \
1141              _spte_; _spte_ = rmap_get_next(_iter_))
1142
1143 static void drop_spte(struct kvm *kvm, u64 *sptep)
1144 {
1145         u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1146
1147         if (is_shadow_present_pte(old_spte))
1148                 rmap_remove(kvm, sptep);
1149 }
1150
1151
1152 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1153 {
1154         if (is_large_pte(*sptep)) {
1155                 WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1156                 drop_spte(kvm, sptep);
1157                 return true;
1158         }
1159
1160         return false;
1161 }
1162
1163 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1164 {
1165         if (__drop_large_spte(vcpu->kvm, sptep)) {
1166                 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1167
1168                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1169                         KVM_PAGES_PER_HPAGE(sp->role.level));
1170         }
1171 }
1172
1173 /*
1174  * Write-protect the specified @sptep.  @pt_protect indicates whether the
1175  * spte write-protection is caused by protecting the shadow page table.
1176  *
1177  * Note: write protection differs between dirty logging and spte
1178  * protection:
1179  * - for dirty logging, the spte can be made writable at any time if
1180  *   its dirty bitmap is properly set.
1181  * - for spte protection, the spte can be made writable only after
1182  *   unsync-ing the shadow page.
1183  *
1184  * Return true if the TLB needs to be flushed.
1185  */
1186 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1187 {
1188         u64 spte = *sptep;
1189
1190         if (!is_writable_pte(spte) &&
1191               !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1192                 return false;
1193
1194         rmap_printk("spte %p %llx\n", sptep, *sptep);
1195
1196         if (pt_protect)
1197                 spte &= ~shadow_mmu_writable_mask;
1198         spte = spte & ~PT_WRITABLE_MASK;
1199
1200         return mmu_spte_update(sptep, spte);
1201 }
1202
1203 static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
1204                                bool pt_protect)
1205 {
1206         u64 *sptep;
1207         struct rmap_iterator iter;
1208         bool flush = false;
1209
1210         for_each_rmap_spte(rmap_head, &iter, sptep)
1211                 flush |= spte_write_protect(sptep, pt_protect);
1212
1213         return flush;
1214 }
1215
1216 static bool spte_clear_dirty(u64 *sptep)
1217 {
1218         u64 spte = *sptep;
1219
1220         rmap_printk("spte %p %llx\n", sptep, *sptep);
1221
1222         MMU_WARN_ON(!spte_ad_enabled(spte));
1223         spte &= ~shadow_dirty_mask;
1224         return mmu_spte_update(sptep, spte);
1225 }
1226
1227 static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1228 {
1229         bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1230                                                (unsigned long *)sptep);
1231         if (was_writable && !spte_ad_enabled(*sptep))
1232                 kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1233
1234         return was_writable;
1235 }
1236
1237 /*
1238  * Gets the GFN ready for another round of dirty logging by clearing the
1239  *      - D bit on ad-enabled SPTEs, and
1240  *      - W bit on ad-disabled SPTEs.
1241  * Returns true iff any D or W bits were cleared.
1242  */
1243 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1244                                const struct kvm_memory_slot *slot)
1245 {
1246         u64 *sptep;
1247         struct rmap_iterator iter;
1248         bool flush = false;
1249
1250         for_each_rmap_spte(rmap_head, &iter, sptep)
1251                 if (spte_ad_need_write_protect(*sptep))
1252                         flush |= spte_wrprot_for_clear_dirty(sptep);
1253                 else
1254                         flush |= spte_clear_dirty(sptep);
1255
1256         return flush;
1257 }
1258
1259 /**
1260  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1261  * @kvm: kvm instance
1262  * @slot: slot to protect
1263  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1264  * @mask: indicates which pages we should protect
1265  *
1266  * Used when we do not need to care about huge page mappings.
1267  */
1268 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1269                                      struct kvm_memory_slot *slot,
1270                                      gfn_t gfn_offset, unsigned long mask)
1271 {
1272         struct kvm_rmap_head *rmap_head;
1273
1274         if (is_tdp_mmu_enabled(kvm))
1275                 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1276                                 slot->base_gfn + gfn_offset, mask, true);
1277
1278         if (!kvm_memslots_have_rmaps(kvm))
1279                 return;
1280
1281         while (mask) {
1282                 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1283                                         PG_LEVEL_4K, slot);
1284                 rmap_write_protect(rmap_head, false);
1285
1286                 /* clear the first set bit */
1287                 mask &= mask - 1;
1288         }
1289 }
1290
1291 /**
1292  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1293  * protect the page if the D-bit isn't supported.
1294  * @kvm: kvm instance
1295  * @slot: slot to clear D-bit
1296  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1297  * @mask: indicates which pages we should clear the D-bit for
1298  *
1299  * Used for PML to re-log the dirty GPAs after userspace queries the dirty_bitmap.
1300  */
1301 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1302                                          struct kvm_memory_slot *slot,
1303                                          gfn_t gfn_offset, unsigned long mask)
1304 {
1305         struct kvm_rmap_head *rmap_head;
1306
1307         if (is_tdp_mmu_enabled(kvm))
1308                 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1309                                 slot->base_gfn + gfn_offset, mask, false);
1310
1311         if (!kvm_memslots_have_rmaps(kvm))
1312                 return;
1313
1314         while (mask) {
1315                 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1316                                         PG_LEVEL_4K, slot);
1317                 __rmap_clear_dirty(kvm, rmap_head, slot);
1318
1319                 /* clear the first set bit */
1320                 mask &= mask - 1;
1321         }
1322 }
1323
1324 /**
1325  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1326  * PT level pages.
1327  *
1328  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1329  * enable dirty logging for them.
1330  *
1331  * We need to care about huge page mappings: e.g. during dirty logging we may
1332  * have such mappings.
1333  */
1334 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1335                                 struct kvm_memory_slot *slot,
1336                                 gfn_t gfn_offset, unsigned long mask)
1337 {
1338         /*
1339          * Huge pages are NOT write protected when we start dirty logging in
1340          * initially-all-set mode; must write protect them here so that they
1341          * are split to 4K on the first write.
1342          *
1343          * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
1344          * of memslot has no such restriction, so the range can cross two large
1345          * pages.
1346          */
1347         if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1348                 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1349                 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1350
1351                 if (READ_ONCE(eager_page_split))
1352                         kvm_mmu_try_split_huge_pages(kvm, slot, start, end, PG_LEVEL_4K);
1353
1354                 kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1355
1356                 /* Cross two large pages? */
1357                 if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1358                     ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1359                         kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1360                                                        PG_LEVEL_2M);
1361         }
1362
1363         /* Now handle 4K PTEs.  */
1364         if (kvm_x86_ops.cpu_dirty_log_size)
1365                 kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1366         else
1367                 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1368 }
1369
1370 int kvm_cpu_dirty_log_size(void)
1371 {
1372         return kvm_x86_ops.cpu_dirty_log_size;
1373 }
1374
1375 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1376                                     struct kvm_memory_slot *slot, u64 gfn,
1377                                     int min_level)
1378 {
1379         struct kvm_rmap_head *rmap_head;
1380         int i;
1381         bool write_protected = false;
1382
1383         if (kvm_memslots_have_rmaps(kvm)) {
1384                 for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1385                         rmap_head = gfn_to_rmap(gfn, i, slot);
1386                         write_protected |= rmap_write_protect(rmap_head, true);
1387                 }
1388         }
1389
1390         if (is_tdp_mmu_enabled(kvm))
1391                 write_protected |=
1392                         kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1393
1394         return write_protected;
1395 }
1396
1397 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
1398 {
1399         struct kvm_memory_slot *slot;
1400
1401         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1402         return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1403 }
1404
1405 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1406                           const struct kvm_memory_slot *slot)
1407 {
1408         return pte_list_destroy(kvm, rmap_head);
1409 }
1410
1411 static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1412                             struct kvm_memory_slot *slot, gfn_t gfn, int level,
1413                             pte_t unused)
1414 {
1415         return kvm_zap_rmapp(kvm, rmap_head, slot);
1416 }
1417
1418 static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1419                               struct kvm_memory_slot *slot, gfn_t gfn, int level,
1420                               pte_t pte)
1421 {
1422         u64 *sptep;
1423         struct rmap_iterator iter;
1424         bool need_flush = false;
1425         u64 new_spte;
1426         kvm_pfn_t new_pfn;
1427
1428         WARN_ON(pte_huge(pte));
1429         new_pfn = pte_pfn(pte);
1430
1431 restart:
1432         for_each_rmap_spte(rmap_head, &iter, sptep) {
1433                 rmap_printk("spte %p %llx gfn %llx (%d)\n",
1434                             sptep, *sptep, gfn, level);
1435
1436                 need_flush = true;
1437
1438                 if (pte_write(pte)) {
1439                         pte_list_remove(kvm, rmap_head, sptep);
1440                         goto restart;
1441                 } else {
1442                         new_spte = kvm_mmu_changed_pte_notifier_make_spte(
1443                                         *sptep, new_pfn);
1444
1445                         mmu_spte_clear_track_bits(kvm, sptep);
1446                         mmu_spte_set(sptep, new_spte);
1447                 }
1448         }
1449
1450         if (need_flush && kvm_available_flush_tlb_with_range()) {
1451                 kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1452                 return false;
1453         }
1454
1455         return need_flush;
1456 }
1457
1458 struct slot_rmap_walk_iterator {
1459         /* input fields. */
1460         const struct kvm_memory_slot *slot;
1461         gfn_t start_gfn;
1462         gfn_t end_gfn;
1463         int start_level;
1464         int end_level;
1465
1466         /* output fields. */
1467         gfn_t gfn;
1468         struct kvm_rmap_head *rmap;
1469         int level;
1470
1471         /* private field. */
1472         struct kvm_rmap_head *end_rmap;
1473 };
1474
1475 static void
1476 rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1477 {
1478         iterator->level = level;
1479         iterator->gfn = iterator->start_gfn;
1480         iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1481         iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1482 }
1483
1484 static void
1485 slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1486                     const struct kvm_memory_slot *slot, int start_level,
1487                     int end_level, gfn_t start_gfn, gfn_t end_gfn)
1488 {
1489         iterator->slot = slot;
1490         iterator->start_level = start_level;
1491         iterator->end_level = end_level;
1492         iterator->start_gfn = start_gfn;
1493         iterator->end_gfn = end_gfn;
1494
1495         rmap_walk_init_level(iterator, iterator->start_level);
1496 }
1497
1498 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1499 {
1500         return !!iterator->rmap;
1501 }
1502
1503 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1504 {
1505         if (++iterator->rmap <= iterator->end_rmap) {
1506                 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1507                 return;
1508         }
1509
1510         if (++iterator->level > iterator->end_level) {
1511                 iterator->rmap = NULL;
1512                 return;
1513         }
1514
1515         rmap_walk_init_level(iterator, iterator->level);
1516 }
1517
1518 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,    \
1519            _start_gfn, _end_gfn, _iter_)                                \
1520         for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,         \
1521                                  _end_level_, _start_gfn, _end_gfn);    \
1522              slot_rmap_walk_okay(_iter_);                               \
1523              slot_rmap_walk_next(_iter_))
1524
1525 typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1526                                struct kvm_memory_slot *slot, gfn_t gfn,
1527                                int level, pte_t pte);
1528
1529 static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
1530                                                  struct kvm_gfn_range *range,
1531                                                  rmap_handler_t handler)
1532 {
1533         struct slot_rmap_walk_iterator iterator;
1534         bool ret = false;
1535
1536         for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1537                                  range->start, range->end - 1, &iterator)
1538                 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1539                                iterator.level, range->pte);
1540
1541         return ret;
1542 }
1543
1544 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1545 {
1546         bool flush = false;
1547
1548         if (kvm_memslots_have_rmaps(kvm))
1549                 flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
1550
1551         if (is_tdp_mmu_enabled(kvm))
1552                 flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1553
1554         return flush;
1555 }
1556
1557 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1558 {
1559         bool flush = false;
1560
1561         if (kvm_memslots_have_rmaps(kvm))
1562                 flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
1563
1564         if (is_tdp_mmu_enabled(kvm))
1565                 flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
1566
1567         return flush;
1568 }
1569
1570 static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1571                           struct kvm_memory_slot *slot, gfn_t gfn, int level,
1572                           pte_t unused)
1573 {
1574         u64 *sptep;
1575         struct rmap_iterator iter;
1576         int young = 0;
1577
1578         for_each_rmap_spte(rmap_head, &iter, sptep)
1579                 young |= mmu_spte_age(sptep);
1580
1581         return young;
1582 }
1583
1584 static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1585                                struct kvm_memory_slot *slot, gfn_t gfn,
1586                                int level, pte_t unused)
1587 {
1588         u64 *sptep;
1589         struct rmap_iterator iter;
1590
1591         for_each_rmap_spte(rmap_head, &iter, sptep)
1592                 if (is_accessed_spte(*sptep))
1593                         return true;
1594         return false;
1595 }
1596
1597 #define RMAP_RECYCLE_THRESHOLD 1000
1598
1599 static void rmap_add(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
1600                      u64 *spte, gfn_t gfn)
1601 {
1602         struct kvm_mmu_page *sp;
1603         struct kvm_rmap_head *rmap_head;
1604         int rmap_count;
1605
1606         sp = sptep_to_sp(spte);
1607         kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1608         rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1609         rmap_count = pte_list_add(vcpu, spte, rmap_head);
1610
1611         if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
1612                 kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
1613                 kvm_flush_remote_tlbs_with_address(
1614                                 vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
1615         }
1616 }
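
/*
 * Note (illustrative): RMAP_RECYCLE_THRESHOLD bounds the length of a single
 * gfn's rmap chain.  Once more than 1000 sptes map the same gfn (e.g. the
 * guest points a large number of PTEs at one frame), the existing sptes are
 * zapped via kvm_unmap_rmapp() and remote TLBs are flushed, trading a few
 * refaults for shorter rmap walks.
 */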
1617
1618 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1619 {
1620         bool young = false;
1621
1622         if (kvm_memslots_have_rmaps(kvm))
1623                 young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
1624
1625         if (is_tdp_mmu_enabled(kvm))
1626                 young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1627
1628         return young;
1629 }
1630
1631 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1632 {
1633         bool young = false;
1634
1635         if (kvm_memslots_have_rmaps(kvm))
1636                 young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
1637
1638         if (is_tdp_mmu_enabled(kvm))
1639                 young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1640
1641         return young;
1642 }
1643
1644 #ifdef MMU_DEBUG
1645 static int is_empty_shadow_page(u64 *spt)
1646 {
1647         u64 *pos;
1648         u64 *end;
1649
1650         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1651                 if (is_shadow_present_pte(*pos)) {
1652                         printk(KERN_ERR "%s: %p %llx\n", __func__,
1653                                pos, *pos);
1654                         return 0;
1655                 }
1656         return 1;
1657 }
1658 #endif
1659
1660 /*
1661  * This value is the sum of all of the kvm instances'
1662  * kvm->arch.n_used_mmu_pages values.  We need a global,
1663  * aggregate version in order to make the slab shrinker
1664  * faster.
1665  */
1666 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
1667 {
1668         kvm->arch.n_used_mmu_pages += nr;
1669         percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1670 }
1671
1672 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1673 {
1674         MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1675         hlist_del(&sp->hash_link);
1676         list_del(&sp->link);
1677         free_page((unsigned long)sp->spt);
1678         if (!sp->role.direct)
1679                 free_page((unsigned long)sp->gfns);
1680         kmem_cache_free(mmu_page_header_cache, sp);
1681 }
1682
1683 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1684 {
1685         return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1686 }
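
/*
 * Note (illustrative): the hash selects one of (1 << KVM_MMU_HASH_SHIFT)
 * buckets in kvm->arch.mmu_page_hash, so every shadow page for a given gfn
 * lands on the same hlist and can be found via the for_each_valid_sp() /
 * for_each_gfn_indirect_valid_sp() macros below, e.g.:
 *
 *	sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
 *
 * Collisions are tolerated: kvm_mmu_get_page() skips entries whose gfn does
 * not match and tracks the worst case in max_mmu_page_hash_collisions.
 */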
1687
1688 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1689                                     struct kvm_mmu_page *sp, u64 *parent_pte)
1690 {
1691         if (!parent_pte)
1692                 return;
1693
1694         pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1695 }
1696
1697 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1698                                        u64 *parent_pte)
1699 {
1700         __pte_list_remove(parent_pte, &sp->parent_ptes);
1701 }
1702
1703 static void drop_parent_pte(struct kvm_mmu_page *sp,
1704                             u64 *parent_pte)
1705 {
1706         mmu_page_remove_parent_pte(sp, parent_pte);
1707         mmu_spte_clear_no_track(parent_pte);
1708 }
1709
1710 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1711 {
1712         struct kvm_mmu_page *sp;
1713
1714         sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
1715         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1716         if (!direct)
1717                 sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1718         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1719
1720         /*
1721          * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
1722          * depends on valid pages being added to the head of the list.  See
1723          * comments in kvm_zap_obsolete_pages().
1724          */
1725         sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1726         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1727         kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1728         return sp;
1729 }
1730
1731 static void mark_unsync(u64 *spte);
1732 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1733 {
1734         u64 *sptep;
1735         struct rmap_iterator iter;
1736
1737         for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1738                 mark_unsync(sptep);
1739         }
1740 }
1741
1742 static void mark_unsync(u64 *spte)
1743 {
1744         struct kvm_mmu_page *sp;
1745         unsigned int index;
1746
1747         sp = sptep_to_sp(spte);
1748         index = spte - sp->spt;
1749         if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1750                 return;
1751         if (sp->unsync_children++)
1752                 return;
1753         kvm_mmu_mark_parents_unsync(sp);
1754 }
1755
1756 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1757                                struct kvm_mmu_page *sp)
1758 {
1759         return -1;
1760 }
1761
1762 #define KVM_PAGE_ARRAY_NR 16
1763
1764 struct kvm_mmu_pages {
1765         struct mmu_page_and_offset {
1766                 struct kvm_mmu_page *sp;
1767                 unsigned int idx;
1768         } page[KVM_PAGE_ARRAY_NR];
1769         unsigned int nr;
1770 };
1771
1772 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1773                          int idx)
1774 {
1775         int i;
1776
1777         if (sp->unsync)
1778                 for (i = 0; i < pvec->nr; i++)
1779                         if (pvec->page[i].sp == sp)
1780                                 return 0;
1781
1782         pvec->page[pvec->nr].sp = sp;
1783         pvec->page[pvec->nr].idx = idx;
1784         pvec->nr++;
1785         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1786 }
1787
1788 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1789 {
1790         --sp->unsync_children;
1791         WARN_ON((int)sp->unsync_children < 0);
1792         __clear_bit(idx, sp->unsync_child_bitmap);
1793 }
1794
1795 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1796                            struct kvm_mmu_pages *pvec)
1797 {
1798         int i, ret, nr_unsync_leaf = 0;
1799
1800         for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1801                 struct kvm_mmu_page *child;
1802                 u64 ent = sp->spt[i];
1803
1804                 if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1805                         clear_unsync_child_bit(sp, i);
1806                         continue;
1807                 }
1808
1809                 child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1810
1811                 if (child->unsync_children) {
1812                         if (mmu_pages_add(pvec, child, i))
1813                                 return -ENOSPC;
1814
1815                         ret = __mmu_unsync_walk(child, pvec);
1816                         if (!ret) {
1817                                 clear_unsync_child_bit(sp, i);
1818                                 continue;
1819                         } else if (ret > 0) {
1820                                 nr_unsync_leaf += ret;
1821                         } else
1822                                 return ret;
1823                 } else if (child->unsync) {
1824                         nr_unsync_leaf++;
1825                         if (mmu_pages_add(pvec, child, i))
1826                                 return -ENOSPC;
1827                 } else
1828                         clear_unsync_child_bit(sp, i);
1829         }
1830
1831         return nr_unsync_leaf;
1832 }
1833
1834 #define INVALID_INDEX (-1)
1835
1836 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1837                            struct kvm_mmu_pages *pvec)
1838 {
1839         pvec->nr = 0;
1840         if (!sp->unsync_children)
1841                 return 0;
1842
1843         mmu_pages_add(pvec, sp, INVALID_INDEX);
1844         return __mmu_unsync_walk(sp, pvec);
1845 }
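
/*
 * Illustrative sketch: callers drain the unsync tree in batches of at most
 * KVM_PAGE_ARRAY_NR entries, retrying while the walk keeps finding unsync
 * leaves, roughly:
 *
 *	struct kvm_mmu_pages pages;
 *	struct mmu_page_path parents;
 *	struct kvm_mmu_page *sp;
 *	int i;
 *
 *	while (mmu_unsync_walk(parent, &pages)) {
 *		for_each_sp(pages, sp, parents, i) {
 *			... sync or zap sp ...
 *			mmu_pages_clear_parents(&parents);
 *		}
 *	}
 *
 * This mirrors mmu_sync_children() and mmu_zap_unsync_children() below;
 * mmu_page_path, for_each_sp() and mmu_pages_clear_parents() are defined
 * further down.
 */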
1846
1847 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1848 {
1849         WARN_ON(!sp->unsync);
1850         trace_kvm_mmu_sync_page(sp);
1851         sp->unsync = 0;
1852         --kvm->stat.mmu_unsync;
1853 }
1854
1855 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1856                                      struct list_head *invalid_list);
1857 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1858                                     struct list_head *invalid_list);
1859
1860 #define for_each_valid_sp(_kvm, _sp, _list)                             \
1861         hlist_for_each_entry(_sp, _list, hash_link)                     \
1862                 if (is_obsolete_sp((_kvm), (_sp))) {                    \
1863                 } else
1864
1865 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                 \
1866         for_each_valid_sp(_kvm, _sp,                                    \
1867           &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])     \
1868                 if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
1869
1870 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1871                          struct list_head *invalid_list)
1872 {
1873         int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
1874
1875         if (ret < 0)
1876                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1877         return ret;
1878 }
1879
1880 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1881                                         struct list_head *invalid_list,
1882                                         bool remote_flush)
1883 {
1884         if (!remote_flush && list_empty(invalid_list))
1885                 return false;
1886
1887         if (!list_empty(invalid_list))
1888                 kvm_mmu_commit_zap_page(kvm, invalid_list);
1889         else
1890                 kvm_flush_remote_tlbs(kvm);
1891         return true;
1892 }
1893
1894 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1895 {
1896         if (sp->role.invalid)
1897                 return true;
1898
1899         /* TDP MMU pages do not use the MMU generation. */
1900         return !sp->tdp_mmu_page &&
1901                unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1902 }
1903
1904 struct mmu_page_path {
1905         struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1906         unsigned int idx[PT64_ROOT_MAX_LEVEL];
1907 };
1908
1909 #define for_each_sp(pvec, sp, parents, i)                       \
1910                 for (i = mmu_pages_first(&pvec, &parents);      \
1911                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1912                         i = mmu_pages_next(&pvec, &parents, i))
1913
1914 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1915                           struct mmu_page_path *parents,
1916                           int i)
1917 {
1918         int n;
1919
1920         for (n = i+1; n < pvec->nr; n++) {
1921                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1922                 unsigned idx = pvec->page[n].idx;
1923                 int level = sp->role.level;
1924
1925                 parents->idx[level-1] = idx;
1926                 if (level == PG_LEVEL_4K)
1927                         break;
1928
1929                 parents->parent[level-2] = sp;
1930         }
1931
1932         return n;
1933 }
1934
1935 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
1936                            struct mmu_page_path *parents)
1937 {
1938         struct kvm_mmu_page *sp;
1939         int level;
1940
1941         if (pvec->nr == 0)
1942                 return 0;
1943
1944         WARN_ON(pvec->page[0].idx != INVALID_INDEX);
1945
1946         sp = pvec->page[0].sp;
1947         level = sp->role.level;
1948         WARN_ON(level == PG_LEVEL_4K);
1949
1950         parents->parent[level-2] = sp;
1951
1952         /* Also set up a sentinel.  Further entries in pvec are all
1953          * children of sp, so this element is never overwritten.
1954          */
1955         parents->parent[level-1] = NULL;
1956         return mmu_pages_next(pvec, parents, 0);
1957 }
1958
1959 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1960 {
1961         struct kvm_mmu_page *sp;
1962         unsigned int level = 0;
1963
1964         do {
1965                 unsigned int idx = parents->idx[level];
1966                 sp = parents->parent[level];
1967                 if (!sp)
1968                         return;
1969
1970                 WARN_ON(idx == INVALID_INDEX);
1971                 clear_unsync_child_bit(sp, idx);
1972                 level++;
1973         } while (!sp->unsync_children);
1974 }
1975
1976 static int mmu_sync_children(struct kvm_vcpu *vcpu,
1977                              struct kvm_mmu_page *parent, bool can_yield)
1978 {
1979         int i;
1980         struct kvm_mmu_page *sp;
1981         struct mmu_page_path parents;
1982         struct kvm_mmu_pages pages;
1983         LIST_HEAD(invalid_list);
1984         bool flush = false;
1985
1986         while (mmu_unsync_walk(parent, &pages)) {
1987                 bool protected = false;
1988
1989                 for_each_sp(pages, sp, parents, i)
1990                         protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
1991
1992                 if (protected) {
1993                         kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
1994                         flush = false;
1995                 }
1996
1997                 for_each_sp(pages, sp, parents, i) {
1998                         kvm_unlink_unsync_page(vcpu->kvm, sp);
1999                         flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
2000                         mmu_pages_clear_parents(&parents);
2001                 }
2002                 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2003                         kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2004                         if (!can_yield) {
2005                                 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2006                                 return -EINTR;
2007                         }
2008
2009                         cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2010                         flush = false;
2011                 }
2012         }
2013
2014         kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2015         return 0;
2016 }
2017
2018 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2019 {
2020         atomic_set(&sp->write_flooding_count, 0);
2021 }
2022
2023 static void clear_sp_write_flooding_count(u64 *spte)
2024 {
2025         __clear_sp_write_flooding_count(sptep_to_sp(spte));
2026 }
2027
2028 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2029                                              gfn_t gfn,
2030                                              gva_t gaddr,
2031                                              unsigned level,
2032                                              int direct,
2033                                              unsigned int access)
2034 {
2035         bool direct_mmu = vcpu->arch.mmu->direct_map;
2036         union kvm_mmu_page_role role;
2037         struct hlist_head *sp_list;
2038         unsigned quadrant;
2039         struct kvm_mmu_page *sp;
2040         int ret;
2041         int collisions = 0;
2042         LIST_HEAD(invalid_list);
2043
2044         role = vcpu->arch.mmu->mmu_role.base;
2045         role.level = level;
2046         role.direct = direct;
2047         role.access = access;
2048         if (role.has_4_byte_gpte) {
2049                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2050                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2051                 role.quadrant = quadrant;
2052         }
2053
2054         sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2055         for_each_valid_sp(vcpu->kvm, sp, sp_list) {
2056                 if (sp->gfn != gfn) {
2057                         collisions++;
2058                         continue;
2059                 }
2060
2061                 if (sp->role.word != role.word) {
2062                         /*
2063                          * If the guest is creating an upper-level page, zap
2064                          * unsync pages for the same gfn.  While it's possible
2065                          * the guest is using recursive page tables, in all
2066                          * likelihood the guest has stopped using the unsync
2067                          * page and is installing a completely unrelated page.
2068                          * Unsync pages must not be left as is, because the new
2069                          * upper-level page will be write-protected.
2070                          */
2071                         if (level > PG_LEVEL_4K && sp->unsync)
2072                                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2073                                                          &invalid_list);
2074                         continue;
2075                 }
2076
2077                 if (direct_mmu)
2078                         goto trace_get_page;
2079
2080                 if (sp->unsync) {
2081                         /*
2082                          * The page is good, but is stale.  kvm_sync_page does
2083                          * get the latest guest state, but (unlike mmu_unsync_children)
2084                          * it doesn't write-protect the page or mark it synchronized!
2085                          * This way the validity of the mapping is ensured, but the
2086                          * overhead of write protection is not incurred until the
2087                          * guest invalidates the TLB mapping.  This allows multiple
2088                          * SPs for a single gfn to be unsync.
2089                          *
2090                          * If the sync fails, the page is zapped.  If so, break
2091                          * in order to rebuild it.
2092                          */
2093                         ret = kvm_sync_page(vcpu, sp, &invalid_list);
2094                         if (ret < 0)
2095                                 break;
2096
2097                         WARN_ON(!list_empty(&invalid_list));
2098                         if (ret > 0)
2099                                 kvm_flush_remote_tlbs(vcpu->kvm);
2100                 }
2101
2102                 __clear_sp_write_flooding_count(sp);
2103
2104 trace_get_page:
2105                 trace_kvm_mmu_get_page(sp, false);
2106                 goto out;
2107         }
2108
2109         ++vcpu->kvm->stat.mmu_cache_miss;
2110
2111         sp = kvm_mmu_alloc_page(vcpu, direct);
2112
2113         sp->gfn = gfn;
2114         sp->role = role;
2115         hlist_add_head(&sp->hash_link, sp_list);
2116         if (!direct) {
2117                 account_shadowed(vcpu->kvm, sp);
2118                 if (level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
2119                         kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2120         }
2121         trace_kvm_mmu_get_page(sp, true);
2122 out:
2123         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2124
2125         if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2126                 vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2127         return sp;
2128 }
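
/*
 * Note (illustrative): role.quadrant only matters for 32-bit non-PAE guests
 * (has_4_byte_gpte).  A shadow page with 8-byte SPTEs covers only half
 * (level 1) or a quarter (level 2) of what the guest's 4-byte-GPTE page
 * table covers, so the quadrant records which slice of the guest table this
 * shadow page shadows.  With PT64_PT_BITS == 9 and PT32_PT_BITS == 10, a
 * level-1 lookup uses a single quadrant bit taken from gaddr bit 21:
 *
 *	quadrant = (gaddr >> (12 + 9 * 1)) & ((1 << ((10 - 9) * 1)) - 1);
 *
 * while a level-2 lookup uses two bits, i.e. four possible quadrants.
 */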
2129
2130 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2131                                         struct kvm_vcpu *vcpu, hpa_t root,
2132                                         u64 addr)
2133 {
2134         iterator->addr = addr;
2135         iterator->shadow_addr = root;
2136         iterator->level = vcpu->arch.mmu->shadow_root_level;
2137
2138         if (iterator->level >= PT64_ROOT_4LEVEL &&
2139             vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2140             !vcpu->arch.mmu->direct_map)
2141                 iterator->level = PT32E_ROOT_LEVEL;
2142
2143         if (iterator->level == PT32E_ROOT_LEVEL) {
2144                 /*
2145                  * prev_root is currently only used for 64-bit hosts. So only
2146                  * the active root_hpa is valid here.
2147                  */
2148                 BUG_ON(root != vcpu->arch.mmu->root.hpa);
2149
2150                 iterator->shadow_addr
2151                         = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2152                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2153                 --iterator->level;
2154                 if (!iterator->shadow_addr)
2155                         iterator->level = 0;
2156         }
2157 }
2158
2159 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2160                              struct kvm_vcpu *vcpu, u64 addr)
2161 {
2162         shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2163                                     addr);
2164 }
2165
2166 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2167 {
2168         if (iterator->level < PG_LEVEL_4K)
2169                 return false;
2170
2171         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2172         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2173         return true;
2174 }
2175
2176 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2177                                u64 spte)
2178 {
2179         if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2180                 iterator->level = 0;
2181                 return;
2182         }
2183
2184         iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2185         --iterator->level;
2186 }
2187
2188 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2189 {
2190         __shadow_walk_next(iterator, *iterator->sptep);
2191 }
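
/*
 * Illustrative sketch: the iterator helpers above are normally wrapped by
 * the for_each_shadow_entry*() macros (used e.g. in __direct_map() below).
 * Open-coded, a walk from the current root down to the leaf SPTE for an
 * address looks roughly like:
 *
 *	struct kvm_shadow_walk_iterator it;
 *	u64 spte;
 *
 *	for (shadow_walk_init(&it, vcpu, addr);
 *	     shadow_walk_okay(&it);
 *	     shadow_walk_next(&it)) {
 *		spte = *it.sptep;	[the SPTE mapping addr at it.level]
 *		if (!is_shadow_present_pte(spte))
 *			break;
 *	}
 */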
2192
2193 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2194                              struct kvm_mmu_page *sp)
2195 {
2196         u64 spte;
2197
2198         BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2199
2200         spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2201
2202         mmu_spte_set(sptep, spte);
2203
2204         mmu_page_add_parent_pte(vcpu, sp, sptep);
2205
2206         if (sp->unsync_children || sp->unsync)
2207                 mark_unsync(sptep);
2208 }
2209
2210 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2211                                    unsigned direct_access)
2212 {
2213         if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2214                 struct kvm_mmu_page *child;
2215
2216                 /*
2217                  * For the direct sp, if the guest pte's dirty bit
2218                  * changed from clean to dirty, it will corrupt the
2219                  * sp's access by allowing writes through the read-only
2220                  * sp, so we should update the spte at this point to get
2221                  * a new sp with the correct access.
2222                  */
2223                 child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2224                 if (child->role.access == direct_access)
2225                         return;
2226
2227                 drop_parent_pte(child, sptep);
2228                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2229         }
2230 }
2231
2232 /* Returns the number of zapped non-leaf child shadow pages. */
2233 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2234                             u64 *spte, struct list_head *invalid_list)
2235 {
2236         u64 pte;
2237         struct kvm_mmu_page *child;
2238
2239         pte = *spte;
2240         if (is_shadow_present_pte(pte)) {
2241                 if (is_last_spte(pte, sp->role.level)) {
2242                         drop_spte(kvm, spte);
2243                 } else {
2244                         child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2245                         drop_parent_pte(child, spte);
2246
2247                         /*
2248                          * Recursively zap nested TDP SPs; parentless SPs are
2249                          * unlikely to be used again in the near future.  This
2250                          * avoids retaining a large number of stale nested SPs.
2251                          */
2252                         if (tdp_enabled && invalid_list &&
2253                             child->role.guest_mode && !child->parent_ptes.val)
2254                                 return kvm_mmu_prepare_zap_page(kvm, child,
2255                                                                 invalid_list);
2256                 }
2257         } else if (is_mmio_spte(pte)) {
2258                 mmu_spte_clear_no_track(spte);
2259         }
2260         return 0;
2261 }
2262
2263 static int kvm_mmu_page_unlink_children(struct kvm *kvm,
2264                                         struct kvm_mmu_page *sp,
2265                                         struct list_head *invalid_list)
2266 {
2267         int zapped = 0;
2268         unsigned i;
2269
2270         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2271                 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
2272
2273         return zapped;
2274 }
2275
2276 static void kvm_mmu_unlink_parents(struct kvm_mmu_page *sp)
2277 {
2278         u64 *sptep;
2279         struct rmap_iterator iter;
2280
2281         while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2282                 drop_parent_pte(sp, sptep);
2283 }
2284
2285 static int mmu_zap_unsync_children(struct kvm *kvm,
2286                                    struct kvm_mmu_page *parent,
2287                                    struct list_head *invalid_list)
2288 {
2289         int i, zapped = 0;
2290         struct mmu_page_path parents;
2291         struct kvm_mmu_pages pages;
2292
2293         if (parent->role.level == PG_LEVEL_4K)
2294                 return 0;
2295
2296         while (mmu_unsync_walk(parent, &pages)) {
2297                 struct kvm_mmu_page *sp;
2298
2299                 for_each_sp(pages, sp, parents, i) {
2300                         kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2301                         mmu_pages_clear_parents(&parents);
2302                         zapped++;
2303                 }
2304         }
2305
2306         return zapped;
2307 }
2308
2309 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2310                                        struct kvm_mmu_page *sp,
2311                                        struct list_head *invalid_list,
2312                                        int *nr_zapped)
2313 {
2314         bool list_unstable, zapped_root = false;
2315
2316         trace_kvm_mmu_prepare_zap_page(sp);
2317         ++kvm->stat.mmu_shadow_zapped;
2318         *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2319         *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2320         kvm_mmu_unlink_parents(sp);
2321
2322         /* Zapping children means active_mmu_pages has become unstable. */
2323         list_unstable = *nr_zapped;
2324
2325         if (!sp->role.invalid && !sp->role.direct)
2326                 unaccount_shadowed(kvm, sp);
2327
2328         if (sp->unsync)
2329                 kvm_unlink_unsync_page(kvm, sp);
2330         if (!sp->root_count) {
2331                 /* Count self */
2332                 (*nr_zapped)++;
2333
2334                 /*
2335                  * Already invalid pages (previously active roots) are not on
2336                  * the active page list.  See list_del() in the "else" case of
2337                  * !sp->root_count.
2338                  */
2339                 if (sp->role.invalid)
2340                         list_add(&sp->link, invalid_list);
2341                 else
2342                         list_move(&sp->link, invalid_list);
2343                 kvm_mod_used_mmu_pages(kvm, -1);
2344         } else {
2345                 /*
2346                  * Remove the active root from the active page list, the root
2347                  * will be explicitly freed when the root_count hits zero.
2348                  */
2349                 list_del(&sp->link);
2350
2351                 /*
2352                  * Obsolete pages cannot be used on any vCPUs, see the comment
2353                  * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2354                  * treats invalid shadow pages as being obsolete.
2355                  */
2356                 zapped_root = !is_obsolete_sp(kvm, sp);
2357         }
2358
2359         if (sp->lpage_disallowed)
2360                 unaccount_huge_nx_page(kvm, sp);
2361
2362         sp->role.invalid = 1;
2363
2364         /*
2365          * Make the request to free obsolete roots after marking the root
2366          * invalid, otherwise other vCPUs may not see it as invalid.
2367          */
2368         if (zapped_root)
2369                 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
2370         return list_unstable;
2371 }
2372
2373 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2374                                      struct list_head *invalid_list)
2375 {
2376         int nr_zapped;
2377
2378         __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2379         return nr_zapped;
2380 }
2381
2382 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2383                                     struct list_head *invalid_list)
2384 {
2385         struct kvm_mmu_page *sp, *nsp;
2386
2387         if (list_empty(invalid_list))
2388                 return;
2389
2390         /*
2391          * We need to make sure everyone sees our modifications to the
2392          * page tables and that we see vcpu->mode changes. The barrier
2393          * in the kvm_flush_remote_tlbs() achieves this. This pairs
2394          * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2395          *
2396          * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2397          * guest mode and/or lockless shadow page table walks.
2398          */
2399         kvm_flush_remote_tlbs(kvm);
2400
2401         list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2402                 WARN_ON(!sp->role.invalid || sp->root_count);
2403                 kvm_mmu_free_page(sp);
2404         }
2405 }
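
/*
 * Illustrative sketch: zapping is a two-phase operation.  Pages are first
 * unlinked and queued on a local invalid_list under mmu_lock, then a single
 * commit flushes remote TLBs and frees everything, e.g. (this mirrors
 * kvm_mmu_unprotect_page() below):
 *
 *	struct kvm_mmu_page *sp;
 *	LIST_HEAD(invalid_list);
 *
 *	write_lock(&kvm->mmu_lock);
 *	for_each_gfn_indirect_valid_sp(kvm, sp, gfn)
 *		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 *	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 *	write_unlock(&kvm->mmu_lock);
 *
 * Batching means one remote TLB flush per commit, regardless of how many
 * pages were zapped.
 */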
2406
2407 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
2408                                                   unsigned long nr_to_zap)
2409 {
2410         unsigned long total_zapped = 0;
2411         struct kvm_mmu_page *sp, *tmp;
2412         LIST_HEAD(invalid_list);
2413         bool unstable;
2414         int nr_zapped;
2415
2416         if (list_empty(&kvm->arch.active_mmu_pages))
2417                 return 0;
2418
2419 restart:
2420         list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2421                 /*
2422                  * Don't zap active root pages, the page itself can't be freed
2423                  * and zapping it will just force vCPUs to realloc and reload.
2424                  */
2425                 if (sp->root_count)
2426                         continue;
2427
2428                 unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
2429                                                       &nr_zapped);
2430                 total_zapped += nr_zapped;
2431                 if (total_zapped >= nr_to_zap)
2432                         break;
2433
2434                 if (unstable)
2435                         goto restart;
2436         }
2437
2438         kvm_mmu_commit_zap_page(kvm, &invalid_list);
2439
2440         kvm->stat.mmu_recycled += total_zapped;
2441         return total_zapped;
2442 }
2443
2444 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2445 {
2446         if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2447                 return kvm->arch.n_max_mmu_pages -
2448                         kvm->arch.n_used_mmu_pages;
2449
2450         return 0;
2451 }
2452
2453 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2454 {
2455         unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2456
2457         if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2458                 return 0;
2459
2460         kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2461
2462         /*
2463          * Note, this check is intentionally soft, it only guarantees that one
2464          * page is available, while the caller may end up allocating as many as
2465          * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
2466          * exceeding the (arbitrary by default) limit will not harm the host,
2467          * being too aggressive may unnecessarily kill the guest, and getting an
2468          * exact count is far more trouble than it's worth, especially in the
2469          * page fault paths.
2470          */
2471         if (!kvm_mmu_available_pages(vcpu->kvm))
2472                 return -ENOSPC;
2473         return 0;
2474 }
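
/*
 * Note (illustrative, assuming the defaults in kvm_host.h at the time of
 * writing: KVM_MIN_FREE_MMU_PAGES == 5, KVM_REFILL_PAGES == 25): a vCPU
 * that finds only 3 pages available will try to zap the oldest shadow
 * pages until 25 - 3 = 22 have been reclaimed, and fails with -ENOSPC only
 * if not even a single page is available afterwards.
 */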
2475
2476 /*
2477  * Changing the number of mmu pages allocated to the vm.
2478  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2479  */
2480 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2481 {
2482         write_lock(&kvm->mmu_lock);
2483
2484         if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2485                 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2486                                                   goal_nr_mmu_pages);
2487
2488                 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2489         }
2490
2491         kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2492
2493         write_unlock(&kvm->mmu_lock);
2494 }
2495
2496 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2497 {
2498         struct kvm_mmu_page *sp;
2499         LIST_HEAD(invalid_list);
2500         int r;
2501
2502         pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2503         r = 0;
2504         write_lock(&kvm->mmu_lock);
2505         for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2506                 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2507                          sp->role.word);
2508                 r = 1;
2509                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2510         }
2511         kvm_mmu_commit_zap_page(kvm, &invalid_list);
2512         write_unlock(&kvm->mmu_lock);
2513
2514         return r;
2515 }
2516
2517 static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2518 {
2519         gpa_t gpa;
2520         int r;
2521
2522         if (vcpu->arch.mmu->direct_map)
2523                 return 0;
2524
2525         gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
2526
2527         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2528
2529         return r;
2530 }
2531
2532 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
2533 {
2534         trace_kvm_mmu_unsync_page(sp);
2535         ++kvm->stat.mmu_unsync;
2536         sp->unsync = 1;
2537
2538         kvm_mmu_mark_parents_unsync(sp);
2539 }
2540
2541 /*
2542  * Attempt to unsync any shadow pages that can be reached by the specified gfn,
2543  * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
2544  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
2545  * be write-protected.
2546  */
2547 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
2548                             gfn_t gfn, bool can_unsync, bool prefetch)
2549 {
2550         struct kvm_mmu_page *sp;
2551         bool locked = false;
2552
2553         /*
2554          * Force write-protection if the page is being tracked.  Note, the page
2555          * track machinery is used to write-protect upper-level shadow pages,
2556          * i.e. this guards the role.level == 4K assertion below!
2557          */
2558         if (kvm_slot_page_track_is_active(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE))
2559                 return -EPERM;
2560
2561         /*
2562          * The page is not write-tracked, mark existing shadow pages unsync
2563          * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
2564          * that case, KVM must complete emulation of the guest TLB flush before
2565          * allowing shadow pages to become unsync (writable by the guest).
2566          */
2567         for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2568                 if (!can_unsync)
2569                         return -EPERM;
2570
2571                 if (sp->unsync)
2572                         continue;
2573
2574                 if (prefetch)
2575                         return -EEXIST;
2576
2577                 /*
2578                  * TDP MMU page faults require an additional spinlock as they
2579                  * run with mmu_lock held for read, not write, and the unsync
2580                  * logic is not thread-safe.  Take the spinlock regardless of
2581                  * the MMU type to avoid extra conditionals/parameters, there's
2582                  * no meaningful penalty if mmu_lock is held for write.
2583                  */
2584                 if (!locked) {
2585                         locked = true;
2586                         spin_lock(&kvm->arch.mmu_unsync_pages_lock);
2587
2588                         /*
2589                          * Recheck after taking the spinlock, a different vCPU
2590                          * may have since marked the page unsync.  A false
2591                          * positive on the unprotected check above is not
2592                          * possible as clearing sp->unsync _must_ hold mmu_lock
2593                          * for write, i.e. unsync cannot transition from 1->0
2594                          * while this CPU holds mmu_lock for read (or write).
2595                          */
2596                         if (READ_ONCE(sp->unsync))
2597                                 continue;
2598                 }
2599
2600                 WARN_ON(sp->role.level != PG_LEVEL_4K);
2601                 kvm_unsync_page(kvm, sp);
2602         }
2603         if (locked)
2604                 spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
2605
2606         /*
2607          * We need to ensure that the marking of unsync pages is visible
2608          * before the SPTE is updated to allow writes because
2609          * kvm_mmu_sync_roots() checks the unsync flags without holding
2610          * the MMU lock and so can race with this. If the SPTE was updated
2611          * before the page had been marked as unsync-ed, something like the
2612          * following could happen:
2613          *
2614          * CPU 1                    CPU 2
2615          * ---------------------------------------------------------------------
2616          * 1.2 Host updates SPTE
2617          *     to be writable
2618          *                      2.1 Guest writes a GPTE for GVA X.
2619          *                          (GPTE being in the guest page table shadowed
2620          *                           by the SP from CPU 1.)
2621          *                          This reads SPTE during the page table walk.
2622          *                          Since SPTE.W is read as 1, there is no
2623          *                          fault.
2624          *
2625          *                      2.2 Guest issues TLB flush.
2626          *                          That causes a VM Exit.
2627          *
2628          *                      2.3 Walking of unsync pages sees sp->unsync is
2629          *                          false and skips the page.
2630          *
2631          *                      2.4 Guest accesses GVA X.
2632          *                          Since the mapping in the SP was not updated,
2633          *                          the old mapping for GVA X is incorrectly
2634          *                          used.
2635          * 1.1 Host marks SP
2636          *     as unsync
2637          *     (sp->unsync = true)
2638          *
2639          * The write barrier below ensures that 1.1 happens before 1.2 and thus
2640          * the situation in 2.4 does not arise.  It pairs with the read barrier
2641          * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3.
2642          */
2643         smp_wmb();
2644
2645         return 0;
2646 }
2647
2648 static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
2649                         u64 *sptep, unsigned int pte_access, gfn_t gfn,
2650                         kvm_pfn_t pfn, struct kvm_page_fault *fault)
2651 {
2652         struct kvm_mmu_page *sp = sptep_to_sp(sptep);
2653         int level = sp->role.level;
2654         int was_rmapped = 0;
2655         int ret = RET_PF_FIXED;
2656         bool flush = false;
2657         bool wrprot;
2658         u64 spte;
2659
2660         /* Prefetching always gets a writable pfn.  */
2661         bool host_writable = !fault || fault->map_writable;
2662         bool prefetch = !fault || fault->prefetch;
2663         bool write_fault = fault && fault->write;
2664
2665         pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2666                  *sptep, write_fault, gfn);
2667
2668         if (unlikely(is_noslot_pfn(pfn))) {
2669                 mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2670                 return RET_PF_EMULATE;
2671         }
2672
2673         if (is_shadow_present_pte(*sptep)) {
2674                 /*
2675                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2676                  * the parent of the now unreachable PTE.
2677                  */
2678                 if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2679                         struct kvm_mmu_page *child;
2680                         u64 pte = *sptep;
2681
2682                         child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2683                         drop_parent_pte(child, sptep);
2684                         flush = true;
2685                 } else if (pfn != spte_to_pfn(*sptep)) {
2686                         pgprintk("hfn old %llx new %llx\n",
2687                                  spte_to_pfn(*sptep), pfn);
2688                         drop_spte(vcpu->kvm, sptep);
2689                         flush = true;
2690                 } else
2691                         was_rmapped = 1;
2692         }
2693
2694         wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
2695                            true, host_writable, &spte);
2696
2697         if (*sptep == spte) {
2698                 ret = RET_PF_SPURIOUS;
2699         } else {
2700                 flush |= mmu_spte_update(sptep, spte);
2701                 trace_kvm_mmu_set_spte(level, gfn, sptep);
2702         }
2703
2704         if (wrprot) {
2705                 if (write_fault)
2706                         ret = RET_PF_EMULATE;
2707         }
2708
2709         if (flush)
2710                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
2711                                 KVM_PAGES_PER_HPAGE(level));
2712
2713         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2714
2715         if (!was_rmapped) {
2716                 WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
2717                 kvm_update_page_stats(vcpu->kvm, level, 1);
2718                 rmap_add(vcpu, slot, sptep, gfn);
2719         }
2720
2721         return ret;
2722 }
2723
2724 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2725                                     struct kvm_mmu_page *sp,
2726                                     u64 *start, u64 *end)
2727 {
2728         struct page *pages[PTE_PREFETCH_NUM];
2729         struct kvm_memory_slot *slot;
2730         unsigned int access = sp->role.access;
2731         int i, ret;
2732         gfn_t gfn;
2733
2734         gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2735         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2736         if (!slot)
2737                 return -1;
2738
2739         ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2740         if (ret <= 0)
2741                 return -1;
2742
2743         for (i = 0; i < ret; i++, gfn++, start++) {
2744                 mmu_set_spte(vcpu, slot, start, access, gfn,
2745                              page_to_pfn(pages[i]), NULL);
2746                 put_page(pages[i]);
2747         }
2748
2749         return 0;
2750 }
2751
2752 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2753                                   struct kvm_mmu_page *sp, u64 *sptep)
2754 {
2755         u64 *spte, *start = NULL;
2756         int i;
2757
2758         WARN_ON(!sp->role.direct);
2759
2760         i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2761         spte = sp->spt + i;
2762
2763         for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2764                 if (is_shadow_present_pte(*spte) || spte == sptep) {
2765                         if (!start)
2766                                 continue;
2767                         if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2768                                 return;
2769                         start = NULL;
2770                 } else if (!start)
2771                         start = spte;
2772         }
2773         if (start)
2774                 direct_pte_prefetch_many(vcpu, sp, start, spte);
2775 }
2776
2777 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2778 {
2779         struct kvm_mmu_page *sp;
2780
2781         sp = sptep_to_sp(sptep);
2782
2783         /*
2784          * Without accessed bits, there's no way to distinguish between
2785          * actually accessed translations and prefetched, so disable pte
2786          * prefetch if accessed bits aren't available.
2787          */
2788         if (sp_ad_disabled(sp))
2789                 return;
2790
2791         if (sp->role.level > PG_LEVEL_4K)
2792                 return;
2793
2794         /*
2795          * If addresses are being invalidated, skip prefetching to avoid
2796          * accidentally prefetching those addresses.
2797          */
2798         if (unlikely(vcpu->kvm->mmu_notifier_count))
2799                 return;
2800
2801         __direct_pte_prefetch(vcpu, sp, sptep);
2802 }
2803
2804 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
2805                                   const struct kvm_memory_slot *slot)
2806 {
2807         unsigned long hva;
2808         unsigned long flags;
2809         int level = PG_LEVEL_4K;
2810         pgd_t pgd;
2811         p4d_t p4d;
2812         pud_t pud;
2813         pmd_t pmd;
2814
2815         if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
2816                 return PG_LEVEL_4K;
2817
2818         /*
2819          * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
2820          * is not solely for performance, it's also necessary to avoid the
2821          * "writable" check in __gfn_to_hva_many(), which will always fail on
2822          * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
2823          * page fault steps have already verified the guest isn't writing a
2824          * read-only memslot.
2825          */
2826         hva = __gfn_to_hva_memslot(slot, gfn);
2827
2828         /*
2829          * Lookup the mapping level in the current mm.  The information
2830          * may become stale soon, but it is safe to use as long as
2831          * 1) mmu_notifier_retry was checked after taking mmu_lock, and
2832          * 2) mmu_lock is taken now.
2833          *
2834          * We still need to disable IRQs to prevent concurrent tear down
2835          * of page tables.
2836          */
2837         local_irq_save(flags);
2838
2839         pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
2840         if (pgd_none(pgd))
2841                 goto out;
2842
2843         p4d = READ_ONCE(*p4d_offset(&pgd, hva));
2844         if (p4d_none(p4d) || !p4d_present(p4d))
2845                 goto out;
2846
2847         pud = READ_ONCE(*pud_offset(&p4d, hva));
2848         if (pud_none(pud) || !pud_present(pud))
2849                 goto out;
2850
2851         if (pud_large(pud)) {
2852                 level = PG_LEVEL_1G;
2853                 goto out;
2854         }
2855
2856         pmd = READ_ONCE(*pmd_offset(&pud, hva));
2857         if (pmd_none(pmd) || !pmd_present(pmd))
2858                 goto out;
2859
2860         if (pmd_large(pmd))
2861                 level = PG_LEVEL_2M;
2862
2863 out:
2864         local_irq_restore(flags);
2865         return level;
2866 }
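
/*
 * Note (illustrative): the lookup above walks the host's own page tables
 * for the hva backing the gfn.  A pfn backed by a 2MB transparent hugepage
 * in the host, for example, yields PG_LEVEL_2M, which
 * kvm_mmu_max_mapping_level() below then clamps against the memslot's
 * lpage_info (disallow_lpage) and max_huge_page_level.
 */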
2867
2868 int kvm_mmu_max_mapping_level(struct kvm *kvm,
2869                               const struct kvm_memory_slot *slot, gfn_t gfn,
2870                               kvm_pfn_t pfn, int max_level)
2871 {
2872         struct kvm_lpage_info *linfo;
2873         int host_level;
2874
2875         max_level = min(max_level, max_huge_page_level);
2876         for ( ; max_level > PG_LEVEL_4K; max_level--) {
2877                 linfo = lpage_info_slot(gfn, slot, max_level);
2878                 if (!linfo->disallow_lpage)
2879                         break;
2880         }
2881
2882         if (max_level == PG_LEVEL_4K)
2883                 return PG_LEVEL_4K;
2884
2885         host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
2886         return min(host_level, max_level);
2887 }
2888
2889 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
2890 {
2891         struct kvm_memory_slot *slot = fault->slot;
2892         kvm_pfn_t mask;
2893
2894         fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;
2895
2896         if (unlikely(fault->max_level == PG_LEVEL_4K))
2897                 return;
2898
2899         if (is_error_noslot_pfn(fault->pfn) || kvm_is_reserved_pfn(fault->pfn))
2900                 return;
2901
2902         if (kvm_slot_dirty_track_enabled(slot))
2903                 return;
2904
2905         /*
2906          * Enforce the iTLB multihit workaround after capturing the requested
2907          * level, which will be used to do precise, accurate accounting.
2908          */
2909         fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
2910                                                      fault->gfn, fault->pfn,
2911                                                      fault->max_level);
2912         if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
2913                 return;
2914
2915         /*
2916          * mmu_notifier_retry() was successful and mmu_lock is held, so
2917          * the pmd can't be split from under us.
2918          */
2919         fault->goal_level = fault->req_level;
2920         mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
2921         VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
2922         fault->pfn &= ~mask;
2923 }
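
/*
 * Worked example (illustrative) for the final alignment step above: with a
 * 2MB goal level, KVM_PAGES_PER_HPAGE() is 512 and mask == 0x1ff.  The
 * VM_BUG_ON() asserts that gfn and pfn share the same offset within the
 * huge page, and "pfn &= ~mask" rounds the pfn down to the 2MB-aligned
 * head of the host huge page so that one large SPTE can map the whole
 * region.
 */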
2924
2925 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
2926 {
2927         if (cur_level > PG_LEVEL_4K &&
2928             cur_level == fault->goal_level &&
2929             is_shadow_present_pte(spte) &&
2930             !is_large_pte(spte)) {
2931                 /*
2932                  * A small SPTE exists for this pfn, but FNAME(fetch)
2933                  * and __direct_map would like to create a large PTE
2934                  * instead: just force them to go down another level,
2935                  * patching the next 9 bits of the address back into
2936                  * pfn for them.
2937                  */
2938                 u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
2939                                 KVM_PAGES_PER_HPAGE(cur_level - 1);
2940                 fault->pfn |= fault->gfn & page_mask;
2941                 fault->goal_level--;
2942         }
2943 }
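
/*
 * Worked example (illustrative): when dropping from a 2MB goal level to 4K,
 * page_mask == 512 - 1 == 0x1ff, i.e. the next lower 9 bits of the frame
 * number, so "pfn |= gfn & page_mask" re-inserts the 4K page index within
 * the 2MB region that kvm_mmu_hugepage_adjust() had masked off of pfn.
 */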
2944
2945 static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
2946 {
2947         struct kvm_shadow_walk_iterator it;
2948         struct kvm_mmu_page *sp;
2949         int ret;
2950         gfn_t base_gfn = fault->gfn;
2951
2952         kvm_mmu_hugepage_adjust(vcpu, fault);
2953
2954         trace_kvm_mmu_spte_requested(fault);
2955         for_each_shadow_entry(vcpu, fault->addr, it) {
2956                 /*
2957                  * We cannot overwrite existing page tables with an NX
2958                  * large page, as the leaf could be executable.
2959                  */
2960                 if (fault->nx_huge_page_workaround_enabled)
2961                         disallowed_hugepage_adjust(fault, *it.sptep, it.level);
2962
2963                 base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
2964                 if (it.level == fault->goal_level)
2965                         break;
2966
2967                 drop_large_spte(vcpu, it.sptep);
2968                 if (is_shadow_present_pte(*it.sptep))
2969                         continue;
2970
2971                 sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
2972                                       it.level - 1, true, ACC_ALL);
2973
2974                 link_shadow_page(vcpu, it.sptep, sp);
2975                 if (fault->is_tdp && fault->huge_page_disallowed &&
2976                     fault->req_level >= it.level)
2977                         account_huge_nx_page(vcpu->kvm, sp);
2978         }
2979
2980         if (WARN_ON_ONCE(it.level != fault->goal_level))
2981                 return -EFAULT;
2982
2983         ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
2984                            base_gfn, fault->pfn, fault);
2985         if (ret == RET_PF_SPURIOUS)
2986                 return ret;
2987
2988         direct_pte_prefetch(vcpu, it.sptep);
2989         ++vcpu->stat.pf_fixed;
2990         return ret;
2991 }
2992
2993 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2994 {
2995         send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
2996 }
2997
2998 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2999 {
3000         /*
3001          * Do not cache the mmio info caused by writing the readonly gfn
3002          * into the spte, otherwise a read access on the readonly gfn can
3003          * also cause an mmio page fault and be treated as mmio access.
3004          */
3005         if (pfn == KVM_PFN_ERR_RO_FAULT)
3006                 return RET_PF_EMULATE;
3007
3008         if (pfn == KVM_PFN_ERR_HWPOISON) {
3009                 kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
3010                 return RET_PF_RETRY;
3011         }
3012
3013         return -EFAULT;
3014 }
3015
3016 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
3017                                 unsigned int access, int *ret_val)
3018 {
3019         /* The pfn is invalid, report the error! */
3020         if (unlikely(is_error_pfn(fault->pfn))) {
3021                 *ret_val = kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
3022                 return true;
3023         }
3024
3025         if (unlikely(!fault->slot)) {
3026                 gva_t gva = fault->is_tdp ? 0 : fault->addr;
3027
3028                 vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3029                                      access & shadow_mmio_access_mask);
3030                 /*
3031                  * If MMIO caching is disabled, emulate immediately without
3032                  * touching the shadow page tables as attempting to install an
3033                  * MMIO SPTE will just be an expensive nop.  Do not cache MMIO
3034          * whose gfn is greater than host.MAXPHYADDR; any guest that
3035                  * generates such gfns is running nested and is being tricked
3036                  * by L0 userspace (you can observe gfn > L1.MAXPHYADDR if
3037                  * and only if L1's MAXPHYADDR is inaccurate with respect to
3038                  * the hardware's).
3039                  */
3040                 if (unlikely(!enable_mmio_caching) ||
3041                     unlikely(fault->gfn > kvm_mmu_max_gfn())) {
3042                         *ret_val = RET_PF_EMULATE;
3043                         return true;
3044                 }
3045         }
3046
3047         return false;
3048 }
3049
3050 static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
3051 {
3052         /*
3053          * Do not fix an mmio spte with an invalid generation number; it
3054          * needs to be updated by the slow page fault path.
3055          */
3056         if (fault->rsvd)
3057                 return false;
3058
3059         /* See if the page fault is due to an NX violation */
3060         if (unlikely(fault->exec && fault->present))
3061                 return false;
3062
3063         /*
3064          * #PF can be fast if:
3065          * 1. The shadow page table entry is not present, which could mean that
3066          *    the fault is potentially caused by access tracking (if enabled).
3067          * 2. The shadow page table entry is present and the fault is caused
3068          *    by write-protect, which means we just need to change the W bit
3069          *    of the spte, which can be done outside of mmu_lock.
3070          *
3071          * However, if access tracking is disabled we know that a non-present
3072          * page must be a genuine page fault where we have to create a new SPTE.
3073          * So, if access tracking is disabled, we return true only for write
3074          * accesses to a present page.
3075          */
3076
3077         return shadow_acc_track_mask != 0 || (fault->write && fault->present);
3078 }
3079
3080 /*
3081  * Returns true if the SPTE was fixed successfully. Otherwise,
3082  * someone else modified the SPTE from its original value.
3083  */
3084 static bool
3085 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
3086                         u64 *sptep, u64 old_spte, u64 new_spte)
3087 {
3088         /*
3089          * Theoretically we could also set dirty bit (and flush TLB) here in
3090          * order to eliminate unnecessary PML logging. See comments in
3091          * set_spte. But fast_page_fault is very unlikely to happen with PML
3092          * enabled, so we do not do this. This might result in the same GPA
3093          * being logged in the PML buffer again when the write really happens,
3094          * and in mark_page_dirty eventually being called twice for it, but
3095          * that does no harm. This also avoids the TLB flush needed after
3096          * setting the dirty bit, so non-PML cases won't be impacted.
3097          *
3098          * Compare with set_spte where instead shadow_dirty_mask is set.
3099          */
3100         if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3101                 return false;
3102
3103         if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
3104                 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3105
3106         return true;
3107 }
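
/*
 * Minimal sketch (not upstream code) of the lockless-update idiom used
 * above: publish a new SPTE value only if no other CPU has changed the
 * SPTE since it was read, and let the caller re-read and retry otherwise,
 * exactly as fast_page_fault() below does.  The helper name is made up
 * for illustration.
 */
static bool __maybe_unused try_update_spte_example(u64 *sptep, u64 old_spte,
						   u64 new_spte)
{
	/*
	 * cmpxchg64() returns the value that was actually found in *sptep;
	 * a mismatch means another CPU won the race and this update must
	 * not be applied.
	 */
	return cmpxchg64(sptep, old_spte, new_spte) == old_spte;
}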
3108
3109 static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
3110 {
3111         if (fault->exec)
3112                 return is_executable_pte(spte);
3113
3114         if (fault->write)
3115                 return is_writable_pte(spte);
3116
3117         /* Fault was on Read access */
3118         return spte & PT_PRESENT_MASK;
3119 }
3120
3121 /*
3122  * Returns the last level spte pointer of the shadow page walk for the given
3123  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
3124  * walk could be performed, returns NULL and *spte does not contain valid data.
3125  *
3126  * Contract:
3127  *  - Must be called between walk_shadow_page_lockless_{begin,end}.
3128  *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
3129  */
3130 static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
3131 {
3132         struct kvm_shadow_walk_iterator iterator;
3133         u64 old_spte;
3134         u64 *sptep = NULL;
3135
3136         for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
3137                 sptep = iterator.sptep;
3138                 *spte = old_spte;
3139         }
3140
3141         return sptep;
3142 }
3143
3144 /*
3145  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3146  */
3147 static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3148 {
3149         struct kvm_mmu_page *sp;
3150         int ret = RET_PF_INVALID;
3151         u64 spte = 0ull;
3152         u64 *sptep = NULL;
3153         uint retry_count = 0;
3154
3155         if (!page_fault_can_be_fast(fault))
3156                 return ret;
3157
3158         walk_shadow_page_lockless_begin(vcpu);
3159
3160         do {
3161                 u64 new_spte;
3162
3163                 if (is_tdp_mmu(vcpu->arch.mmu))
3164                         sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3165                 else
3166                         sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3167
3168                 if (!is_shadow_present_pte(spte))
3169                         break;
3170
3171                 sp = sptep_to_sp(sptep);
3172                 if (!is_last_spte(spte, sp->role.level))
3173                         break;
3174
3175                 /*
3176                  * Check whether the memory access that caused the fault would
3177                  * still cause it if it were to be performed right now. If not,
3178                  * then this is a spurious fault caused by a lazily flushed TLB entry,
3179                  * or some other CPU has already fixed the PTE after the
3180                  * current CPU took the fault.
3181                  *
3182                  * Need not check the access of upper level table entries since
3183                  * they are always ACC_ALL.
3184                  */
3185                 if (is_access_allowed(fault, spte)) {
3186                         ret = RET_PF_SPURIOUS;
3187                         break;
3188                 }
3189
3190                 new_spte = spte;
3191
3192                 if (is_access_track_spte(spte))
3193                         new_spte = restore_acc_track_spte(new_spte);
3194
3195                 /*
3196                  * Currently, to simplify the code, write-protection can
3197                  * be removed in the fast path only if the SPTE was
3198                  * write-protected for dirty-logging or access tracking.
3199                  */
3200                 if (fault->write &&
3201                     spte_can_locklessly_be_made_writable(spte)) {
3202                         new_spte |= PT_WRITABLE_MASK;
3203
3204                         /*
3205                          * Do not fix write-permission on the large spte when
3206                          * dirty logging is enabled. Since only the faulting
3207                          * page is marked in the dirty bitmap by
3208                          * fast_pf_fix_direct_spte(), the other pages covered
3209                          * by the large spte would be missed.
3210                          *
3211                          * Instead, we let the slow page fault path create a
3212                          * normal spte to fix the access.
3213                          */
3214                         if (sp->role.level > PG_LEVEL_4K &&
3215                             kvm_slot_dirty_track_enabled(fault->slot))
3216                                 break;
3217                 }
3218
3219                 /* Verify that the fault can be handled in the fast path */
3220                 if (new_spte == spte ||
3221                     !is_access_allowed(fault, new_spte))
3222                         break;
3223
3224                 /*
3225                  * Currently, fast page fault only works for direct mapping
3226                  * since the gfn is not stable for indirect shadow page. See
3227                  * Documentation/virt/kvm/locking.rst to get more detail.
3228                  */
3229                 if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3230                         ret = RET_PF_FIXED;
3231                         break;
3232                 }
3233
3234                 if (++retry_count > 4) {
3235                         printk_once(KERN_WARNING
3236                                 "kvm: Fast #PF retrying more than 4 times.\n");
3237                         break;
3238                 }
3239
3240         } while (true);
3241
3242         trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3243         walk_shadow_page_lockless_end(vcpu);
3244
3245         return ret;
3246 }
3247
3248 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3249                                struct list_head *invalid_list)
3250 {
3251         struct kvm_mmu_page *sp;
3252
3253         if (!VALID_PAGE(*root_hpa))
3254                 return;
3255
3256         sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
3257         if (WARN_ON(!sp))
3258                 return;
3259
3260         if (is_tdp_mmu_page(sp))
3261                 kvm_tdp_mmu_put_root(kvm, sp, false);
3262         else if (!--sp->root_count && sp->role.invalid)
3263                 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3264
3265         *root_hpa = INVALID_PAGE;
3266 }
3267
3268 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3269 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
3270                         ulong roots_to_free)
3271 {
3272         int i;
3273         LIST_HEAD(invalid_list);
3274         bool free_active_root;
3275
3276         BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3277
3278         /* Before acquiring the MMU lock, see if we need to do any real work. */
3279         free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT)
3280                 && VALID_PAGE(mmu->root.hpa);
3281
3282         if (!free_active_root) {
3283                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3284                         if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3285                             VALID_PAGE(mmu->prev_roots[i].hpa))
3286                                 break;
3287
3288                 if (i == KVM_MMU_NUM_PREV_ROOTS)
3289                         return;
3290         }
3291
3292         write_lock(&kvm->mmu_lock);
3293
3294         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3295                 if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3296                         mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3297                                            &invalid_list);
3298
3299         if (free_active_root) {
3300                 if (to_shadow_page(mmu->root.hpa)) {
3301                         mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
3302                 } else if (mmu->pae_root) {
3303                         for (i = 0; i < 4; ++i) {
3304                                 if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3305                                         continue;
3306
3307                                 mmu_free_root_page(kvm, &mmu->pae_root[i],
3308                                                    &invalid_list);
3309                                 mmu->pae_root[i] = INVALID_PAE_ROOT;
3310                         }
3311                 }
3312                 mmu->root.hpa = INVALID_PAGE;
3313                 mmu->root.pgd = 0;
3314         }
3315
3316         kvm_mmu_commit_zap_page(kvm, &invalid_list);
3317         write_unlock(&kvm->mmu_lock);
3318 }
3319 EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
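
/*
 * Usage sketch (illustrative only): callers build roots_to_free as a
 * bitmask of KVM_MMU_ROOT_CURRENT and KVM_MMU_ROOT_PREVIOUS(i), e.g. to
 * drop the active root plus the first cached previous root:
 *
 *	kvm_mmu_free_roots(kvm, mmu,
 *			   KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(0));
 *
 * kvm_mmu_free_guest_mode_roots() below is an in-tree example that
 * accumulates KVM_MMU_ROOT_PREVIOUS(i) bits before a single call.
 */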
3320
3321 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
3322 {
3323         unsigned long roots_to_free = 0;
3324         hpa_t root_hpa;
3325         int i;
3326
3327         /*
3328          * This should not be called while L2 is active; L2 can't invalidate
3329          * _only_ its own roots, e.g. INVVPID unconditionally exits.
3330          */
3331         WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
3332
3333         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3334                 root_hpa = mmu->prev_roots[i].hpa;
3335                 if (!VALID_PAGE(root_hpa))
3336                         continue;
3337
3338                 if (!to_shadow_page(root_hpa) ||
3339                         to_shadow_page(root_hpa)->role.guest_mode)
3340                         roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3341         }
3342
3343         kvm_mmu_free_roots(kvm, mmu, roots_to_free);
3344 }
3345 EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
3346
3347
3348 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3349 {
3350         int ret = 0;
3351
3352         if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3353                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3354                 ret = 1;
3355         }
3356
3357         return ret;
3358 }
3359
3360 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
3361                             u8 level, bool direct)
3362 {
3363         struct kvm_mmu_page *sp;
3364
3365         sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
3366         ++sp->root_count;
3367
3368         return __pa(sp->spt);
3369 }
3370
3371 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3372 {
3373         struct kvm_mmu *mmu = vcpu->arch.mmu;
3374         u8 shadow_root_level = mmu->shadow_root_level;
3375         hpa_t root;
3376         unsigned i;
3377         int r;
3378
3379         write_lock(&vcpu->kvm->mmu_lock);
3380         r = make_mmu_pages_available(vcpu);
3381         if (r < 0)
3382                 goto out_unlock;
3383
3384         if (is_tdp_mmu_enabled(vcpu->kvm)) {
3385                 root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3386                 mmu->root.hpa = root;
3387         } else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3388                 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3389                 mmu->root.hpa = root;
3390         } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3391                 if (WARN_ON_ONCE(!mmu->pae_root)) {
3392                         r = -EIO;
3393                         goto out_unlock;
3394                 }
3395
3396                 for (i = 0; i < 4; ++i) {
3397                         WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3398
3399                         root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
3400                                               i << 30, PT32_ROOT_LEVEL, true);
3401                         mmu->pae_root[i] = root | PT_PRESENT_MASK |
3402                                            shadow_me_mask;
3403                 }
3404                 mmu->root.hpa = __pa(mmu->pae_root);
3405         } else {
3406                 WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3407                 r = -EIO;
3408                 goto out_unlock;
3409         }
3410
3411         /* root.pgd is ignored for direct MMUs. */
3412         mmu->root.pgd = 0;
3413 out_unlock:
3414         write_unlock(&vcpu->kvm->mmu_lock);
3415         return r;
3416 }
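
/*
 * Worked example (illustrative): in the PT32E branch above, entry i of
 * pae_root covers the i-th gigabyte of the guest address space.  With
 * PAGE_SHIFT == 12, i << (30 - PAGE_SHIFT) == i << 18 is the first gfn of
 * that gigabyte and i << 30 is the matching address, so pae_root[1] maps
 * guest physical range 1GiB..2GiB.
 */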
3417
3418 static int mmu_first_shadow_root_alloc(struct kvm *kvm)
3419 {
3420         struct kvm_memslots *slots;
3421         struct kvm_memory_slot *slot;
3422         int r = 0, i, bkt;
3423
3424         /*
3425          * Check if this is the first shadow root being allocated before
3426          * taking the lock.
3427          */
3428         if (kvm_shadow_root_allocated(kvm))
3429                 return 0;
3430
3431         mutex_lock(&kvm->slots_arch_lock);
3432
3433         /* Recheck, under the lock, whether this is the first shadow root. */
3434         if (kvm_shadow_root_allocated(kvm))
3435                 goto out_unlock;
3436
3437         /*
3438          * Check if anything actually needs to be allocated, e.g. all metadata
3439          * will be allocated upfront if TDP is disabled.
3440          */
3441         if (kvm_memslots_have_rmaps(kvm) &&
3442             kvm_page_track_write_tracking_enabled(kvm))
3443                 goto out_success;
3444
3445         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
3446                 slots = __kvm_memslots(kvm, i);
3447                 kvm_for_each_memslot(slot, bkt, slots) {
3448                         /*
3449                          * Both of these functions are no-ops if the target is
3450                          * already allocated, so unconditionally calling both
3451                          * is safe.  Intentionally do NOT free allocations on
3452                          * failure to avoid having to track which allocations
3453                          * were made now versus when the memslot was created.
3454                          * The metadata is guaranteed to be freed when the slot
3455                          * is freed, and will be kept/used if userspace retries
3456                          * KVM_RUN instead of killing the VM.
3457                          */
3458                         r = memslot_rmap_alloc(slot, slot->npages);
3459                         if (r)
3460                                 goto out_unlock;
3461                         r = kvm_page_track_write_tracking_alloc(slot);
3462                         if (r)
3463                                 goto out_unlock;
3464                 }
3465         }
3466
3467         /*
3468          * Ensure that shadow_root_allocated becomes true strictly after
3469          * all the related pointers are set.
3470          */
3471 out_success:
3472         smp_store_release(&kvm->arch.shadow_root_allocated, true);
3473
3474 out_unlock:
3475         mutex_unlock(&kvm->slots_arch_lock);
3476         return r;
3477 }
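
/*
 * Illustrative pairing sketch (not upstream code): the smp_store_release()
 * above publishes the rmap/write-tracking allocations, so the reader side
 * (e.g. the kvm_shadow_root_allocated() checks at the top of the function)
 * is expected to use an acquire load, never observing the flag as true
 * while the related pointers are still NULL.  A minimal consumer would
 * look roughly like this (hypothetical helper name):
 */
static bool __maybe_unused shadow_root_published_example(struct kvm *kvm)
{
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}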
3478
3479 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3480 {
3481         struct kvm_mmu *mmu = vcpu->arch.mmu;
3482         u64 pdptrs[4], pm_mask;
3483         gfn_t root_gfn, root_pgd;
3484         hpa_t root;
3485         unsigned i;
3486         int r;
3487
3488         root_pgd = mmu->get_guest_pgd(vcpu);
3489         root_gfn = root_pgd >> PAGE_SHIFT;
3490
3491         if (mmu_check_root(vcpu, root_gfn))
3492                 return 1;
3493
3494         /*
3495          * On SVM, reading PDPTRs might access guest memory, which might fault
3496          * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
3497          */
3498         if (mmu->root_level == PT32E_ROOT_LEVEL) {
3499                 for (i = 0; i < 4; ++i) {
3500                         pdptrs[i] = mmu->get_pdptr(vcpu, i);
3501                         if (!(pdptrs[i] & PT_PRESENT_MASK))
3502                                 continue;
3503
3504                         if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
3505                                 return 1;
3506                 }
3507         }
3508
3509         r = mmu_first_shadow_root_alloc(vcpu->kvm);
3510         if (r)
3511                 return r;
3512
3513         write_lock(&vcpu->kvm->mmu_lock);
3514         r = make_mmu_pages_available(vcpu);
3515         if (r < 0)
3516                 goto out_unlock;
3517
3518         /*
3519          * Do we shadow a long mode page table? If so we need to
3520          * write-protect the guest's page table root.
3521          */
3522         if (mmu->root_level >= PT64_ROOT_4LEVEL) {
3523                 root = mmu_alloc_root(vcpu, root_gfn, 0,
3524                                       mmu->shadow_root_level, false);
3525                 mmu->root.hpa = root;
3526                 goto set_root_pgd;
3527         }
3528
3529         if (WARN_ON_ONCE(!mmu->pae_root)) {
3530                 r = -EIO;
3531                 goto out_unlock;
3532         }
3533
3534         /*
3535          * We shadow a 32 bit page table. This may be a legacy 2-level
3536          * or a PAE 3-level page table. In either case we need to be aware that
3537          * the shadow page table may be a PAE or a long mode page table.
3538          */
3539         pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3540         if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
3541                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3542
3543                 if (WARN_ON_ONCE(!mmu->pml4_root)) {
3544                         r = -EIO;
3545                         goto out_unlock;
3546                 }
3547                 mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3548
3549                 if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
3550                         if (WARN_ON_ONCE(!mmu->pml5_root)) {
3551                                 r = -EIO;
3552                                 goto out_unlock;
3553                         }
3554                         mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
3555                 }
3556         }
3557
3558         for (i = 0; i < 4; ++i) {
3559                 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3560
3561                 if (mmu->root_level == PT32E_ROOT_LEVEL) {
3562                         if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3563                                 mmu->pae_root[i] = INVALID_PAE_ROOT;
3564                                 continue;
3565                         }
3566                         root_gfn = pdptrs[i] >> PAGE_SHIFT;
3567                 }
3568
3569                 root = mmu_alloc_root(vcpu, root_gfn, i << 30,
3570                                       PT32_ROOT_LEVEL, false);
3571                 mmu->pae_root[i] = root | pm_mask;
3572         }
3573
3574         if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
3575                 mmu->root.hpa = __pa(mmu->pml5_root);
3576         else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
3577                 mmu->root.hpa = __pa(mmu->pml4_root);
3578         else
3579                 mmu->root.hpa = __pa(mmu->pae_root);
3580
3581 set_root_pgd:
3582         mmu->root.pgd = root_pgd;
3583 out_unlock:
3584         write_unlock(&vcpu->kvm->mmu_lock);
3585
3586         return r;
3587 }
3588
3589 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3590 {
3591         struct kvm_mmu *mmu = vcpu->arch.mmu;
3592         bool need_pml5 = mmu->shadow_root_level > PT64_ROOT_4LEVEL;
3593         u64 *pml5_root = NULL;
3594         u64 *pml4_root = NULL;
3595         u64 *pae_root;
3596
3597         /*
3598          * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3599          * tables are allocated and initialized at root creation as there is no
3600          * equivalent level in the guest's NPT to shadow.  Allocate the tables
3601          * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3602          */
3603         if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
3604             mmu->shadow_root_level < PT64_ROOT_4LEVEL)
3605                 return 0;
3606
3607         /*
3608          * NPT, the only paging mode that uses this horror, uses a fixed number
3609          * of levels for the shadow page tables, e.g. all MMUs are 4-level or
3610          * all MMUs are 5-level.  Thus, this can safely require that pml5_root
3611          * is allocated if the other roots are valid and pml5 is needed, as any
3612          * prior MMU would also have required pml5.
3613          */
3614         if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
3615                 return 0;
3616
3617         /*
3618          * The special roots should always be allocated in concert.  Yell and
3619          * bail if KVM ends up in a state where only one of the roots is valid.
3620          */
3621         if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
3622                          (need_pml5 && mmu->pml5_root)))
3623                 return -EIO;
3624
3625         /*
3626          * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
3627          * doesn't need to be decrypted.
3628          */
3629         pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3630         if (!pae_root)
3631                 return -ENOMEM;
3632
3633 #ifdef CONFIG_X86_64
3634         pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3635         if (!pml4_root)
3636                 goto err_pml4;
3637
3638         if (need_pml5) {
3639                 pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3640                 if (!pml5_root)
3641                         goto err_pml5;
3642         }
3643 #endif
3644
3645         mmu->pae_root = pae_root;
3646         mmu->pml4_root = pml4_root;
3647         mmu->pml5_root = pml5_root;
3648
3649         return 0;
3650
3651 #ifdef CONFIG_X86_64
3652 err_pml5:
3653         free_page((unsigned long)pml4_root);
3654 err_pml4:
3655         free_page((unsigned long)pae_root);
3656         return -ENOMEM;
3657 #endif
3658 }
3659
3660 static bool is_unsync_root(hpa_t root)
3661 {
3662         struct kvm_mmu_page *sp;
3663
3664         if (!VALID_PAGE(root))
3665                 return false;
3666
3667         /*
3668          * The read barrier orders the CPU's read of SPTE.W during the page table
3669          * walk before the reads of sp->unsync/sp->unsync_children here.
3670          *
3671          * Even if another CPU was marking the SP as unsync-ed simultaneously,
3672          * any guest page table changes are not guaranteed to be visible anyway
3673          * until this VCPU issues a TLB flush strictly after those changes are
3674          * made.  We only need to ensure that the other CPU sets these flags
3675          * before any actual changes to the page tables are made.  The comments
3676          * in mmu_try_to_unsync_pages() describe what could go wrong if this
3677          * requirement isn't satisfied.
3678          */
3679         smp_rmb();
3680         sp = to_shadow_page(root);
3681
3682         /*
3683          * PAE roots (somewhat arbitrarily) aren't backed by shadow pages; the
3684          * PDPTEs for a given PAE root need to be synchronized individually.
3685          */
3686         if (WARN_ON_ONCE(!sp))
3687                 return false;
3688
3689         if (sp->unsync || sp->unsync_children)
3690                 return true;
3691
3692         return false;
3693 }
3694
3695 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3696 {
3697         int i;
3698         struct kvm_mmu_page *sp;
3699
3700         if (vcpu->arch.mmu->direct_map)
3701                 return;
3702
3703         if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
3704                 return;
3705
3706         vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3707
3708         if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3709                 hpa_t root = vcpu->arch.mmu->root.hpa;
3710                 sp = to_shadow_page(root);
3711
3712                 if (!is_unsync_root(root))
3713                         return;
3714
3715                 write_lock(&vcpu->kvm->mmu_lock);
3716                 mmu_sync_children(vcpu, sp, true);
3717                 write_unlock(&vcpu->kvm->mmu_lock);
3718                 return;
3719         }
3720
3721         write_lock(&vcpu->kvm->mmu_lock);
3722
3723         for (i = 0; i < 4; ++i) {
3724                 hpa_t root = vcpu->arch.mmu->pae_root[i];
3725
3726                 if (IS_VALID_PAE_ROOT(root)) {
3727                         root &= PT64_BASE_ADDR_MASK;
3728                         sp = to_shadow_page(root);
3729                         mmu_sync_children(vcpu, sp, true);
3730                 }
3731         }
3732
3733         write_unlock(&vcpu->kvm->mmu_lock);
3734 }
3735
3736 void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
3737 {
3738         unsigned long roots_to_free = 0;
3739         int i;
3740
3741         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3742                 if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
3743                         roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3744
3745         /* sync prev_roots by simply freeing them */
3746         kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
3747 }
3748
3749 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3750                                   gpa_t vaddr, u64 access,
3751                                   struct x86_exception *exception)
3752 {
3753         if (exception)
3754                 exception->error_code = 0;
3755         return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
3756 }
3757
3758 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3759 {
3760         /*
3761          * A nested guest cannot use the MMIO cache if it is using nested
3762          * page tables, because cr2 is a nGPA while the cache stores GPAs.
3763          */
3764         if (mmu_is_nested(vcpu))
3765                 return false;
3766
3767         if (direct)
3768                 return vcpu_match_mmio_gpa(vcpu, addr);
3769
3770         return vcpu_match_mmio_gva(vcpu, addr);
3771 }
3772
3773 /*
3774  * Return the level of the lowest level SPTE added to sptes.
3775  * That SPTE may be non-present.
3776  *
3777  * Must be called between walk_shadow_page_lockless_{begin,end}.
3778  */
3779 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3780 {
3781         struct kvm_shadow_walk_iterator iterator;
3782         int leaf = -1;
3783         u64 spte;
3784
3785         for (shadow_walk_init(&iterator, vcpu, addr),
3786              *root_level = iterator.level;
3787              shadow_walk_okay(&iterator);
3788              __shadow_walk_next(&iterator, spte)) {
3789                 leaf = iterator.level;
3790                 spte = mmu_spte_get_lockless(iterator.sptep);
3791
3792                 sptes[leaf] = spte;
3793         }
3794
3795         return leaf;
3796 }
3797
3798 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
3799 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3800 {
3801         u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
3802         struct rsvd_bits_validate *rsvd_check;
3803         int root, leaf, level;
3804         bool reserved = false;
3805
3806         walk_shadow_page_lockless_begin(vcpu);
3807
3808         if (is_tdp_mmu(vcpu->arch.mmu))
3809                 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
3810         else
3811                 leaf = get_walk(vcpu, addr, sptes, &root);
3812
3813         walk_shadow_page_lockless_end(vcpu);
3814
3815         if (unlikely(leaf < 0)) {
3816                 *sptep = 0ull;
3817                 return reserved;
3818         }
3819
3820         *sptep = sptes[leaf];
3821
3822         /*
3823          * Skip reserved bits checks on the terminal leaf if it's not a valid
3824          * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
3825          * design, always have reserved bits set.  The purpose of the checks is
3826          * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
3827          */
3828         if (!is_shadow_present_pte(sptes[leaf]))
3829                 leaf++;
3830
3831         rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
3832
3833         for (level = root; level >= leaf; level--)
3834                 reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
3835
3836         if (reserved) {
3837                 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3838                        __func__, addr);
3839                 for (level = root; level >= leaf; level--)
3840                         pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
3841                                sptes[level], level,
3842                                get_rsvd_bits(rsvd_check, sptes[level], level));
3843         }
3844
3845         return reserved;
3846 }
3847
3848 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3849 {
3850         u64 spte;
3851         bool reserved;
3852
3853         if (mmio_info_in_cache(vcpu, addr, direct))
3854                 return RET_PF_EMULATE;
3855
3856         reserved = get_mmio_spte(vcpu, addr, &spte);
3857         if (WARN_ON(reserved))
3858                 return -EINVAL;
3859
3860         if (is_mmio_spte(spte)) {
3861                 gfn_t gfn = get_mmio_spte_gfn(spte);
3862                 unsigned int access = get_mmio_spte_access(spte);
3863
3864                 if (!check_mmio_spte(vcpu, spte))
3865                         return RET_PF_INVALID;
3866
3867                 if (direct)
3868                         addr = 0;
3869
3870                 trace_handle_mmio_page_fault(addr, gfn, access);
3871                 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3872                 return RET_PF_EMULATE;
3873         }
3874
3875         /*
3876          * If the page table was zapped by another CPU, let the CPU fault
3877          * again on the address.
3878          */
3879         return RET_PF_RETRY;
3880 }
3881
3882 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3883                                          struct kvm_page_fault *fault)
3884 {
3885         if (unlikely(fault->rsvd))
3886                 return false;
3887
3888         if (!fault->present || !fault->write)
3889                 return false;
3890
3891         /*
3892          * The guest is writing a page that is write-tracked, which cannot
3893          * be fixed by the page fault handler.
3894          */
3895         if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE))
3896                 return true;
3897
3898         return false;
3899 }
3900
3901 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3902 {
3903         struct kvm_shadow_walk_iterator iterator;
3904         u64 spte;
3905
3906         walk_shadow_page_lockless_begin(vcpu);
3907         for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
3908                 clear_sp_write_flooding_count(iterator.sptep);
3909         walk_shadow_page_lockless_end(vcpu);
3910 }
3911
3912 static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
3913 {
3914         /* make sure the token value is not 0 */
3915         u32 id = vcpu->arch.apf.id;
3916
3917         if (id << 12 == 0)
3918                 vcpu->arch.apf.id = 1;
3919
3920         return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3921 }
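
/*
 * Illustrative sketch (not upstream code): the async-#PF token packs an
 * incrementing per-vCPU id into the upper bits and vcpu_id into the low
 * 12 bits.  Decoding is the reverse bit manipulation; the helper below is
 * hypothetical and assumes vcpu_id fits in 12 bits.
 */
static void __maybe_unused decode_apf_token_example(u32 token, u32 *id,
						    u32 *vcpu_id)
{
	*vcpu_id = token & ((1u << 12) - 1);	/* low 12 bits */
	*id = token >> 12;			/* per-vCPU sequence number */
}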
3922
3923 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3924                                     gfn_t gfn)
3925 {
3926         struct kvm_arch_async_pf arch;
3927
3928         arch.token = alloc_apf_token(vcpu);
3929         arch.gfn = gfn;
3930         arch.direct_map = vcpu->arch.mmu->direct_map;
3931         arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3932
3933         return kvm_setup_async_pf(vcpu, cr2_or_gpa,
3934                                   kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3935 }
3936
3937 static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, int *r)
3938 {
3939         struct kvm_memory_slot *slot = fault->slot;
3940         bool async;
3941
3942         /*
3943          * Retry the page fault if the gfn hit a memslot that is being deleted
3944          * or moved.  This ensures any existing SPTEs for the old memslot will
3945          * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3946          */
3947         if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
3948                 goto out_retry;
3949
3950         if (!kvm_is_visible_memslot(slot)) {
3951                 /* Don't expose private memslots to L2. */
3952                 if (is_guest_mode(vcpu)) {
3953                         fault->slot = NULL;
3954                         fault->pfn = KVM_PFN_NOSLOT;
3955                         fault->map_writable = false;
3956                         return false;
3957                 }
3958                 /*
3959                  * If the APIC access page exists but is disabled, go directly
3960                  * to emulation without caching the MMIO access or creating a
3961                  * MMIO SPTE.  That way the cache doesn't need to be purged
3962                  * when the AVIC is re-enabled.
3963                  */
3964                 if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
3965                     !kvm_apicv_activated(vcpu->kvm)) {
3966                         *r = RET_PF_EMULATE;
3967                         return true;
3968                 }
3969         }
3970
3971         async = false;
3972         fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, &async,
3973                                           fault->write, &fault->map_writable,
3974                                           &fault->hva);
3975         if (!async)
3976                 return false; /* *pfn has correct page already */
3977
3978         if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
3979                 trace_kvm_try_async_get_page(fault->addr, fault->gfn);
3980                 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
3981                         trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
3982                         kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3983                         goto out_retry;
3984                 } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn))
3985                         goto out_retry;
3986         }
3987
3988         fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
3989                                           fault->write, &fault->map_writable,
3990                                           &fault->hva);
3991         return false;
3992
3993 out_retry:
3994         *r = RET_PF_RETRY;
3995         return true;
3996 }
3997
3998 /*
3999  * Returns true if the page fault is stale and needs to be retried, i.e. if the
4000  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
4001  */
4002 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
4003                                 struct kvm_page_fault *fault, int mmu_seq)
4004 {
4005         struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
4006
4007         /* Special roots, e.g. pae_root, are not backed by shadow pages. */
4008         if (sp && is_obsolete_sp(vcpu->kvm, sp))
4009                 return true;
4010
4011         /*
4012          * Roots without an associated shadow page are considered invalid if
4013          * there is a pending request to free obsolete roots.  The request is
4014          * only a hint that the current root _may_ be obsolete and needs to be
4015          * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
4016          * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
4017          * to reload even if no vCPU is actively using the root.
4018          */
4019         if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
4020                 return true;
4021
4022         return fault->slot &&
4023                mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
4024 }
4025
4026 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4027 {
4028         bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
4029
4030         unsigned long mmu_seq;
4031         int r;
4032
4033         fault->gfn = fault->addr >> PAGE_SHIFT;
4034         fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);
4035
4036         if (page_fault_handle_page_track(vcpu, fault))
4037                 return RET_PF_EMULATE;
4038
4039         r = fast_page_fault(vcpu, fault);
4040         if (r != RET_PF_INVALID)
4041                 return r;
4042
4043         r = mmu_topup_memory_caches(vcpu, false);
4044         if (r)
4045                 return r;
4046
4047         mmu_seq = vcpu->kvm->mmu_notifier_seq;
4048         smp_rmb();
4049
4050         if (kvm_faultin_pfn(vcpu, fault, &r))
4051                 return r;
4052
4053         if (handle_abnormal_pfn(vcpu, fault, ACC_ALL, &r))
4054                 return r;
4055
4056         r = RET_PF_RETRY;
4057
4058         if (is_tdp_mmu_fault)
4059                 read_lock(&vcpu->kvm->mmu_lock);
4060         else
4061                 write_lock(&vcpu->kvm->mmu_lock);
4062
4063         if (is_page_fault_stale(vcpu, fault, mmu_seq))
4064                 goto out_unlock;
4065
4066         r = make_mmu_pages_available(vcpu);
4067         if (r)
4068                 goto out_unlock;
4069
4070         if (is_tdp_mmu_fault)
4071                 r = kvm_tdp_mmu_map(vcpu, fault);
4072         else
4073                 r = __direct_map(vcpu, fault);
4074
4075 out_unlock:
4076         if (is_tdp_mmu_fault)
4077                 read_unlock(&vcpu->kvm->mmu_lock);
4078         else
4079                 write_unlock(&vcpu->kvm->mmu_lock);
4080         kvm_release_pfn_clean(fault->pfn);
4081         return r;
4082 }
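
/*
 * Note on the retry pattern above (illustrative summary): mmu_notifier_seq
 * is snapshotted before the pfn lookup and re-checked under mmu_lock via
 * is_page_fault_stale() and mmu_notifier_retry_hva().  If an invalidation
 * ran in between, the pfn may already be stale, so the fault bails out
 * with RET_PF_RETRY instead of installing a mapping to a freed page.
 */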
4083
4084 static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
4085                                 struct kvm_page_fault *fault)
4086 {
4087         pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code);
4088
4089         /* This path builds a PAE pagetable, so 2MB is the largest page we can map. */
4090         fault->max_level = PG_LEVEL_2M;
4091         return direct_page_fault(vcpu, fault);
4092 }
4093
4094 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4095                                 u64 fault_address, char *insn, int insn_len)
4096 {
4097         int r = 1;
4098         u32 flags = vcpu->arch.apf.host_apf_flags;
4099
4100 #ifndef CONFIG_X86_64
4101         /* A 64-bit CR2 should be impossible on 32-bit KVM. */
4102         if (WARN_ON_ONCE(fault_address >> 32))
4103                 return -EFAULT;
4104 #endif
4105
4106         vcpu->arch.l1tf_flush_l1d = true;
4107         if (!flags) {
4108                 trace_kvm_page_fault(fault_address, error_code);
4109
4110                 if (kvm_event_needs_reinjection(vcpu))
4111                         kvm_mmu_unprotect_page_virt(vcpu, fault_address);
4112                 r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4113                                 insn_len);
4114         } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4115                 vcpu->arch.apf.host_apf_flags = 0;
4116                 local_irq_disable();
4117                 kvm_async_pf_task_wait_schedule(fault_address);
4118                 local_irq_enable();
4119         } else {
4120                 WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4121         }
4122
4123         return r;
4124 }
4125 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
4126
4127 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4128 {
4129         while (fault->max_level > PG_LEVEL_4K) {
4130                 int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
4131                 gfn_t base = (fault->addr >> PAGE_SHIFT) & ~(page_num - 1);
4132
4133                 if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
4134                         break;
4135
4136                 --fault->max_level;
4137         }
4138
4139         return direct_page_fault(vcpu, fault);
4140 }
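
/*
 * Worked example (illustrative): with fault->max_level == PG_LEVEL_2M,
 * page_num is 512, so for addr == 0x12345678 the candidate range is gfns
 * 0x12200..0x123ff (base = 0x12345 & ~0x1ff).  Only if the MTRR memory
 * types across that whole range agree is the 2M mapping kept; otherwise
 * max_level is lowered and the check repeats one level down.
 */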
4141
4142 static void nonpaging_init_context(struct kvm_mmu *context)
4143 {
4144         context->page_fault = nonpaging_page_fault;
4145         context->gva_to_gpa = nonpaging_gva_to_gpa;
4146         context->sync_page = nonpaging_sync_page;
4147         context->invlpg = NULL;
4148         context->direct_map = true;
4149 }
4150
4151 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4152                                   union kvm_mmu_page_role role)
4153 {
4154         return (role.direct || pgd == root->pgd) &&
4155                VALID_PAGE(root->hpa) &&
4156                role.word == to_shadow_page(root->hpa)->role.word;
4157 }
4158
4159 /*
4160  * Find out if a previously cached root matching the new pgd/role is available,
4161  * and insert the current root as the MRU in the cache.
4162  * If a matching root is found, it is assigned to kvm_mmu->root and
4163  * true is returned.
4164  * If no match is found, kvm_mmu->root is left invalid, the LRU root is
4165  * evicted to make room for the current root, and false is returned.
4166  */
4167 static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
4168                                               gpa_t new_pgd,
4169                                               union kvm_mmu_page_role new_role)
4170 {
4171         uint i;
4172
4173         if (is_root_usable(&mmu->root, new_pgd, new_role))
4174                 return true;
4175
4176         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4177                 /*
4178                  * The swaps end up rotating the cache like this:
4179                  *   C   0 1 2 3   (on entry to the function)
4180                  *   0   C 1 2 3
4181                  *   1   C 0 2 3
4182                  *   2   C 0 1 3
4183                  *   3   C 0 1 2   (on exit from the loop)
4184                  */
4185                 swap(mmu->root, mmu->prev_roots[i]);
4186                 if (is_root_usable(&mmu->root, new_pgd, new_role))
4187                         return true;
4188         }
4189
4190         kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
4191         return false;
4192 }
4193
4194 /*
4195  * Find out if a previously cached root matching the new pgd/role is available.
4196  * On entry, mmu->root is invalid.
4197  * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
4198  * of the cache becomes invalid, and true is returned.
4199  * If no match is found, kvm_mmu->root is left invalid and false is returned.
4200  */
4201 static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
4202                                              gpa_t new_pgd,
4203                                              union kvm_mmu_page_role new_role)
4204 {
4205         uint i;
4206
4207         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4208                 if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role))
4209                         goto hit;
4210
4211         return false;
4212
4213 hit:
4214         swap(mmu->root, mmu->prev_roots[i]);
4215         /* Bubble up the remaining roots.  */
4216         for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++)
4217                 mmu->prev_roots[i] = mmu->prev_roots[i + 1];
4218         mmu->prev_roots[i].hpa = INVALID_PAGE;
4219         return true;
4220 }
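
/*
 * Worked example (illustrative, using the same 0 1 2 3 cache picture as
 * the comment in cached_root_find_and_keep_current() above): with
 * mmu->root invalid on entry and prev_roots holding 0 1 2 3, a hit on
 * index 2 results in
 *
 *	root       : 2
 *	prev_roots : 0 1 3 <invalid>
 *
 * i.e. the hit becomes the active root, the remaining cached roots keep
 * their relative order, and the vacated tail slot is marked invalid.
 */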
4221
4222 static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
4223                             gpa_t new_pgd, union kvm_mmu_page_role new_role)
4224 {
4225         /*
4226          * For now, limit the caching to 64-bit hosts+VMs in order to avoid
4227          * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
4228          * later if necessary.
4229          */
4230         if (VALID_PAGE(mmu->root.hpa) && !to_shadow_page(mmu->root.hpa))
4231                 kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
4232
4233         if (VALID_PAGE(mmu->root.hpa))
4234                 return cached_root_find_and_keep_current(kvm, mmu, new_pgd, new_role);
4235         else
4236                 return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
4237 }
4238
4239 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4240 {
4241         struct kvm_mmu *mmu = vcpu->arch.mmu;
4242         union kvm_mmu_page_role new_role = mmu->mmu_role.base;
4243
4244         if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) {
4245                 /* kvm_mmu_ensure_valid_pgd will set up a new root.  */
4246                 return;
4247         }
4248
4249         /*
4250          * It's possible that the cached previous root page is obsolete because
4251          * of a change in the MMU generation number. However, changing the
4252          * generation number is accompanied by KVM_REQ_MMU_FREE_OBSOLETE_ROOTS,
4253          * which will free the root set here and allocate a new one.
4254          */
4255         kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4256
4257         if (force_flush_and_sync_on_reuse) {
4258                 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4259                 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4260         }
4261
4262         /*
4263          * The last MMIO access's GVA and GPA are cached in the VCPU. When
4264          * switching to a new CR3, that GVA->GPA mapping may no longer be
4265          * valid. So clear any cached MMIO info even when we don't need to sync
4266          * the shadow page tables.
4267          */
4268         vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4269
4270         /*
4271          * If this is a direct root page, it doesn't have a write flooding
4272          * count. Otherwise, clear the write flooding count.
4273          */
4274         if (!new_role.direct)
4275                 __clear_sp_write_flooding_count(
4276                                 to_shadow_page(vcpu->arch.mmu->root.hpa));
4277 }
4278 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4279
4280 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
4281 {
4282         return kvm_read_cr3(vcpu);
4283 }
4284
4285 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4286                            unsigned int access)
4287 {
4288         if (unlikely(is_mmio_spte(*sptep))) {
4289                 if (gfn != get_mmio_spte_gfn(*sptep)) {
4290                         mmu_spte_clear_no_track(sptep);
4291                         return true;
4292                 }
4293
4294                 mark_mmio_spte(vcpu, sptep, gfn, access);
4295                 return true;
4296         }
4297
4298         return false;
4299 }
4300
4301 #define PTTYPE_EPT 18 /* arbitrary */
4302 #define PTTYPE PTTYPE_EPT
4303 #include "paging_tmpl.h"
4304 #undef PTTYPE
4305
4306 #define PTTYPE 64
4307 #include "paging_tmpl.h"
4308 #undef PTTYPE
4309
4310 #define PTTYPE 32
4311 #include "paging_tmpl.h"
4312 #undef PTTYPE
4313
4314 static void
4315 __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
4316                         u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4317                         bool pse, bool amd)
4318 {
4319         u64 gbpages_bit_rsvd = 0;
4320         u64 nonleaf_bit8_rsvd = 0;
4321         u64 high_bits_rsvd;
4322
4323         rsvd_check->bad_mt_xwr = 0;
4324
4325         if (!gbpages)
4326                 gbpages_bit_rsvd = rsvd_bits(7, 7);
4327
4328         if (level == PT32E_ROOT_LEVEL)
4329                 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
4330         else
4331                 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4332
4333         /* Note, NX doesn't exist in PDPTEs, this is handled below. */
4334         if (!nx)
4335                 high_bits_rsvd |= rsvd_bits(63, 63);
4336
4337         /*
4338          * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4339          * leaf entries) on AMD CPUs only.
4340          */
4341         if (amd)
4342                 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4343
4344         switch (level) {
4345         case PT32_ROOT_LEVEL:
4346                 /* no rsvd bits for 2 level 4K page table entries */
4347                 rsvd_check->rsvd_bits_mask[0][1] = 0;
4348                 rsvd_check->rsvd_bits_mask[0][0] = 0;
4349                 rsvd_check->rsvd_bits_mask[1][0] =
4350                         rsvd_check->rsvd_bits_mask[0][0];
4351
4352                 if (!pse) {
4353                         rsvd_check->rsvd_bits_mask[1][1] = 0;
4354                         break;
4355                 }
4356
4357                 if (is_cpuid_PSE36())
4358                         /* 36-bit PSE 4MB page */
4359                         rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4360                 else
4361                         /* 32-bit PSE 4MB page */
4362                         rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4363                 break;
4364         case PT32E_ROOT_LEVEL:
4365                 rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
4366                                                    high_bits_rsvd |
4367                                                    rsvd_bits(5, 8) |
4368                                                    rsvd_bits(1, 2);     /* PDPTE */
4369                 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;      /* PDE */
4370                 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;      /* PTE */
4371                 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4372                                                    rsvd_bits(13, 20);   /* large page */
4373                 rsvd_check->rsvd_bits_mask[1][0] =
4374                         rsvd_check->rsvd_bits_mask[0][0];
4375                 break;
4376         case PT64_ROOT_5LEVEL:
4377                 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
4378                                                    nonleaf_bit8_rsvd |
4379                                                    rsvd_bits(7, 7);
4380                 rsvd_check->rsvd_bits_mask[1][4] =
4381                         rsvd_check->rsvd_bits_mask[0][4];
4382                 fallthrough;
4383         case PT64_ROOT_4LEVEL:
4384                 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
4385                                                    nonleaf_bit8_rsvd |
4386                                                    rsvd_bits(7, 7);
4387                 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
4388                                                    gbpages_bit_rsvd;
4389                 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
4390                 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4391                 rsvd_check->rsvd_bits_mask[1][3] =
4392                         rsvd_check->rsvd_bits_mask[0][3];
4393                 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
4394                                                    gbpages_bit_rsvd |
4395                                                    rsvd_bits(13, 29);
4396                 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4397                                                    rsvd_bits(13, 20); /* large page */
4398                 rsvd_check->rsvd_bits_mask[1][0] =
4399                         rsvd_check->rsvd_bits_mask[0][0];
4400                 break;
4401         }
4402 }
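
/*
 * Worked example (illustrative): with a 48-bit guest MAXPHYADDR,
 * pa_bits_rsvd is rsvd_bits(48, 63) and high_bits_rsvd becomes
 * rsvd_bits(48, 51).  For a 4-level guest without NX and without GBPAGES,
 * the non-huge PDPE mask (rsvd_bits_mask[0][2]) built above is therefore
 *
 *	rsvd_bits(48, 51) | rsvd_bits(63, 63) | rsvd_bits(7, 7)
 *
 * i.e. the out-of-range physical bits, the NX bit and the unusable PS bit
 * are all treated as reserved at that level.
 */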
4403
4404 static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
4405 {
4406         /*
4407          * If TDP is enabled, let the guest use GBPAGES if they're supported in
4408          * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
4409          * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
4410          * walk for performance and complexity reasons.  Not to mention KVM
4411          * _can't_ solve the problem because GVA->GPA walks aren't visible to
4412          * KVM once a TDP translation is installed.  Mimic hardware behavior so
4413          * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
4414          */
4415         return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
4416                              guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
4417 }
4418
4419 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4420                                   struct kvm_mmu *context)
4421 {
4422         __reset_rsvds_bits_mask(&context->guest_rsvd_check,
4423                                 vcpu->arch.reserved_gpa_bits,
4424                                 context->root_level, is_efer_nx(context),
4425                                 guest_can_use_gbpages(vcpu),
4426                                 is_cr4_pse(context),
4427                                 guest_cpuid_is_amd_or_hygon(vcpu));
4428 }
4429
4430 static void
4431 __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
4432                             u64 pa_bits_rsvd, bool execonly, int huge_page_level)
4433 {
4434         u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4435         u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
4436         u64 bad_mt_xwr;
4437
4438         if (huge_page_level < PG_LEVEL_1G)
4439                 large_1g_rsvd = rsvd_bits(7, 7);
4440         if (huge_page_level < PG_LEVEL_2M)
4441                 large_2m_rsvd = rsvd_bits(7, 7);
4442
4443         rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
4444         rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
4445         rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
4446         rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
4447         rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4448
4449         /* large page */
4450         rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4451         rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4452         rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
4453         rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
4454         rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4455
4456         bad_mt_xwr = 0xFFull << (2 * 8);        /* bits 3..5 must not be 2 */
4457         bad_mt_xwr |= 0xFFull << (3 * 8);       /* bits 3..5 must not be 3 */
4458         bad_mt_xwr |= 0xFFull << (7 * 8);       /* bits 3..5 must not be 7 */
4459         bad_mt_xwr |= REPEAT_BYTE(1ull << 2);   /* bits 0..2 must not be 010 */
4460         bad_mt_xwr |= REPEAT_BYTE(1ull << 6);   /* bits 0..2 must not be 110 */
4461         if (!execonly) {
4462                 /* bits 0..2 must not be 100 unless VMX capabilities allow it */
4463                 bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4464         }
4465         rsvd_check->bad_mt_xwr = bad_mt_xwr;
4466 }
4467
4468 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4469                 struct kvm_mmu *context, bool execonly, int huge_page_level)
4470 {
4471         __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4472                                     vcpu->arch.reserved_gpa_bits, execonly,
4473                                     huge_page_level);
4474 }
4475
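     /*
      * Bits above the host's effective physical-address width
      * (shadow_phys_bits) are reserved in all SPTEs.
      */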
4476 static inline u64 reserved_hpa_bits(void)
4477 {
4478         return rsvd_bits(shadow_phys_bits, 63);
4479 }
4480
4481 /*
4482  * The page table on the host is the shadow page table for the page
4483  * table in the guest or in an AMD nested guest; its MMU features
4484  * completely follow the features in the guest.
4485  */
4486 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4487                                         struct kvm_mmu *context)
4488 {
4489         /*
4490          * KVM uses NX when TDP is disabled to handle a variety of scenarios,
4491          * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
4492          * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
4493          * The iTLB multi-hit workaround can be toggled at any time, so assume
4494          * NX can be used by any non-nested shadow MMU to avoid having to reset
4495          * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
4496          */
4497         bool uses_nx = is_efer_nx(context) || !tdp_enabled;
4498
4499         /* @amd adds a check on a bit of SPTEs, which KVM shouldn't use anyway. */
4500         bool is_amd = true;
4501         /* KVM doesn't use 2-level page tables for the shadow MMU. */
4502         bool is_pse = false;
4503         struct rsvd_bits_validate *shadow_zero_check;
4504         int i;
4505
4506         WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
4507
4508         shadow_zero_check = &context->shadow_zero_check;
4509         __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4510                                 context->shadow_root_level, uses_nx,
4511                                 guest_can_use_gbpages(vcpu), is_pse, is_amd);
4512
4513         if (!shadow_me_mask)
4514                 return;
4515
4516         for (i = context->shadow_root_level; --i >= 0;) {
4517                 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4518                 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4519         }
4520
4521 }
4522
4523 static inline bool boot_cpu_is_amd(void)
4524 {
4525         WARN_ON_ONCE(!tdp_enabled);
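             /*
              * shadow_x_mask is non-zero only when KVM is using EPT, so with
              * TDP enabled a zero mask implies AMD (or Hygon) NPT.
              */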
4526         return shadow_x_mask == 0;
4527 }
4528
4529 /*
4530  * The direct page table on the host uses as many MMU features as
4531  * possible; however, KVM currently does not do execution-protection.
4532  */
4533 static void
4534 reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
4535 {
4536         struct rsvd_bits_validate *shadow_zero_check;
4537         int i;
4538
4539         shadow_zero_check = &context->shadow_zero_check;
4540
4541         if (boot_cpu_is_amd())
4542                 __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4543                                         context->shadow_root_level, false,
4544                                         boot_cpu_has(X86_FEATURE_GBPAGES),
4545                                         false, true);
4546         else
4547                 __reset_rsvds_bits_mask_ept(shadow_zero_check,
4548                                             reserved_hpa_bits(), false,
4549                                             max_huge_page_level);
4550
4551         if (!shadow_me_mask)
4552                 return;
4553
4554         for (i = context->shadow_root_level; --i >= 0;) {
4555                 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4556                 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4557         }
4558 }
4559
4560 /*
4561  * Same as the comments in reset_shadow_zero_bits_mask(), except this
4562  * is the shadow page table for an Intel nested (EPT) guest.
4563  */
4564 static void
4565 reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
4566 {
4567         __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
4568                                     reserved_hpa_bits(), execonly,
4569                                     max_huge_page_level);
4570 }
4571
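     /*
      * BYTE_MASK(access) builds a byte with bit N set iff the 3-bit
      * combination N (an OR of ACC_EXEC_MASK, ACC_WRITE_MASK and
      * ACC_USER_MASK) has at least one bit in common with @access.  The
      * complement then flags the combinations that lack @access, which is
      * what update_permission_bitmask() uses to compute which accesses fault.
      */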
4572 #define BYTE_MASK(access) \
4573         ((1 & (access) ? 2 : 0) | \
4574          (2 & (access) ? 4 : 0) | \
4575          (3 & (access) ? 8 : 0) | \
4576          (4 & (access) ? 16 : 0) | \
4577          (5 & (access) ? 32 : 0) | \
4578          (6 & (access) ? 64 : 0) | \
4579          (7 & (access) ? 128 : 0))
4580
4581
4582 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
4583 {
4584         unsigned byte;
4585
4586         const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4587         const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4588         const u8 u = BYTE_MASK(ACC_USER_MASK);
4589
4590         bool cr4_smep = is_cr4_smep(mmu);
4591         bool cr4_smap = is_cr4_smap(mmu);
4592         bool cr0_wp = is_cr0_wp(mmu);
4593         bool efer_nx = is_efer_nx(mmu);
4594
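             /*
              * Each index into mmu->permissions[] corresponds to a page fault
              * error code with the Present bit (bit 0) stripped, i.e. the
              * entry for a given PFEC is permissions[pfec >> 1].
              */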
4595         for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4596                 unsigned pfec = byte << 1;
4597
4598                 /*
4599                  * Each "*f" variable has a 1 bit for each UWX value
4600                  * that causes a fault with the given PFEC.
4601                  */
4602
4603                 /* Faults from writes to non-writable pages */
4604                 u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4605                 /* Faults from user mode accesses to supervisor pages */
4606                 u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4607                 /* Faults from fetches of non-executable pages */
4608                 u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4609                 /* Faults from kernel mode fetches of user pages */
4610                 u8 smepf = 0;
4611                 /* Faults from kernel mode accesses of user pages */
4612                 u8 smapf = 0;
4613
4614                 if (!ept) {
4615                         /* Faults from kernel mode accesses to user pages */
4616                         u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4617
4618                         /* Not really needed: !nx will cause pte.nx to fault */
4619                         if (!efer_nx)
4620                                 ff = 0;
4621
4622                         /* Allow supervisor writes if !cr0.wp */
4623                         if (!cr0_wp)
4624                                 wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4625
4626                         /* Disallow supervisor fetches of user code if cr4.smep */
4627                         if (cr4_smep)
4628                                 smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4629
4630                         /*
4631                          * SMAP:kernel-mode data accesses from user-mode
4632                          * mappings should fault. A fault is considered
4633                          * as a SMAP violation if all of the following
4634                          * conditions are true:
4635                          *   - X86_CR4_SMAP is set in CR4
4636                          *   - A user page is accessed
4637                          *   - The access is not a fetch
4638                          *   - The access is supervisor mode
4639                          *   - If implicit supervisor access or X86_EFLAGS_AC is clear
4640                          *
4641                          * Here, we cover the first four conditions.
4642                          * The fifth is computed dynamically in permission_fault();
4643                          * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4644                          * *not* subject to SMAP restrictions.
4645                          */
4646                         if (cr4_smap)
4647                                 smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4648                 }
4649
4650                 mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4651         }
4652 }
4653
4654 /*
4655 * PKU is an additional mechanism by which the paging controls access to
4656 * user-mode addresses based on the value in the PKRU register.  Protection
4657 * key violations are reported through a bit in the page fault error code.
4658 * Unlike other bits of the error code, the PK bit is not known at the
4659 * call site of e.g. gva_to_gpa; it must be computed directly in
4660 * permission_fault based on two bits of PKRU, on some machine state (CR4,
4661 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4662 *
4663 * In particular the following conditions come from the error code, the
4664 * page tables and the machine state:
4665 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4666 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4667 * - PK is always zero if U=0 in the page tables
4668 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4669 *
4670 * The PKRU bitmask caches the result of these four conditions.  The error
4671 * code (minus the P bit) and the page table's U bit form an index into the
4672 * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4673 * with the two bits of the PKRU register corresponding to the protection key.
4674 * For the first three conditions above the bits will be 00, thus masking
4675 * away both AD and WD.  For all reads or if the last condition holds, WD
4676 * only will be masked away.
4677 */
4678 static void update_pkru_bitmask(struct kvm_mmu *mmu)
4679 {
4680         unsigned bit;
4681         bool wp;
4682
4683         mmu->pkru_mask = 0;
4684
4685         if (!is_cr4_pke(mmu))
4686                 return;
4687
4688         wp = is_cr0_wp(mmu);
4689
4690         for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4691                 unsigned pfec, pkey_bits;
4692                 bool check_pkey, check_write, ff, uf, wf, pte_user;
4693
4694                 pfec = bit << 1;
4695                 ff = pfec & PFERR_FETCH_MASK;
4696                 uf = pfec & PFERR_USER_MASK;
4697                 wf = pfec & PFERR_WRITE_MASK;
4698
4699                 /* PFEC.RSVD is replaced by ACC_USER_MASK. */
4700                 pte_user = pfec & PFERR_RSVD_MASK;
4701
4702                 /*
4703                  * Only need to check the access which is not an
4704                  * instruction fetch and is to a user page.
4705                  */
4706                 check_pkey = (!ff && pte_user);
4707                 /*
4708                  * write access is controlled by PKRU if it is a
4709                  * user access or CR0.WP = 1.
4710                  */
4711                 check_write = check_pkey && wf && (uf || wp);
4712
4713                 /* PKRU.AD stops both read and write access. */
4714                 pkey_bits = !!check_pkey;
4715                 /* PKRU.WD stops write access. */
4716                 pkey_bits |= (!!check_write) << 1;
4717
4718                 mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4719         }
4720 }
4721
4722 static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
4723                                         struct kvm_mmu *mmu)
4724 {
4725         if (!is_cr0_pg(mmu))
4726                 return;
4727
4728         reset_rsvds_bits_mask(vcpu, mmu);
4729         update_permission_bitmask(mmu, false);
4730         update_pkru_bitmask(mmu);
4731 }
4732
4733 static void paging64_init_context(struct kvm_mmu *context)
4734 {
4735         context->page_fault = paging64_page_fault;
4736         context->gva_to_gpa = paging64_gva_to_gpa;
4737         context->sync_page = paging64_sync_page;
4738         context->invlpg = paging64_invlpg;
4739         context->direct_map = false;
4740 }
4741
4742 static void paging32_init_context(struct kvm_mmu *context)
4743 {
4744         context->page_fault = paging32_page_fault;
4745         context->gva_to_gpa = paging32_gva_to_gpa;
4746         context->sync_page = paging32_sync_page;
4747         context->invlpg = paging32_invlpg;
4748         context->direct_map = false;
4749 }
4750
4751 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
4752                                                    const struct kvm_mmu_role_regs *regs)
4753 {
4754         union kvm_mmu_role role = {0};
4755
4756         role.base.access = ACC_ALL;
4757         if (____is_cr0_pg(regs)) {
4758                 role.ext.cr0_pg = 1;
4759                 role.base.efer_nx = ____is_efer_nx(regs);
4760                 role.base.cr0_wp = ____is_cr0_wp(regs);
4761
4762                 role.ext.cr4_pae = ____is_cr4_pae(regs);
4763                 role.ext.cr4_smep = ____is_cr4_smep(regs);
4764                 role.ext.cr4_smap = ____is_cr4_smap(regs);
4765                 role.ext.cr4_pse = ____is_cr4_pse(regs);
4766
4767                 /* PKEY and LA57 are active iff long mode is active. */
4768                 role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
4769                 role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
4770                 role.ext.efer_lma = ____is_efer_lma(regs);
4771         }
4772         role.base.smm = is_smm(vcpu);
4773         role.base.guest_mode = is_guest_mode(vcpu);
4774         role.ext.valid = 1;
4775
4776         return role;
4777 }
4778
4779 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4780 {
4781         /* tdp_root_level is architecture forced level, use it if nonzero */
4782         if (tdp_root_level)
4783                 return tdp_root_level;
4784
4785         /* Use 5-level TDP if and only if it's useful/necessary. */
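             /* A 4-level page table can map a 48-bit guest physical address space. */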
4786         if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4787                 return 4;
4788
4789         return max_tdp_level;
4790 }
4791
4792 static union kvm_mmu_role
4793 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
4794                                 const struct kvm_mmu_role_regs *regs)
4795 {
4796         union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
4797
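             /*
              * A zero shadow_accessed_mask means the hardware provides no
              * Accessed bit for TDP SPTEs (e.g. EPT without A/D bits), so
              * A/D tracking is disabled for this role.
              */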
4798         role.base.ad_disabled = (shadow_accessed_mask == 0);
4799         role.base.level = kvm_mmu_get_tdp_level(vcpu);
4800         role.base.direct = true;
4801         role.base.has_4_byte_gpte = false;
4802
4803         return role;
4804 }
4805
4806 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
4807                              const struct kvm_mmu_role_regs *regs)
4808 {
4809         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4810         union kvm_mmu_role new_role =
4811                 kvm_calc_tdp_mmu_root_page_role(vcpu, regs);
4812
4813         if (new_role.as_u64 == context->mmu_role.as_u64)
4814                 return;
4815
4816         context->mmu_role.as_u64 = new_role.as_u64;
4817         context->page_fault = kvm_tdp_page_fault;
4818         context->sync_page = nonpaging_sync_page;
4819         context->invlpg = NULL;
4820         context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4821         context->direct_map = true;
4822         context->get_guest_pgd = get_cr3;
4823         context->get_pdptr = kvm_pdptr_read;
4824         context->inject_page_fault = kvm_inject_page_fault;
4825         context->root_level = role_regs_to_root_level(regs);
4826
4827         if (!is_cr0_pg(context))
4828                 context->gva_to_gpa = nonpaging_gva_to_gpa;
4829         else if (is_cr4_pae(context))
4830                 context->gva_to_gpa = paging64_gva_to_gpa;
4831         else
4832                 context->gva_to_gpa = paging32_gva_to_gpa;
4833
4834         reset_guest_paging_metadata(vcpu, context);
4835         reset_tdp_shadow_zero_bits_mask(context);
4836 }
4837
4838 static union kvm_mmu_role
4839 kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
4840                                       const struct kvm_mmu_role_regs *regs)
4841 {
4842         union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
4843
4844         role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
4845         role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
4846         role.base.has_4_byte_gpte = ____is_cr0_pg(regs) && !____is_cr4_pae(regs);
4847
4848         return role;
4849 }
4850
4851 static union kvm_mmu_role
4852 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
4853                                    const struct kvm_mmu_role_regs *regs)
4854 {
4855         union kvm_mmu_role role =
4856                 kvm_calc_shadow_root_page_role_common(vcpu, regs);
4857
4858         role.base.direct = !____is_cr0_pg(regs);
4859
4860         if (!____is_efer_lma(regs))
4861                 role.base.level = PT32E_ROOT_LEVEL;
4862         else if (____is_cr4_la57(regs))
4863                 role.base.level = PT64_ROOT_5LEVEL;
4864         else
4865                 role.base.level = PT64_ROOT_4LEVEL;
4866
4867         return role;
4868 }
4869
4870 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4871                                     const struct kvm_mmu_role_regs *regs,
4872                                     union kvm_mmu_role new_role)
4873 {
4874         if (new_role.as_u64 == context->mmu_role.as_u64)
4875                 return;
4876
4877         context->mmu_role.as_u64 = new_role.as_u64;
4878
4879         if (!is_cr0_pg(context))
4880                 nonpaging_init_context(context);
4881         else if (is_cr4_pae(context))
4882                 paging64_init_context(context);
4883         else
4884                 paging32_init_context(context);
4885         context->root_level = role_regs_to_root_level(regs);
4886
4887         reset_guest_paging_metadata(vcpu, context);
4888         context->shadow_root_level = new_role.base.level;
4889
4890         reset_shadow_zero_bits_mask(vcpu, context);
4891 }
4892
4893 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
4894                                 const struct kvm_mmu_role_regs *regs)
4895 {
4896         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4897         union kvm_mmu_role new_role =
4898                 kvm_calc_shadow_mmu_root_page_role(vcpu, regs);
4899
4900         shadow_mmu_init_context(vcpu, context, regs, new_role);
4901 }
4902
4903 static union kvm_mmu_role
4904 kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
4905                                    const struct kvm_mmu_role_regs *regs)
4906 {
4907         union kvm_mmu_role role =
4908                 kvm_calc_shadow_root_page_role_common(vcpu, regs);
4909
4910         role.base.direct = false;
4911         role.base.level = kvm_mmu_get_tdp_level(vcpu);
4912
4913         return role;
4914 }
4915
4916 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
4917                              unsigned long cr4, u64 efer, gpa_t nested_cr3)
4918 {
4919         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4920         struct kvm_mmu_role_regs regs = {
4921                 .cr0 = cr0,
4922                 .cr4 = cr4 & ~X86_CR4_PKE,
4923                 .efer = efer,
4924         };
4925         union kvm_mmu_role new_role;
4926
4927         new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
4928
4929         shadow_mmu_init_context(vcpu, context, &regs, new_role);
4930         kvm_mmu_new_pgd(vcpu, nested_cr3);
4931 }
4932 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4933
4934 static union kvm_mmu_role
4935 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4936                                    bool execonly, u8 level)
4937 {
4938         union kvm_mmu_role role = {0};
4939
4940         /*
4941          * KVM does not support SMM transfer monitors, and consequently does not
4942          * support the "entry to SMM" control either.  role.base.smm is always 0.
4943          */
4944         WARN_ON_ONCE(is_smm(vcpu));
4945         role.base.level = level;
4946         role.base.has_4_byte_gpte = false;
4947         role.base.direct = false;
4948         role.base.ad_disabled = !accessed_dirty;
4949         role.base.guest_mode = true;
4950         role.base.access = ACC_ALL;
4951
4952         /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
4953         role.ext.word = 0;
4954         role.ext.execonly = execonly;
4955         role.ext.valid = 1;
4956
4957         return role;
4958 }
4959
4960 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4961                              int huge_page_level, bool accessed_dirty,
4962                              gpa_t new_eptp)
4963 {
4964         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4965         u8 level = vmx_eptp_page_walk_level(new_eptp);
4966         union kvm_mmu_role new_role =
4967                 kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4968                                                    execonly, level);
4969
4970         if (new_role.as_u64 != context->mmu_role.as_u64) {
4971                 context->mmu_role.as_u64 = new_role.as_u64;
4972
4973                 context->shadow_root_level = level;
4974
4975                 context->ept_ad = accessed_dirty;
4976                 context->page_fault = ept_page_fault;
4977                 context->gva_to_gpa = ept_gva_to_gpa;
4978                 context->sync_page = ept_sync_page;
4979                 context->invlpg = ept_invlpg;
4980                 context->root_level = level;
4981                 context->direct_map = false;
4982                 update_permission_bitmask(context, true);
4983                 context->pkru_mask = 0;
4984                 reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
4985                 reset_ept_shadow_zero_bits_mask(context, execonly);
4986         }
4987
4988         kvm_mmu_new_pgd(vcpu, new_eptp);
4989 }
4990 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4991
4992 static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
4993                              const struct kvm_mmu_role_regs *regs)
4994 {
4995         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4996
4997         kvm_init_shadow_mmu(vcpu, regs);
4998
4999         context->get_guest_pgd     = get_cr3;
5000         context->get_pdptr         = kvm_pdptr_read;
5001         context->inject_page_fault = kvm_inject_page_fault;
5002 }
5003
5004 static union kvm_mmu_role
5005 kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
5006 {
5007         union kvm_mmu_role role;
5008
5009         role = kvm_calc_shadow_root_page_role_common(vcpu, regs);
5010
5011         /*
5012          * Nested MMUs are used only for walking L2's gva->gpa; they never have
5013          * shadow pages of their own and so "direct" has no meaning.  Set it
5014          * to "true" to try to detect bogus usage of the nested MMU.
5015          */
5016         role.base.direct = true;
5017         role.base.level = role_regs_to_root_level(regs);
5018         return role;
5019 }
5020
5021 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
5022                                 const struct kvm_mmu_role_regs *regs)
5023 {
5024         union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, regs);
5025         struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5026
5027         if (new_role.as_u64 == g_context->mmu_role.as_u64)
5028                 return;
5029
5030         g_context->mmu_role.as_u64 = new_role.as_u64;
5031         g_context->get_guest_pgd     = get_cr3;
5032         g_context->get_pdptr         = kvm_pdptr_read;
5033         g_context->inject_page_fault = kvm_inject_page_fault;
5034         g_context->root_level        = new_role.base.level;
5035
5036         /*
5037          * L2 page tables are never shadowed, so there is no need to sync
5038          * SPTEs.
5039          */
5040         g_context->invlpg            = NULL;
5041
5042         /*
5043          * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5044          * L1's nested page tables (e.g. EPT12). The nested translation
5045          * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
5046          * L2's page tables as the first level of translation and L1's
5047          * nested page tables as the second level of translation. Basically
5048          * the gva_to_gpa functions between mmu and nested_mmu are swapped.
5049          */
5050         if (!is_paging(vcpu))
5051                 g_context->gva_to_gpa = nonpaging_gva_to_gpa;
5052         else if (is_long_mode(vcpu))
5053                 g_context->gva_to_gpa = paging64_gva_to_gpa;
5054         else if (is_pae(vcpu))
5055                 g_context->gva_to_gpa = paging64_gva_to_gpa;
5056         else
5057                 g_context->gva_to_gpa = paging32_gva_to_gpa;
5058
5059         reset_guest_paging_metadata(vcpu, g_context);
5060 }
5061
5062 void kvm_init_mmu(struct kvm_vcpu *vcpu)
5063 {
5064         struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
5065
5066         if (mmu_is_nested(vcpu))
5067                 init_kvm_nested_mmu(vcpu, &regs);
5068         else if (tdp_enabled)
5069                 init_kvm_tdp_mmu(vcpu, &regs);
5070         else
5071                 init_kvm_softmmu(vcpu, &regs);
5072 }
5073 EXPORT_SYMBOL_GPL(kvm_init_mmu);
5074
5075 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
5076 {
5077         /*
5078          * Invalidate all MMU roles to force them to reinitialize as CPUID
5079          * information is factored into reserved bit calculations.
5080          *
5081          * Correctly handling multiple vCPU models with respect to paging and
5082          * physical address properties in a single VM would require tracking
5083          * all relevant CPUID information in kvm_mmu_page_role. That is very
5084          * undesirable as it would increase the memory requirements for
5085          * gfn_track (see struct kvm_mmu_page_role comments).  For now that
5086          * problem is swept under the rug; KVM's CPUID API is horrific and
5087          * it's all but impossible to solve it without introducing a new API.
5088          */
5089         vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
5090         vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
5091         vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
5092         kvm_mmu_reset_context(vcpu);
5093
5094         /*
5095          * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
5096          * kvm_arch_vcpu_ioctl().
5097          */
5098         KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
5099 }
5100
5101 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
5102 {
5103         kvm_mmu_unload(vcpu);
5104         kvm_init_mmu(vcpu);
5105 }
5106 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
5107
5108 int kvm_mmu_load(struct kvm_vcpu *vcpu)
5109 {
5110         int r;
5111
5112         r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
5113         if (r)
5114                 goto out;
5115         r = mmu_alloc_special_roots(vcpu);
5116         if (r)
5117                 goto out;
5118         if (vcpu->arch.mmu->direct_map)
5119                 r = mmu_alloc_direct_roots(vcpu);
5120         else
5121                 r = mmu_alloc_shadow_roots(vcpu);
5122         if (r)
5123                 goto out;
5124
5125         kvm_mmu_sync_roots(vcpu);
5126
5127         kvm_mmu_load_pgd(vcpu);
5128
5129         /*
5130          * Flush any TLB entries for the new root, as the provenance of the root
5131          * is unknown.  Even if KVM ensures there are no stale TLB entries
5132          * for a freed root, in theory another hypervisor could have left
5133          * stale entries.  Flushing on alloc also allows KVM to skip the TLB
5134          * flush when freeing a root (see kvm_tdp_mmu_put_root()).
5135          */
5136         static_call(kvm_x86_flush_tlb_current)(vcpu);
5137 out:
5138         return r;
5139 }
5140
5141 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5142 {
5143         struct kvm *kvm = vcpu->kvm;
5144
5145         kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5146         WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5147         kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5148         WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5149         vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
5150 }
5151
5152 static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
5153 {
5154         struct kvm_mmu_page *sp;
5155
5156         if (!VALID_PAGE(root_hpa))
5157                 return false;
5158
5159         /*
5160          * When freeing obsolete roots, treat roots as obsolete if they don't
5161          * have an associated shadow page.  This does mean KVM will get false
5162          * positives and free roots that don't strictly need to be freed, but
5163          * such false positives are relatively rare:
5164          *
5165          *  (a) only PAE paging and nested NPT have roots without shadow pages
5166          *  (b) remote reloads due to a memslot update obsoletes _all_ roots
5167          *  (c) KVM doesn't track previous roots for PAE paging, and the guest
5168          *      is unlikely to zap an in-use PGD.
5169          */
5170         sp = to_shadow_page(root_hpa);
5171         return !sp || is_obsolete_sp(kvm, sp);
5172 }
5173
5174 static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
5175 {
5176         unsigned long roots_to_free = 0;
5177         int i;
5178
5179         if (is_obsolete_root(kvm, mmu->root.hpa))
5180                 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5181
5182         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5183                 if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
5184                         roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5185         }
5186
5187         if (roots_to_free)
5188                 kvm_mmu_free_roots(kvm, mmu, roots_to_free);
5189 }
5190
5191 void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
5192 {
5193         __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5194         __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
5195 }
5196
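     /*
      * Returns true if changing the SPTE from @old to @new requires a remote
      * TLB flush, i.e. if the old SPTE was present and the new SPTE drops the
      * mapping, changes the target PFN, or removes permissions.  The NX bit is
      * flipped via shadow_nx_mask so that setting NX counts as removing
      * execute permission.
      */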
5197 static bool need_remote_flush(u64 old, u64 new)
5198 {
5199         if (!is_shadow_present_pte(old))
5200                 return false;
5201         if (!is_shadow_present_pte(new))
5202                 return true;
5203         if ((old ^ new) & PT64_BASE_ADDR_MASK)
5204                 return true;
5205         old ^= shadow_nx_mask;
5206         new ^= shadow_nx_mask;
5207         return (old & ~new & PT64_PERM_MASK) != 0;
5208 }
5209
5210 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5211                                     int *bytes)
5212 {
5213         u64 gentry = 0;
5214         int r;
5215
5216         /*
5217          * Assume that the pte write is on a page table of the same type
5218          * as the current vcpu's paging mode, since we update the sptes only
5219          * when they have the same mode.
5220          */
5221         if (is_pae(vcpu) && *bytes == 4) {
5222                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5223                 *gpa &= ~(gpa_t)7;
5224                 *bytes = 8;
5225         }
5226
5227         if (*bytes == 4 || *bytes == 8) {
5228                 r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5229                 if (r)
5230                         gentry = 0;
5231         }
5232
5233         return gentry;
5234 }
5235
5236 /*
5237  * If we're seeing too many writes to a page, it may no longer be a page table,
5238  * or we may be forking, in which case it is better to unmap the page.
5239  */
5240 static bool detect_write_flooding(struct kvm_mmu_page *sp)
5241 {
5242         /*
5243          * Skip write-flooding detection for an sp whose level is 1, because
5244          * it can become unsync, and then the guest page is not write-protected.
5245          */
5246         if (sp->role.level == PG_LEVEL_4K)
5247                 return false;
5248
5249         atomic_inc(&sp->write_flooding_count);
5250         return atomic_read(&sp->write_flooding_count) >= 3;
5251 }
5252
5253 /*
5254  * Misaligned accesses are too much trouble to fix up; also, they usually
5255  * indicate a page is not used as a page table.
5256  */
5257 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5258                                     int bytes)
5259 {
5260         unsigned offset, pte_size, misaligned;
5261
5262         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
5263                  gpa, bytes, sp->role.word);
5264
5265         offset = offset_in_page(gpa);
5266         pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
5267
5268         /*
5269          * Sometimes, the OS only writes the last byte to update status
5270          * bits; for example, Linux uses the andb instruction in clear_bit().
5271          */
5272         if (!(offset & (pte_size - 1)) && bytes == 1)
5273                 return false;
5274
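             /*
              * The write is misaligned if it spans a gpte boundary, i.e. if
              * the first and last bytes written land in different
              * pte_size-aligned slots, or if fewer than 4 bytes are written.
              */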
5275         misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5276         misaligned |= bytes < 4;
5277
5278         return misaligned;
5279 }
5280
5281 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5282 {
5283         unsigned page_offset, quadrant;
5284         u64 *spte;
5285         int level;
5286
5287         page_offset = offset_in_page(gpa);
5288         level = sp->role.level;
5289         *nspte = 1;
5290         if (sp->role.has_4_byte_gpte) {
5291                 page_offset <<= 1;      /* 32->64 */
5292                 /*
5293                  * A 32-bit pde maps 4MB while the shadow pdes map
5294                  * only 2MB.  So we need to double the offset again
5295                  * and zap two pdes instead of one.
5296                  */
5297                 if (level == PT32_ROOT_LEVEL) {
5298                         page_offset &= ~7; /* kill rounding error */
5299                         page_offset <<= 1;
5300                         *nspte = 2;
5301                 }
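                     /*
                      * With 4-byte gptes, a shadow page covers only part of
                      * the guest page table; role.quadrant identifies which
                      * part.  Bail if the write hit a portion this shadow
                      * page does not map.
                      */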
5302                 quadrant = page_offset >> PAGE_SHIFT;
5303                 page_offset &= ~PAGE_MASK;
5304                 if (quadrant != sp->role.quadrant)
5305                         return NULL;
5306         }
5307
5308         spte = &sp->spt[page_offset / sizeof(*spte)];
5309         return spte;
5310 }
5311
5312 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5313                               const u8 *new, int bytes,
5314                               struct kvm_page_track_notifier_node *node)
5315 {
5316         gfn_t gfn = gpa >> PAGE_SHIFT;
5317         struct kvm_mmu_page *sp;
5318         LIST_HEAD(invalid_list);
5319         u64 entry, gentry, *spte;
5320         int npte;
5321         bool flush = false;
5322
5323         /*
5324          * If we don't have indirect shadow pages, it means no page is
5325          * write-protected, so we can simply exit.
5326          */
5327         if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5328                 return;
5329
5330         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5331
5332          * No need to care whether the memory allocation is successful
5333          * or not, since pte prefetch is skipped if the cache does not
5334          * have enough objects.
5335          * enough objects in the cache.
5336          */
5337         mmu_topup_memory_caches(vcpu, true);
5338
5339         write_lock(&vcpu->kvm->mmu_lock);
5340
5341         gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5342
5343         ++vcpu->kvm->stat.mmu_pte_write;
5344
5345         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5346                 if (detect_write_misaligned(sp, gpa, bytes) ||
5347                       detect_write_flooding(sp)) {
5348                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5349                         ++vcpu->kvm->stat.mmu_flooded;
5350                         continue;
5351                 }
5352
5353                 spte = get_written_sptes(sp, gpa, &npte);
5354                 if (!spte)
5355                         continue;
5356
5357                 while (npte--) {
5358                         entry = *spte;
5359                         mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5360                         if (gentry && sp->role.level != PG_LEVEL_4K)
5361                                 ++vcpu->kvm->stat.mmu_pde_zapped;
5362                         if (need_remote_flush(entry, *spte))
5363                                 flush = true;
5364                         ++spte;
5365                 }
5366         }
5367         kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
5368         write_unlock(&vcpu->kvm->mmu_lock);
5369 }
5370
5371 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5372                        void *insn, int insn_len)
5373 {
5374         int r, emulation_type = EMULTYPE_PF;
5375         bool direct = vcpu->arch.mmu->direct_map;
5376
5377         if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
5378                 return RET_PF_RETRY;
5379
5380         r = RET_PF_INVALID;
5381         if (unlikely(error_code & PFERR_RSVD_MASK)) {
5382                 r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5383                 if (r == RET_PF_EMULATE)
5384                         goto emulate;
5385         }
5386
5387         if (r == RET_PF_INVALID) {
5388                 r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
5389                                           lower_32_bits(error_code), false);
5390                 if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
5391                         return -EIO;
5392         }
5393
5394         if (r < 0)
5395                 return r;
5396         if (r != RET_PF_EMULATE)
5397                 return 1;
5398
5399         /*
5400          * Before emulating the instruction, check if the error code
5401          * was due to a RO violation while translating the guest page.
5402          * This can occur when using nested virtualization with nested
5403          * paging in both guests. If true, we simply unprotect the page
5404          * and resume the guest.
5405          */
5406         if (vcpu->arch.mmu->direct_map &&
5407             (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5408                 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5409                 return 1;
5410         }
5411
5412         /*
5413          * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5414          * optimistically try to just unprotect the page and let the processor
5415          * re-execute the instruction that caused the page fault.  Do not allow
5416          * retrying MMIO emulation, as it's not only pointless but could also
5417          * cause us to enter an infinite loop because the processor will keep
5418          * faulting on the non-existent MMIO address.  Retrying an instruction
5419          * from a nested guest is also pointless and dangerous as we are only
5420          * explicitly shadowing L1's page tables, i.e. unprotecting something
5421          * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5422          */
5423         if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
5424                 emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5425 emulate:
5426         return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5427                                        insn_len);
5428 }
5429 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5430
5431 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
5432                             gva_t gva, hpa_t root_hpa)
5433 {
5434         int i;
5435
5436         /* It's actually a GPA for vcpu->arch.guest_mmu.  */
5437         if (mmu != &vcpu->arch.guest_mmu) {
5438                 /* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5439                 if (is_noncanonical_address(gva, vcpu))
5440                         return;
5441
5442                 static_call(kvm_x86_flush_tlb_gva)(vcpu, gva);
5443         }
5444
5445         if (!mmu->invlpg)
5446                 return;
5447
5448         if (root_hpa == INVALID_PAGE) {
5449                 mmu->invlpg(vcpu, gva, mmu->root.hpa);
5450
5451                 /*
5452                  * INVLPG is required to invalidate any global mappings for the VA,
5453                  * irrespective of PCID. Since it would take roughly the same amount
5454                  * of work to determine whether any of the prev_root mappings of the VA
5455                  * is marked global as it would to just sync it blindly, we might as well
5456                  * just always sync it.
5457                  *
5458                  * Mappings not reachable via the current cr3 or the prev_roots will be
5459                  * synced when switching to that cr3, so nothing needs to be done here
5460                  * for them.
5461                  */
5462                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5463                         if (VALID_PAGE(mmu->prev_roots[i].hpa))
5464                                 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5465         } else {
5466                 mmu->invlpg(vcpu, gva, root_hpa);
5467         }
5468 }
5469
5470 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
5471 {
5472         kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
5473         ++vcpu->stat.invlpg;
5474 }
5475 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5476
5477
5478 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5479 {
5480         struct kvm_mmu *mmu = vcpu->arch.mmu;
5481         bool tlb_flush = false;
5482         uint i;
5483
5484         if (pcid == kvm_get_active_pcid(vcpu)) {
5485                 mmu->invlpg(vcpu, gva, mmu->root.hpa);
5486                 tlb_flush = true;
5487         }
5488
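             /*
              * Also invalidate the GVA in any cached previous roots that are
              * associated with the same PCID.
              */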
5489         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5490                 if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5491                     pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5492                         mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5493                         tlb_flush = true;
5494                 }
5495         }
5496
5497         if (tlb_flush)
5498                 static_call(kvm_x86_flush_tlb_gva)(vcpu, gva);
5499
5500         ++vcpu->stat.invlpg;
5501
5502         /*
5503          * Mappings not reachable via the current cr3 or the prev_roots will be
5504          * synced when switching to that cr3, so nothing needs to be done here
5505          * for them.
5506          */
5507 }
5508
5509 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
5510                        int tdp_max_root_level, int tdp_huge_page_level)
5511 {
5512         tdp_enabled = enable_tdp;
5513         tdp_root_level = tdp_forced_root_level;
5514         max_tdp_level = tdp_max_root_level;
5515
5516         /*
5517          * max_huge_page_level reflects KVM's MMU capabilities irrespective
5518          * of kernel support, e.g. KVM may be capable of using 1GB pages when
5519          * the kernel is not.  But, KVM never creates a page size greater than
5520          * what is used by the kernel for any given HVA, i.e. the kernel's
5521          * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5522          */
5523         if (tdp_enabled)
5524                 max_huge_page_level = tdp_huge_page_level;
5525         else if (boot_cpu_has(X86_FEATURE_GBPAGES))
5526                 max_huge_page_level = PG_LEVEL_1G;
5527         else
5528                 max_huge_page_level = PG_LEVEL_2M;
5529 }
5530 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
5531
5532 /* The return value indicates whether a TLB flush on all vcpus is needed. */
5533 typedef bool (*slot_level_handler) (struct kvm *kvm,
5534                                     struct kvm_rmap_head *rmap_head,
5535                                     const struct kvm_memory_slot *slot);
5536
5537 /* The caller should hold mmu-lock before calling this function. */
5538 static __always_inline bool
5539 slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5540                         slot_level_handler fn, int start_level, int end_level,
5541                         gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
5542                         bool flush)
5543 {
5544         struct slot_rmap_walk_iterator iterator;
5545
5546         for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5547                         end_gfn, &iterator) {
5548                 if (iterator.rmap)
5549                         flush |= fn(kvm, iterator.rmap, memslot);
5550
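                     /*
                      * Before potentially dropping mmu_lock, flush TLBs for
                      * the range processed so far if the handler made changes
                      * and the caller requested a flush on yield.
                      */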
5551                 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5552                         if (flush && flush_on_yield) {
5553                                 kvm_flush_remote_tlbs_with_address(kvm,
5554                                                 start_gfn,
5555                                                 iterator.gfn - start_gfn + 1);
5556                                 flush = false;
5557                         }
5558                         cond_resched_rwlock_write(&kvm->mmu_lock);
5559                 }
5560         }
5561
5562         return flush;
5563 }
5564
5565 static __always_inline bool
5566 slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5567                   slot_level_handler fn, int start_level, int end_level,
5568                   bool flush_on_yield)
5569 {
5570         return slot_handle_level_range(kvm, memslot, fn, start_level,
5571                         end_level, memslot->base_gfn,
5572                         memslot->base_gfn + memslot->npages - 1,
5573                         flush_on_yield, false);
5574 }
5575
5576 static __always_inline bool
5577 slot_handle_level_4k(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5578                      slot_level_handler fn, bool flush_on_yield)
5579 {
5580         return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5581                                  PG_LEVEL_4K, flush_on_yield);
5582 }
5583
5584 static void free_mmu_pages(struct kvm_mmu *mmu)
5585 {
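             /*
              * The PAE root is decrypted at allocation time when shadow
              * paging is in use (see __kvm_mmu_create()); re-encrypt it
              * before freeing.
              */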
5586         if (!tdp_enabled && mmu->pae_root)
5587                 set_memory_encrypted((unsigned long)mmu->pae_root, 1);
5588         free_page((unsigned long)mmu->pae_root);
5589         free_page((unsigned long)mmu->pml4_root);
5590         free_page((unsigned long)mmu->pml5_root);
5591 }
5592
5593 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5594 {
5595         struct page *page;
5596         int i;
5597
5598         mmu->root.hpa = INVALID_PAGE;
5599         mmu->root.pgd = 0;
5600         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5601                 mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5602
5603         /* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
5604         if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
5605                 return 0;
5606
5607         /*
5608          * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5609          * while the PDP table is a per-vCPU construct that's allocated at MMU
5610          * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5611          * x86_64.  Therefore we need to allocate the PDP table in the first
5612          * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
5613          * generally doesn't use PAE paging and can skip allocating the PDP
5614          * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
5615          * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
5616          * KVM; that horror is handled on-demand by mmu_alloc_special_roots().
5617          */
5618         if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5619                 return 0;
5620
5621         page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5622         if (!page)
5623                 return -ENOMEM;
5624
5625         mmu->pae_root = page_address(page);
5626
5627         /*
5628          * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
5629          * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
5630          * that KVM's writes and the CPU's reads get along.  Note, this is
5631          * only necessary when using shadow paging, as 64-bit NPT can get at
5632          * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
5633          * by 32-bit kernels (when KVM itself uses 32-bit NPT).
5634          */
5635         if (!tdp_enabled)
5636                 set_memory_decrypted((unsigned long)mmu->pae_root, 1);
5637         else
5638                 WARN_ON_ONCE(shadow_me_mask);
5639
5640         for (i = 0; i < 4; ++i)
5641                 mmu->pae_root[i] = INVALID_PAE_ROOT;
5642
5643         return 0;
5644 }
5645
5646 int kvm_mmu_create(struct kvm_vcpu *vcpu)
5647 {
5648         int ret;
5649
5650         vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
5651         vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
5652
5653         vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
5654         vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
5655
5656         vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
5657
5658         vcpu->arch.mmu = &vcpu->arch.root_mmu;
5659         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5660
5661         ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5662         if (ret)
5663                 return ret;
5664
5665         ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5666         if (ret)
5667                 goto fail_allocate_root;
5668
5669         return ret;
5670  fail_allocate_root:
5671         free_mmu_pages(&vcpu->arch.guest_mmu);
5672         return ret;
5673 }
5674
5675 #define BATCH_ZAP_PAGES 10
5676 static void kvm_zap_obsolete_pages(struct kvm *kvm)
5677 {
5678         struct kvm_mmu_page *sp, *node;
5679         int nr_zapped, batch = 0;
5680
5681 restart:
5682         list_for_each_entry_safe_reverse(sp, node,
5683               &kvm->arch.active_mmu_pages, link) {
5684                 /*
5685                  * No obsolete valid page exists before a newly created page
5686                  * since active_mmu_pages is a FIFO list.
5687                  */
5688                 if (!is_obsolete_sp(kvm, sp))
5689                         break;
5690
5691                 /*
5692                  * Invalid pages should never land back on the list of active
5693                  * pages.  Skip the bogus page, otherwise we'll get stuck in an
5694                  * infinite loop if the page gets put back on the list (again).
5695                  */
5696                 if (WARN_ON(sp->role.invalid))
5697                         continue;
5698
5699                 /*
5700                  * No need to flush the TLB since we're only zapping shadow
5701          * pages with an obsolete generation number and all vCPUs have
5702                  * loaded a new root, i.e. the shadow pages being zapped cannot
5703                  * be in active use by the guest.
5704                  */
5705                 if (batch >= BATCH_ZAP_PAGES &&
5706                     cond_resched_rwlock_write(&kvm->mmu_lock)) {
5707                         batch = 0;
5708                         goto restart;
5709                 }
5710
5711                 if (__kvm_mmu_prepare_zap_page(kvm, sp,
5712                                 &kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5713                         batch += nr_zapped;
5714                         goto restart;
5715                 }
5716         }
5717
5718         /*
5719          * Kick all vCPUs (via remote TLB flush) before freeing the page tables
5720          * to ensure KVM is not in the middle of a lockless shadow page table
5721          * walk, which may reference the pages.  The remote TLB flush itself is
5722          * not required and is simply a convenient way to kick vCPUs as needed.
5723          * KVM performs a local TLB flush when allocating a new root (see
5724          * kvm_mmu_load()), and the reload in the caller ensures no vCPUs are
5725          * running with an obsolete MMU.
5726          */
5727         kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5728 }
5729
5730 /*
5731  * Fast invalidate all shadow pages and use lock-break technique
5732  * to zap obsolete pages.
5733  *
5734  * It's required when a memslot is being deleted or the VM is being
5735  * destroyed; in these cases, we should ensure that the KVM MMU does
5736  * not use any resource of the slot being deleted, or of any slot at
5737  * all, after calling the function.
5738  */
5739 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5740 {
5741         lockdep_assert_held(&kvm->slots_lock);
5742
5743         write_lock(&kvm->mmu_lock);
5744         trace_kvm_mmu_zap_all_fast(kvm);
5745
5746         /*
5747          * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5748          * held for the entire duration of zapping obsolete pages, it's
5749          * impossible for there to be multiple invalid generations associated
5750          * with *valid* shadow pages at any given time, i.e. there is exactly
5751          * one valid generation and (at most) one invalid generation.
5752          */
5753         kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5754
5755         /*
5756          * In order to ensure all vCPUs drop their soon-to-be invalid roots,
5757          * invalidating TDP MMU roots must be done while holding mmu_lock for
5758          * write and in the same critical section as making the reload request,
5759          * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
5760          */
5761         if (is_tdp_mmu_enabled(kvm))
5762                 kvm_tdp_mmu_invalidate_all_roots(kvm);
5763
5764         /*
5765          * Notify all vCPUs to reload their shadow page tables and flush
5766          * their TLBs.  All vCPUs will then switch to a new shadow page
5767          * table with the new mmu_valid_gen.
5768          *
5769          * Note: this must be done under the protection of mmu_lock;
5770          * otherwise, a vCPU could purge a shadow page but miss the TLB flush.
5771          */
5772         kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
5773
5774         kvm_zap_obsolete_pages(kvm);
5775
5776         write_unlock(&kvm->mmu_lock);
5777
5778         /*
5779          * Zap the invalidated TDP MMU roots; all SPTEs must be dropped before
5780          * returning to the caller.  E.g. if the zap is in response to a memslot
5781          * deletion, mmu_notifier callbacks will be unable to reach the SPTEs
5782          * associated with the deleted memslot once the update completes, so
5783          * deferring the zap until the final reference to the root is put would
5784          * lead to use-after-free.
5785          */
5786         if (is_tdp_mmu_enabled(kvm))
5787                 kvm_tdp_mmu_zap_invalidated_roots(kvm);
5788 }
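
/*
 * Illustrative sketch, not part of the upstream file: with mmu_valid_gen
 * toggling between '0' and '1' as in kvm_mmu_zap_all_fast() above, a shadow
 * page is obsolete iff its recorded generation no longer matches the VM's,
 * which mirrors the is_obsolete_sp() check used during the zap.  The helper
 * name here is hypothetical.
 */
static __maybe_unused bool example_sp_is_obsolete(struct kvm *kvm,
						  struct kvm_mmu_page *sp)
{
	return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}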
5789
5790 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5791 {
5792         return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5793 }
5794
5795 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5796                         struct kvm_memory_slot *slot,
5797                         struct kvm_page_track_notifier_node *node)
5798 {
5799         kvm_mmu_zap_all_fast(kvm);
5800 }
5801
5802 int kvm_mmu_init_vm(struct kvm *kvm)
5803 {
5804         struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5805         int r;
5806
5807         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
5808         INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
5809         INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
5810         spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
5811
5812         r = kvm_mmu_init_tdp_mmu(kvm);
5813         if (r < 0)
5814                 return r;
5815
5816         node->track_write = kvm_mmu_pte_write;
5817         node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5818         kvm_page_track_register_notifier(kvm, node);
5819         return 0;
5820 }
5821
5822 void kvm_mmu_uninit_vm(struct kvm *kvm)
5823 {
5824         struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5825
5826         kvm_page_track_unregister_notifier(kvm, node);
5827
5828         kvm_mmu_uninit_tdp_mmu(kvm);
5829 }
5830
5831 static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5832 {
5833         const struct kvm_memory_slot *memslot;
5834         struct kvm_memslots *slots;
5835         struct kvm_memslot_iter iter;
5836         bool flush = false;
5837         gfn_t start, end;
5838         int i;
5839
5840         if (!kvm_memslots_have_rmaps(kvm))
5841                 return flush;
5842
5843         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5844                 slots = __kvm_memslots(kvm, i);
5845
5846                 kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
5847                         memslot = iter.slot;
5848                         start = max(gfn_start, memslot->base_gfn);
5849                         end = min(gfn_end, memslot->base_gfn + memslot->npages);
5850                         if (WARN_ON_ONCE(start >= end))
5851                                 continue;
5852
5853                         flush = slot_handle_level_range(kvm, memslot,
5854                                                         kvm_zap_rmapp,
5855                                                         PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
5856                                                         start, end - 1, true, flush);
5857                 }
5858         }
5859
5860         return flush;
5861 }
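
/*
 * Worked example of the clamping above (all values hypothetical): a memslot
 * with base_gfn = 0x100 and npages = 0x200 covers GFNs [0x100, 0x300).  A zap
 * request for [0x80, 0x180) is clamped to start = max(0x80, 0x100) = 0x100
 * and end = min(0x180, 0x300) = 0x180, so only the overlapping GFNs are
 * passed to slot_handle_level_range().
 */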
5862
5863 /*
5864  * Invalidate (zap) SPTEs that cover GFNs in the range [gfn_start, gfn_end),
5865  * i.e. gfn_end is exclusive.
5866  */
5867 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5868 {
5869         bool flush;
5870         int i;
5871
5872         if (WARN_ON_ONCE(gfn_end <= gfn_start))
5873                 return;
5874
5875         write_lock(&kvm->mmu_lock);
5876
5877         kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
5878
5879         flush = __kvm_zap_rmaps(kvm, gfn_start, gfn_end);
5880
5881         if (is_tdp_mmu_enabled(kvm)) {
5882                 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
5883                         flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
5884                                                       gfn_end, true, flush);
5885         }
5886
5887         if (flush)
5888                 kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
5889                                                    gfn_end - gfn_start);
5890
5891         kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
5892
5893         write_unlock(&kvm->mmu_lock);
5894 }
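
/*
 * Illustrative sketch, not part of the upstream file: because gfn_end is
 * exclusive, zapping the translation of a single guest page frame is a
 * one-GFN-wide range.  The helper name is hypothetical.
 */
static __maybe_unused void example_zap_one_gfn(struct kvm *kvm, gfn_t gfn)
{
	/* Zaps [gfn, gfn + 1), i.e. exactly one guest page frame. */
	kvm_zap_gfn_range(kvm, gfn, gfn + 1);
}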
5895
5896 static bool slot_rmap_write_protect(struct kvm *kvm,
5897                                     struct kvm_rmap_head *rmap_head,
5898                                     const struct kvm_memory_slot *slot)
5899 {
5900         return rmap_write_protect(rmap_head, false);
5901 }
5902
5903 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
5904                                       const struct kvm_memory_slot *memslot,
5905                                       int start_level)
5906 {
5907         bool flush = false;
5908
5909         if (kvm_memslots_have_rmaps(kvm)) {
5910                 write_lock(&kvm->mmu_lock);
5911                 flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5912                                           start_level, KVM_MAX_HUGEPAGE_LEVEL,
5913                                           false);
5914                 write_unlock(&kvm->mmu_lock);
5915         }
5916
5917         if (is_tdp_mmu_enabled(kvm)) {
5918                 read_lock(&kvm->mmu_lock);
5919                 flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
5920                 read_unlock(&kvm->mmu_lock);
5921         }
5922
5923         /*
5924          * Flush TLBs if any SPTEs had to be write-protected to ensure that
5925          * guest writes are reflected in the dirty bitmap before the memslot
5926          * update completes, i.e. before enabling dirty logging is visible to
5927          * userspace.
5928          *
5929          * Perform the TLB flush outside the mmu_lock to reduce the amount of
5930          * time the lock is held. However, this does mean that another CPU can
5931          * now grab mmu_lock and encounter a write-protected SPTE while CPUs
5932          * still have a writable mapping for the associated GFN in their TLB.
5933          *
5934          * This is safe but requires KVM to be careful when making decisions
5935          * based on the write-protection status of an SPTE. Specifically, KVM
5936          * also write-protects SPTEs to monitor changes to guest page tables
5937          * during shadow paging, and must guarantee no CPUs can write to those
5938          * during shadow paging, and must guarantee that no CPU can write to
5939          * those pages before the lock is dropped. As mentioned in the previous
5940          * paragraph, a write-protected SPTE is no guarantee that a CPU cannot
5941          * perform writes. So to determine if a TLB flush is truly required, KVM
5942          * flush if-and-only-if this bit was already clear.
5943          *
5944          * See is_writable_pte() for more details.
5945          */
5946         if (flush)
5947                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5948 }
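
/*
 * Illustrative sketch, not part of the upstream file: a typical caller that
 * enables dirty logging would write-protect the whole slot starting at the
 * 4K level, so the first guest write to each page faults and is recorded in
 * the dirty bitmap.  The helper name is hypothetical, and real callers may
 * choose a different start_level (e.g. when huge pages are handled lazily).
 */
static __maybe_unused void example_start_dirty_logging(struct kvm *kvm,
						       const struct kvm_memory_slot *slot)
{
	kvm_mmu_slot_remove_write_access(kvm, slot, PG_LEVEL_4K);
}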
5949
5950 /* Must be called with the mmu_lock held in write-mode. */
5951 void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
5952                                    const struct kvm_memory_slot *memslot,
5953                                    u64 start, u64 end,
5954                                    int target_level)
5955 {
5956         if (is_tdp_mmu_enabled(kvm))
5957                 kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end,
5958                                                  target_level, false);
5959
5960         /*
5961          * A TLB flush is unnecessary at this point for the same reasons as in
5962          * kvm_mmu_slot_try_split_huge_pages().
5963          */
5964 }
5965
5966 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
5967                                         const struct kvm_memory_slot *memslot,
5968                                         int target_level)
5969 {
5970         u64 start = memslot->base_gfn;
5971         u64 end = start + memslot->npages;
5972
5973         if (is_tdp_mmu_enabled(kvm)) {
5974                 read_lock(&kvm->mmu_lock);
5975                 kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true);
5976                 read_unlock(&kvm->mmu_lock);
5977         }
5978
5979         /*
5980          * No TLB flush is necessary here. KVM will flush TLBs after
5981          * write-protecting and/or clearing dirty on the newly split SPTEs to
5982          * ensure that guest writes are reflected in the dirty log before the
5983          * ioctl to enable dirty logging on this memslot completes. Since the
5984          * split SPTEs retain the write and dirty bits of the huge SPTE, it is
5985          * safe for KVM to decide if a TLB flush is necessary based on the split
5986          * SPTEs.
5987          */
5988 }
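
/*
 * Illustrative sketch, not part of the upstream file: eager page splitting
 * ahead of dirty logging maps everything in the slot at 4K up front so that
 * later write-protection faults stay cheap.  The helper name is hypothetical.
 */
static __maybe_unused void example_eager_split_for_dirty_logging(struct kvm *kvm,
						const struct kvm_memory_slot *slot)
{
	kvm_mmu_slot_try_split_huge_pages(kvm, slot, PG_LEVEL_4K);
}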
5989
5990 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
5991                                          struct kvm_rmap_head *rmap_head,
5992                                          const struct kvm_memory_slot *slot)
5993 {
5994         u64 *sptep;
5995         struct rmap_iterator iter;
5996         int need_tlb_flush = 0;
5997         kvm_pfn_t pfn;
5998         struct kvm_mmu_page *sp;
5999
6000 restart:
6001         for_each_rmap_spte(rmap_head, &iter, sptep) {
6002                 sp = sptep_to_sp(sptep);
6003                 pfn = spte_to_pfn(*sptep);
6004
6005                 /*
6006                  * We cannot create huge page mappings for indirect shadow
6007                  * pages, which are found on the last rmap level (level = 1)
6008                  * when not using TDP; such shadow pages are kept in sync with
6009                  * the guest's own page table, which uses 4K mappings when the
6010                  * indirect sp has level = 1.
6011                  */
6012                 if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
6013                     sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
6014                                                                pfn, PG_LEVEL_NUM)) {
6015                         pte_list_remove(kvm, rmap_head, sptep);
6016
6017                         if (kvm_available_flush_tlb_with_range())
6018                                 kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
6019                                         KVM_PAGES_PER_HPAGE(sp->role.level));
6020                         else
6021                                 need_tlb_flush = 1;
6022
6023                         goto restart;
6024                 }
6025         }
6026
6027         return need_tlb_flush;
6028 }
6029
6030 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
6031                                    const struct kvm_memory_slot *slot)
6032 {
6033         if (kvm_memslots_have_rmaps(kvm)) {
6034                 write_lock(&kvm->mmu_lock);
6035                 /*
6036                  * Zap only 4k SPTEs since the legacy MMU only supports dirty
6037                  * logging at a 4k granularity and never creates collapsible
6038                  * 2m SPTEs during dirty logging.
6039                  */
6040                 if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
6041                         kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
6042                 write_unlock(&kvm->mmu_lock);
6043         }
6044
6045         if (is_tdp_mmu_enabled(kvm)) {
6046                 read_lock(&kvm->mmu_lock);
6047                 kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
6048                 read_unlock(&kvm->mmu_lock);
6049         }
6050 }
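
/*
 * Illustrative sketch, not part of the upstream file: the "collapsible" test
 * applied by kvm_mmu_zap_collapsible_spte() above, pulled out for clarity.  A
 * direct SPTE is worth zapping if the slot/pfn would now allow a larger
 * mapping than the level the SPTE was originally created at.  The helper name
 * is hypothetical.
 */
static __maybe_unused bool example_spte_is_collapsible(struct kvm *kvm,
						       const struct kvm_memory_slot *slot,
						       struct kvm_mmu_page *sp,
						       kvm_pfn_t pfn)
{
	return sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
	       sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
							   pfn, PG_LEVEL_NUM);
}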
6051
6052 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
6053                                         const struct kvm_memory_slot *memslot)
6054 {
6055         /*
6056          * All current use cases for flushing the TLBs for a specific memslot
6057          * are related to dirty logging, and many do the TLB flush out of
6058          * mmu_lock. The interaction between the various operations on the
6059          * memslot must be serialized by slots_lock to ensure the TLB flush from
6060          * one operation is observed by any other operation on the same memslot.
6061          */
6062         lockdep_assert_held(&kvm->slots_lock);
6063         kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
6064                                            memslot->npages);
6065 }
6066
6067 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
6068                                    const struct kvm_memory_slot *memslot)
6069 {
6070         bool flush = false;
6071
6072         if (kvm_memslots_have_rmaps(kvm)) {
6073                 write_lock(&kvm->mmu_lock);
6074                 /*
6075                  * Clear dirty bits only on 4k SPTEs since the legacy MMU only
6076                  * supports dirty logging at a 4k granularity.
6077                  */
6078                 flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
6079                 write_unlock(&kvm->mmu_lock);
6080         }
6081
6082         if (is_tdp_mmu_enabled(kvm)) {
6083                 read_lock(&kvm->mmu_lock);
6084                 flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
6085                 read_unlock(&kvm->mmu_lock);
6086         }
6087
6088         /*
6089          * It's also safe to flush TLBs out of mmu_lock here as currently this
6090          * function is only used for dirty logging, in which case flushing TLBs
6091          * out of mmu_lock still guarantees that no dirty pages will be lost in
6092          * the dirty_bitmap.
6093          */
6094         if (flush)
6095                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
6096 }
6097
6098 void kvm_mmu_zap_all(struct kvm *kvm)
6099 {
6100         struct kvm_mmu_page *sp, *node;
6101         LIST_HEAD(invalid_list);
6102         int ign;
6103
6104         write_lock(&kvm->mmu_lock);
6105 restart:
6106         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
6107                 if (WARN_ON(sp->role.invalid))
6108                         continue;
6109                 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
6110                         goto restart;
6111                 if (cond_resched_rwlock_write(&kvm->mmu_lock))
6112                         goto restart;
6113         }
6114
6115         kvm_mmu_commit_zap_page(kvm, &invalid_list);
6116
6117         if (is_tdp_mmu_enabled(kvm))
6118                 kvm_tdp_mmu_zap_all(kvm);
6119
6120         write_unlock(&kvm->mmu_lock);
6121 }
6122
6123 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
6124 {
6125         WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
6126
6127         gen &= MMIO_SPTE_GEN_MASK;
6128
6129         /*
6130          * Generation numbers are incremented in multiples of the number of
6131          * address spaces in order to provide unique generations across all
6132          * address spaces.  Strip what is effectively the address space
6133          * modifier prior to checking for a wrap of the MMIO generation so
6134          * that a wrap in any address space is detected.
6135          */
6136         gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
6137
6138         /*
6139          * The very rare case: if the MMIO generation number has wrapped,
6140          * zap all shadow pages.
6141          */
6142         if (unlikely(gen == 0)) {
6143                 kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
6144                 kvm_mmu_zap_all_fast(kvm);
6145         }
6146 }
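
/*
 * Worked example (assuming KVM_ADDRESS_SPACE_NUM == 2, as on x86 where SMM
 * has its own address space): memslot generations advance in steps of two,
 * with the low bit effectively selecting the address space.  Stripping that
 * bit via gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1) maps a wrapped generation
 * of 0 or 1 to 0 in either address space, so the wraparound zap triggers no
 * matter which address space saw the final memslot update.
 */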
6147
6148 static unsigned long
6149 mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
6150 {
6151         struct kvm *kvm;
6152         int nr_to_scan = sc->nr_to_scan;
6153         unsigned long freed = 0;
6154
6155         mutex_lock(&kvm_lock);
6156
6157         list_for_each_entry(kvm, &vm_list, vm_list) {
6158                 int idx;
6159                 LIST_HEAD(invalid_list);
6160
6161                 /*
6162                  * Never scan more than sc->nr_to_scan VM instances.
6163                  * In practice this condition is never hit, since we do not try
6164                  * to shrink more than one VM and it is very unlikely to see
6165                  * !n_used_mmu_pages so many times.
6166                  */
6167                 if (!nr_to_scan--)
6168                         break;
6169                 /*
6170                  * n_used_mmu_pages is accessed without holding kvm->mmu_lock
6171                  * here. We may skip a VM instance erroneously, but we do not
6172                  * want to shrink a VM that has only just started to populate
6173                  * its MMU anyway.
6174                  */
6175                 if (!kvm->arch.n_used_mmu_pages &&
6176                     !kvm_has_zapped_obsolete_pages(kvm))
6177                         continue;
6178
6179                 idx = srcu_read_lock(&kvm->srcu);
6180                 write_lock(&kvm->mmu_lock);
6181
6182                 if (kvm_has_zapped_obsolete_pages(kvm)) {
6183                         kvm_mmu_commit_zap_page(kvm,
6184                               &kvm->arch.zapped_obsolete_pages);
6185                         goto unlock;
6186                 }
6187
6188                 freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
6189
6190 unlock:
6191                 write_unlock(&kvm->mmu_lock);
6192                 srcu_read_unlock(&kvm->srcu, idx);
6193
6194                 /*
6195                  * unfair on small ones
6196                  * per-vm shrinkers cry out
6197                  * sadness comes quickly
6198                  */
6199                 list_move_tail(&kvm->vm_list, &vm_list);
6200                 break;
6201         }
6202
6203         mutex_unlock(&kvm_lock);
6204         return freed;
6205 }
6206
6207 static unsigned long
6208 mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
6209 {
6210         return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
6211 }
6212
6213 static struct shrinker mmu_shrinker = {
6214         .count_objects = mmu_shrink_count,
6215         .scan_objects = mmu_shrink_scan,
6216         .seeks = DEFAULT_SEEKS * 10,
6217 };
6218
6219 static void mmu_destroy_caches(void)
6220 {
6221         kmem_cache_destroy(pte_list_desc_cache);
6222         kmem_cache_destroy(mmu_page_header_cache);
6223 }
6224
6225 static bool get_nx_auto_mode(void)
6226 {
6227         /* Return true when the CPU has the bug and mitigations are on. */
6228         return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
6229 }
6230
6231 static void __set_nx_huge_pages(bool val)
6232 {
6233         nx_huge_pages = itlb_multihit_kvm_mitigation = val;
6234 }
6235
6236 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
6237 {
6238         bool old_val = nx_huge_pages;
6239         bool new_val;
6240
6241         /* In "auto" mode deploy workaround only if CPU has the bug. */
6242         if (sysfs_streq(val, "off"))
6243                 new_val = 0;
6244         else if (sysfs_streq(val, "force"))
6245                 new_val = 1;
6246         else if (sysfs_streq(val, "auto"))
6247                 new_val = get_nx_auto_mode();
6248         else if (strtobool(val, &new_val) < 0)
6249                 return -EINVAL;
6250
6251         __set_nx_huge_pages(new_val);
6252
6253         if (new_val != old_val) {
6254                 struct kvm *kvm;
6255
6256                 mutex_lock(&kvm_lock);
6257
6258                 list_for_each_entry(kvm, &vm_list, vm_list) {
6259                         mutex_lock(&kvm->slots_lock);
6260                         kvm_mmu_zap_all_fast(kvm);
6261                         mutex_unlock(&kvm->slots_lock);
6262
6263                         wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6264                 }
6265                 mutex_unlock(&kvm_lock);
6266         }
6267
6268         return 0;
6269 }
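
/*
 * Usage sketch (paths follow the standard module_param_cb() sysfs layout for
 * kvm.ko): the parameter accepts "off", "force", "auto", or a plain boolean,
 * either at load time,
 *
 *	modprobe kvm nx_huge_pages=force
 *
 * or at runtime via
 *
 *	echo auto > /sys/module/kvm/parameters/nx_huge_pages
 *
 * A runtime change that actually flips the effective value zaps all shadow
 * pages in every VM and wakes each VM's recovery thread, as done above.
 */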
6270
6271 /*
6272  * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
6273  * its default value of -1 is technically undefined behavior for a boolean.
6274  */
6275 void kvm_mmu_x86_module_init(void)
6276 {
6277         if (nx_huge_pages == -1)
6278                 __set_nx_huge_pages(get_nx_auto_mode());
6279 }
6280
6281 /*
6282  * The bulk of the MMU initialization is deferred until the vendor module is
6283  * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
6284  * to be reset when a potentially different vendor module is loaded.
6285  */
6286 int kvm_mmu_vendor_module_init(void)
6287 {
6288         int ret = -ENOMEM;
6289
6290         /*
6291          * MMU roles use union aliasing which is, generally speaking, undefined
6292          * behavior. However, we supposedly know how compilers behave and the
6293          * current status quo is unlikely to change. The BUILD_BUG_ON() guards
6294          * below will let us know if that assumption ever becomes false.
6295          */
6296         BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
6297         BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
6298         BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
6299
6300         kvm_mmu_reset_all_pte_masks();
6301
6302         pte_list_desc_cache = kmem_cache_create("pte_list_desc",
6303                                             sizeof(struct pte_list_desc),
6304                                             0, SLAB_ACCOUNT, NULL);
6305         if (!pte_list_desc_cache)
6306                 goto out;
6307
6308         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
6309                                                   sizeof(struct kvm_mmu_page),
6310                                                   0, SLAB_ACCOUNT, NULL);
6311         if (!mmu_page_header_cache)
6312                 goto out;
6313
6314         if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
6315                 goto out;
6316
6317         ret = register_shrinker(&mmu_shrinker);
6318         if (ret)
6319                 goto out;
6320
6321         return 0;
6322
6323 out:
6324         mmu_destroy_caches();
6325         return ret;
6326 }
6327
6328 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
6329 {
6330         kvm_mmu_unload(vcpu);
6331         free_mmu_pages(&vcpu->arch.root_mmu);
6332         free_mmu_pages(&vcpu->arch.guest_mmu);
6333         mmu_free_memory_caches(vcpu);
6334 }
6335
6336 void kvm_mmu_vendor_module_exit(void)
6337 {
6338         mmu_destroy_caches();
6339         percpu_counter_destroy(&kvm_total_used_mmu_pages);
6340         unregister_shrinker(&mmu_shrinker);
6341 }
6342
6343 /*
6344  * Calculate the effective recovery period, accounting for '0' meaning "let KVM
6345  * select a halving time of 1 hour".  Returns true if recovery is enabled.
6346  */
6347 static bool calc_nx_huge_pages_recovery_period(uint *period)
6348 {
6349         /*
6350          * Use READ_ONCE to get the params, this may be called outside of the
6351          * param setters, e.g. by the kthread to compute its next timeout.
6352          */
6353         bool enabled = READ_ONCE(nx_huge_pages);
6354         uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
6355
6356         if (!enabled || !ratio)
6357                 return false;
6358
6359         *period = READ_ONCE(nx_huge_pages_recovery_period_ms);
6360         if (!*period) {
6361                 /* Make sure the period is not less than one second.  */
6362                 ratio = min(ratio, 3600u);
6363                 *period = 60 * 60 * 1000 / ratio;
6364         }
6365         return true;
6366 }
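
/*
 * Worked example (hypothetical values): with a recovery ratio of 60 and no
 * explicit period, the computed period is 60 * 60 * 1000 / 60 = 60000 ms,
 * i.e. roughly 1/60th of the disallowed NX huge pages is recovered every
 * minute.  Because the ratio is clamped to 3600, the computed period never
 * drops below 1000 ms.
 */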
6367
6368 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
6369 {
6370         bool was_recovery_enabled, is_recovery_enabled;
6371         uint old_period, new_period;
6372         int err;
6373
6374         was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
6375
6376         err = param_set_uint(val, kp);
6377         if (err)
6378                 return err;
6379
6380         is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
6381
6382         if (is_recovery_enabled &&
6383             (!was_recovery_enabled || old_period > new_period)) {
6384                 struct kvm *kvm;
6385
6386                 mutex_lock(&kvm_lock);
6387
6388                 list_for_each_entry(kvm, &vm_list, vm_list)
6389                         wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6390
6391                 mutex_unlock(&kvm_lock);
6392         }
6393
6394         return err;
6395 }
6396
6397 static void kvm_recover_nx_lpages(struct kvm *kvm)
6398 {
6399         unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
6400         int rcu_idx;
6401         struct kvm_mmu_page *sp;
6402         unsigned int ratio;
6403         LIST_HEAD(invalid_list);
6404         bool flush = false;
6405         ulong to_zap;
6406
6407         rcu_idx = srcu_read_lock(&kvm->srcu);
6408         write_lock(&kvm->mmu_lock);
6409
6410         /*
6411          * Zapping TDP MMU shadow pages, including the remote TLB flush, must
6412          * be done under RCU protection, because the pages are freed via RCU
6413          * callback.
6414          */
6415         rcu_read_lock();
6416
6417         ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
6418         to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
6419         for ( ; to_zap; --to_zap) {
6420                 if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
6421                         break;
6422
6423                 /*
6424                  * We use a separate list instead of just using active_mmu_pages
6425                  * because the number of lpage_disallowed pages is expected to
6426                  * be relatively small compared to the total.
6427                  */
6428                 sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
6429                                       struct kvm_mmu_page,
6430                                       lpage_disallowed_link);
6431                 WARN_ON_ONCE(!sp->lpage_disallowed);
6432                 if (is_tdp_mmu_page(sp)) {
6433                         flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
6434                 } else {
6435                         kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
6436                         WARN_ON_ONCE(sp->lpage_disallowed);
6437                 }
6438
6439                 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6440                         kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6441                         rcu_read_unlock();
6442
6443                         cond_resched_rwlock_write(&kvm->mmu_lock);
6444                         flush = false;
6445
6446                         rcu_read_lock();
6447                 }
6448         }
6449         kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6450
6451         rcu_read_unlock();
6452
6453         write_unlock(&kvm->mmu_lock);
6454         srcu_read_unlock(&kvm->srcu, rcu_idx);
6455 }
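
/*
 * Worked example (hypothetical values): with kvm->stat.nx_lpage_splits = 1000
 * and a recovery ratio of 60, to_zap = DIV_ROUND_UP(1000, 60) = 17, so each
 * pass of the recovery worker reclaims at most 17 of the shadow pages that
 * were created in place of a disallowed NX huge page.
 */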
6456
6457 static long get_nx_lpage_recovery_timeout(u64 start_time)
6458 {
6459         bool enabled;
6460         uint period;
6461
6462         enabled = calc_nx_huge_pages_recovery_period(&period);
6463
6464         return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
6465                        : MAX_SCHEDULE_TIMEOUT;
6466 }
6467
6468 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
6469 {
6470         u64 start_time;
6471         long remaining_time;
6472
6473         while (true) {
6474                 start_time = get_jiffies_64();
6475                 remaining_time = get_nx_lpage_recovery_timeout(start_time);
6476
6477                 set_current_state(TASK_INTERRUPTIBLE);
6478                 while (!kthread_should_stop() && remaining_time > 0) {
6479                         schedule_timeout(remaining_time);
6480                         remaining_time = get_nx_lpage_recovery_timeout(start_time);
6481                         set_current_state(TASK_INTERRUPTIBLE);
6482                 }
6483
6484                 set_current_state(TASK_RUNNING);
6485
6486                 if (kthread_should_stop())
6487                         return 0;
6488
6489                 kvm_recover_nx_lpages(kvm);
6490         }
6491 }
6492
6493 int kvm_mmu_post_init_vm(struct kvm *kvm)
6494 {
6495         int err;
6496
6497         err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6498                                           "kvm-nx-lpage-recovery",
6499                                           &kvm->arch.nx_lpage_recovery_thread);
6500         if (!err)
6501                 kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6502
6503         return err;
6504 }
6505
6506 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6507 {
6508         if (kvm->arch.nx_lpage_recovery_thread)
6509                 kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6510 }