[linux-2.6-microblaze.git] arch/x86/kvm/mmu/mmu.c @ 91b27538328fed499823427cf3ea4e84d5129584
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * MMU support
9  *
10  * Copyright (C) 2006 Qumranet, Inc.
11  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12  *
13  * Authors:
14  *   Yaniv Kamay  <yaniv@qumranet.com>
15  *   Avi Kivity   <avi@qumranet.com>
16  */
17
18 #include "irq.h"
19 #include "ioapic.h"
20 #include "mmu.h"
21 #include "mmu_internal.h"
22 #include "tdp_mmu.h"
23 #include "x86.h"
24 #include "kvm_cache_regs.h"
25 #include "kvm_emulate.h"
26 #include "cpuid.h"
27 #include "spte.h"
28
29 #include <linux/kvm_host.h>
30 #include <linux/types.h>
31 #include <linux/string.h>
32 #include <linux/mm.h>
33 #include <linux/highmem.h>
34 #include <linux/moduleparam.h>
35 #include <linux/export.h>
36 #include <linux/swap.h>
37 #include <linux/hugetlb.h>
38 #include <linux/compiler.h>
39 #include <linux/srcu.h>
40 #include <linux/slab.h>
41 #include <linux/sched/signal.h>
42 #include <linux/uaccess.h>
43 #include <linux/hash.h>
44 #include <linux/kern_levels.h>
45 #include <linux/kthread.h>
46
47 #include <asm/page.h>
48 #include <asm/memtype.h>
49 #include <asm/cmpxchg.h>
50 #include <asm/io.h>
51 #include <asm/set_memory.h>
52 #include <asm/vmx.h>
53 #include <asm/kvm_page_track.h>
54 #include "trace.h"
55
56 extern bool itlb_multihit_kvm_mitigation;
57
58 int __read_mostly nx_huge_pages = -1;
59 #ifdef CONFIG_PREEMPT_RT
60 /* Recovery can cause latency spikes; disable it for PREEMPT_RT. */
61 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
62 #else
63 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
64 #endif
65
66 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
67 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
68
69 static const struct kernel_param_ops nx_huge_pages_ops = {
70         .set = set_nx_huge_pages,
71         .get = param_get_bool,
72 };
73
74 static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
75         .set = set_nx_huge_pages_recovery_ratio,
76         .get = param_get_uint,
77 };
78
79 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
80 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
81 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
82                 &nx_huge_pages_recovery_ratio, 0644);
83 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
84
85 static bool __read_mostly force_flush_and_sync_on_reuse;
86 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
87
88 /*
89  * Setting this variable to true enables Two-Dimensional Paging (TDP),
90  * where the hardware walks 2 page tables:
91  * 1. the guest-virtual to guest-physical table
92  * 2. while doing 1., the guest-physical to host-physical table
93  * If the hardware supports TDP, shadow paging is not needed.
94  */
95 bool tdp_enabled = false;
96
97 static int max_huge_page_level __read_mostly;
98 static int max_tdp_level __read_mostly;
99
100 enum {
101         AUDIT_PRE_PAGE_FAULT,
102         AUDIT_POST_PAGE_FAULT,
103         AUDIT_PRE_PTE_WRITE,
104         AUDIT_POST_PTE_WRITE,
105         AUDIT_PRE_SYNC,
106         AUDIT_POST_SYNC
107 };
108
109 #ifdef MMU_DEBUG
110 bool dbg = 0;
111 module_param(dbg, bool, 0644);
112 #endif
113
114 #define PTE_PREFETCH_NUM                8
115
116 #define PT32_LEVEL_BITS 10
117
118 #define PT32_LEVEL_SHIFT(level) \
119                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
120
121 #define PT32_LVL_OFFSET_MASK(level) \
122         (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
123                                                 * PT32_LEVEL_BITS))) - 1))
124
125 #define PT32_INDEX(address, level)\
126         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
127
128
129 #define PT32_BASE_ADDR_MASK PAGE_MASK
130 #define PT32_DIR_BASE_ADDR_MASK \
131         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
132 #define PT32_LVL_ADDR_MASK(level) \
133         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
134                                             * PT32_LEVEL_BITS))) - 1))
135
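/*
 * For illustration (assuming the usual x86 PAGE_SHIFT of 12): with
 * PT32_LEVEL_BITS == 10, the 32-bit non-PAE macros above work out to
 *
 *      PT32_LEVEL_SHIFT(1) == 12       (4K PTE level)
 *      PT32_LEVEL_SHIFT(2) == 22       (4M PDE level)
 *      PT32_INDEX(addr, 1) == (addr >> 12) & 0x3ff
 *      PT32_INDEX(addr, 2) == (addr >> 22) & 0x3ff
 */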
136 #include <trace/events/kvm.h>
137
138 /* make pte_list_desc fit well in a cache line */
139 #define PTE_LIST_EXT 3
140
141 struct pte_list_desc {
142         u64 *sptes[PTE_LIST_EXT];
143         struct pte_list_desc *more;
144 };
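/*
 * For illustration: on a 64-bit kernel this is 3 * 8 + 8 = 32 bytes, so a
 * pte_list_desc fills half of a typical 64-byte cache line and two
 * descriptors can share one line.
 */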
145
146 struct kvm_shadow_walk_iterator {
147         u64 addr;
148         hpa_t shadow_addr;
149         u64 *sptep;
150         int level;
151         unsigned index;
152 };
153
154 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
155         for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
156                                          (_root), (_addr));                \
157              shadow_walk_okay(&(_walker));                                 \
158              shadow_walk_next(&(_walker)))
159
160 #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
161         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
162              shadow_walk_okay(&(_walker));                      \
163              shadow_walk_next(&(_walker)))
164
165 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)     \
166         for (shadow_walk_init(&(_walker), _vcpu, _addr);                \
167              shadow_walk_okay(&(_walker)) &&                            \
168                 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });  \
169              __shadow_walk_next(&(_walker), spte))
170
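/*
 * A minimal usage sketch of the walker macros above (the real users appear
 * later in this file); "addr" and the loop body are placeholders:
 *
 *      struct kvm_shadow_walk_iterator it;
 *
 *      for_each_shadow_entry(vcpu, addr, it) {
 *              // it.sptep points at the shadow PTE for the current level,
 *              // it.level is that level.
 *      }
 */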
171 static struct kmem_cache *pte_list_desc_cache;
172 struct kmem_cache *mmu_page_header_cache;
173 static struct percpu_counter kvm_total_used_mmu_pages;
174
175 static void mmu_spte_set(u64 *sptep, u64 spte);
176 static union kvm_mmu_page_role
177 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
178
179 struct kvm_mmu_role_regs {
180         const unsigned long cr0;
181         const unsigned long cr4;
182         const u64 efer;
183 };
184
185 #define CREATE_TRACE_POINTS
186 #include "mmutrace.h"
187
188 /*
189  * Yes, lot's of underscores.  They're a hint that you probably shouldn't be
190  * reading from the role_regs.  Once the mmu_role is constructed, it becomes
191  * the single source of truth for the MMU's state.
192  */
193 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)                   \
194 static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
195 {                                                                       \
196         return !!(regs->reg & flag);                                    \
197 }
198 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
199 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
200 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
201 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
202 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
203 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
204 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
205 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
206 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
207 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
208
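/*
 * For reference, BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG) above
 * expands to roughly:
 *
 *      static inline bool ____is_cr0_pg(struct kvm_mmu_role_regs *regs)
 *      {
 *              return !!(regs->cr0 & X86_CR0_PG);
 *      }
 */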
209 /*
210  * The MMU itself (with a valid role) is the single source of truth for the
211  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
212  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
213  * and the vCPU may be incorrect/irrelevant.
214  */
215 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)         \
216 static inline bool is_##reg##_##name(struct kvm_mmu *mmu)       \
217 {                                                               \
218         return !!(mmu->mmu_role. base_or_ext . reg##_##name);   \
219 }
220 BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
221 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
222 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
223 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
224 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
225 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
226 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
227 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
228 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
229
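/*
 * Likewise, BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp) above expands to roughly:
 *
 *      static inline bool is_cr0_wp(struct kvm_mmu *mmu)
 *      {
 *              return !!(mmu->mmu_role.base.cr0_wp);
 *      }
 */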
230 static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
231 {
232         struct kvm_mmu_role_regs regs = {
233                 .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
234                 .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
235                 .efer = vcpu->arch.efer,
236         };
237
238         return regs;
239 }
240
241 static inline bool kvm_available_flush_tlb_with_range(void)
242 {
243         return kvm_x86_ops.tlb_remote_flush_with_range;
244 }
245
246 static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
247                 struct kvm_tlb_range *range)
248 {
249         int ret = -ENOTSUPP;
250
251         if (range && kvm_x86_ops.tlb_remote_flush_with_range)
252                 ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
253
254         if (ret)
255                 kvm_flush_remote_tlbs(kvm);
256 }
257
258 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
259                 u64 start_gfn, u64 pages)
260 {
261         struct kvm_tlb_range range;
262
263         range.start_gfn = start_gfn;
264         range.pages = pages;
265
266         kvm_flush_remote_tlbs_with_range(kvm, &range);
267 }
268
269 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
270                            unsigned int access)
271 {
272         u64 spte = make_mmio_spte(vcpu, gfn, access);
273
274         trace_mark_mmio_spte(sptep, gfn, spte);
275         mmu_spte_set(sptep, spte);
276 }
277
278 static gfn_t get_mmio_spte_gfn(u64 spte)
279 {
280         u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
281
282         gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
283                & shadow_nonpresent_or_rsvd_mask;
284
285         return gpa >> PAGE_SHIFT;
286 }
287
288 static unsigned get_mmio_spte_access(u64 spte)
289 {
290         return spte & shadow_mmio_access_mask;
291 }
292
293 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
294 {
295         u64 kvm_gen, spte_gen, gen;
296
297         gen = kvm_vcpu_memslots(vcpu)->generation;
298         if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
299                 return false;
300
301         kvm_gen = gen & MMIO_SPTE_GEN_MASK;
302         spte_gen = get_mmio_spte_generation(spte);
303
304         trace_check_mmio_spte(spte, kvm_gen, spte_gen);
305         return likely(kvm_gen == spte_gen);
306 }
307
308 static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
309                                   struct x86_exception *exception)
310 {
311         /* Check that the guest physical address does not exceed the guest maximum */
312         if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
313                 exception->error_code |= PFERR_RSVD_MASK;
314                 return UNMAPPED_GVA;
315         }
316
317         return gpa;
318 }
319
320 static int is_cpuid_PSE36(void)
321 {
322         return 1;
323 }
324
325 static gfn_t pse36_gfn_delta(u32 gpte)
326 {
327         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
328
329         return (gpte & PT32_DIR_PSE36_MASK) << shift;
330 }
331
332 #ifdef CONFIG_X86_64
333 static void __set_spte(u64 *sptep, u64 spte)
334 {
335         WRITE_ONCE(*sptep, spte);
336 }
337
338 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
339 {
340         WRITE_ONCE(*sptep, spte);
341 }
342
343 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
344 {
345         return xchg(sptep, spte);
346 }
347
348 static u64 __get_spte_lockless(u64 *sptep)
349 {
350         return READ_ONCE(*sptep);
351 }
352 #else
353 union split_spte {
354         struct {
355                 u32 spte_low;
356                 u32 spte_high;
357         };
358         u64 spte;
359 };
360
361 static void count_spte_clear(u64 *sptep, u64 spte)
362 {
363         struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
364
365         if (is_shadow_present_pte(spte))
366                 return;
367
368         /* Ensure the spte is completely set before we increase the count */
369         smp_wmb();
370         sp->clear_spte_count++;
371 }
372
373 static void __set_spte(u64 *sptep, u64 spte)
374 {
375         union split_spte *ssptep, sspte;
376
377         ssptep = (union split_spte *)sptep;
378         sspte = (union split_spte)spte;
379
380         ssptep->spte_high = sspte.spte_high;
381
382         /*
383          * If we map the spte from nonpresent to present, we should store
384          * the high bits first and only then set the present bit, so the
385          * CPU cannot fetch this spte while we are setting it.
386          */
387         smp_wmb();
388
389         WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
390 }
391
392 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
393 {
394         union split_spte *ssptep, sspte;
395
396         ssptep = (union split_spte *)sptep;
397         sspte = (union split_spte)spte;
398
399         WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
400
401         /*
402          * If we map the spte from present to nonpresent, we should clear
403          * the present bit first to avoid the vCPU fetching the old high bits.
404          */
405         smp_wmb();
406
407         ssptep->spte_high = sspte.spte_high;
408         count_spte_clear(sptep, spte);
409 }
410
411 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
412 {
413         union split_spte *ssptep, sspte, orig;
414
415         ssptep = (union split_spte *)sptep;
416         sspte = (union split_spte)spte;
417
418         /* xchg acts as a barrier before the setting of the high bits */
419         orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
420         orig.spte_high = ssptep->spte_high;
421         ssptep->spte_high = sspte.spte_high;
422         count_spte_clear(sptep, spte);
423
424         return orig.spte;
425 }
426
427 /*
428  * The idea of using this lightweight way to get the spte on x86_32 is
429  * borrowed from gup_get_pte (mm/gup.c).
430  *
431  * An spte TLB flush may be pending, because kvm_set_pte_rmapp
432  * coalesces them and we are running outside of the MMU lock.  Therefore
433  * we need to protect against in-progress updates of the spte.
434  *
435  * Reading the spte while an update is in progress may get the old value
436  * for the high part of the spte.  The race is fine for a present->non-present
437  * change (because the high part of the spte is ignored for non-present spte),
438  * but for a present->present change we must reread the spte.
439  *
440  * All such changes are done in two steps (present->non-present and
441  * non-present->present), hence it is enough to count the number of
442  * present->non-present updates: if it changed while reading the spte,
443  * we might have hit the race.  This is done using clear_spte_count.
444  */
445 static u64 __get_spte_lockless(u64 *sptep)
446 {
447         struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
448         union split_spte spte, *orig = (union split_spte *)sptep;
449         int count;
450
451 retry:
452         count = sp->clear_spte_count;
453         smp_rmb();
454
455         spte.spte_low = orig->spte_low;
456         smp_rmb();
457
458         spte.spte_high = orig->spte_high;
459         smp_rmb();
460
461         if (unlikely(spte.spte_low != orig->spte_low ||
462               count != sp->clear_spte_count))
463                 goto retry;
464
465         return spte.spte;
466 }
467 #endif
468
469 static bool spte_has_volatile_bits(u64 spte)
470 {
471         if (!is_shadow_present_pte(spte))
472                 return false;
473
474         /*
475          * Always update the spte atomically if it can be updated
476          * outside of the mmu-lock: this ensures the dirty bit is not
477          * lost and also gives us a stable is_writable_pte(), so that
478          * a TLB flush is not missed.
479          */
480         if (spte_can_locklessly_be_made_writable(spte) ||
481             is_access_track_spte(spte))
482                 return true;
483
484         if (spte_ad_enabled(spte)) {
485                 if ((spte & shadow_accessed_mask) == 0 ||
486                     (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
487                         return true;
488         }
489
490         return false;
491 }
492
493 /* Rules for using mmu_spte_set:
494  * Set the sptep from nonpresent to present.
495  * Note: the sptep being assigned *must* be either not present
496  * or in a state where the hardware will not attempt to update
497  * the spte.
498  */
499 static void mmu_spte_set(u64 *sptep, u64 new_spte)
500 {
501         WARN_ON(is_shadow_present_pte(*sptep));
502         __set_spte(sptep, new_spte);
503 }
504
505 /*
506  * Update the SPTE (excluding the PFN), but do not track changes in its
507  * accessed/dirty status.
508  */
509 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
510 {
511         u64 old_spte = *sptep;
512
513         WARN_ON(!is_shadow_present_pte(new_spte));
514
515         if (!is_shadow_present_pte(old_spte)) {
516                 mmu_spte_set(sptep, new_spte);
517                 return old_spte;
518         }
519
520         if (!spte_has_volatile_bits(old_spte))
521                 __update_clear_spte_fast(sptep, new_spte);
522         else
523                 old_spte = __update_clear_spte_slow(sptep, new_spte);
524
525         WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
526
527         return old_spte;
528 }
529
530 /* Rules for using mmu_spte_update:
531  * Update the state bits; the mapped pfn is not changed.
532  *
533  * Whenever we overwrite a writable spte with a read-only one we
534  * should flush remote TLBs.  Otherwise rmap_write_protect
535  * will find a read-only spte even though the writable spte
536  * might still be cached in a CPU's TLB; the return value indicates
537  * this case.
538  *
539  * Returns true if the TLB needs to be flushed
540  */
541 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
542 {
543         bool flush = false;
544         u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
545
546         if (!is_shadow_present_pte(old_spte))
547                 return false;
548
549         /*
550          * An spte updated outside of the mmu-lock is safe, since
551          * we always update it atomically; see the comments in
552          * spte_has_volatile_bits().
553          */
554         if (spte_can_locklessly_be_made_writable(old_spte) &&
555               !is_writable_pte(new_spte))
556                 flush = true;
557
558         /*
559          * Flush TLB when accessed/dirty states are changed in the page tables,
560          * to guarantee consistency between TLB and page tables.
561          */
562
563         if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
564                 flush = true;
565                 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
566         }
567
568         if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
569                 flush = true;
570                 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
571         }
572
573         return flush;
574 }
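/*
 * A minimal sketch of the expected calling pattern (the real call sites are
 * further down in this file); kvm, sptep and new_spte are placeholders:
 *
 *      if (mmu_spte_update(sptep, new_spte))
 *              kvm_flush_remote_tlbs(kvm);
 */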
575
576 /*
577  * Rules for using mmu_spte_clear_track_bits:
578  * It sets the sptep from present to nonpresent and tracks the
579  * state bits; it is used to clear a last-level sptep.
580  * Returns non-zero if the PTE was previously valid.
581  */
582 static int mmu_spte_clear_track_bits(u64 *sptep)
583 {
584         kvm_pfn_t pfn;
585         u64 old_spte = *sptep;
586
587         if (!spte_has_volatile_bits(old_spte))
588                 __update_clear_spte_fast(sptep, 0ull);
589         else
590                 old_spte = __update_clear_spte_slow(sptep, 0ull);
591
592         if (!is_shadow_present_pte(old_spte))
593                 return 0;
594
595         pfn = spte_to_pfn(old_spte);
596
597         /*
598          * KVM does not hold a refcount on the pages used by the
599          * KVM MMU, so before a page is reclaimed it must first be
600          * unmapped from the MMU.
601          */
602         WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
603
604         if (is_accessed_spte(old_spte))
605                 kvm_set_pfn_accessed(pfn);
606
607         if (is_dirty_spte(old_spte))
608                 kvm_set_pfn_dirty(pfn);
609
610         return 1;
611 }
612
613 /*
614  * Rules for using mmu_spte_clear_no_track:
615  * Directly clear the spte without caring about the state bits of the
616  * sptep; it is used to clear an upper-level spte.
617  */
618 static void mmu_spte_clear_no_track(u64 *sptep)
619 {
620         __update_clear_spte_fast(sptep, 0ull);
621 }
622
623 static u64 mmu_spte_get_lockless(u64 *sptep)
624 {
625         return __get_spte_lockless(sptep);
626 }
627
628 /* Restore an acc-track PTE back to a regular PTE */
629 static u64 restore_acc_track_spte(u64 spte)
630 {
631         u64 new_spte = spte;
632         u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
633                          & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
634
635         WARN_ON_ONCE(spte_ad_enabled(spte));
636         WARN_ON_ONCE(!is_access_track_spte(spte));
637
638         new_spte &= ~shadow_acc_track_mask;
639         new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
640                       SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
641         new_spte |= saved_bits;
642
643         return new_spte;
644 }
645
646 /* Returns the Accessed status of the PTE and resets it at the same time. */
647 static bool mmu_spte_age(u64 *sptep)
648 {
649         u64 spte = mmu_spte_get_lockless(sptep);
650
651         if (!is_accessed_spte(spte))
652                 return false;
653
654         if (spte_ad_enabled(spte)) {
655                 clear_bit((ffs(shadow_accessed_mask) - 1),
656                           (unsigned long *)sptep);
657         } else {
658                 /*
659                  * Capture the dirty status of the page, so that it doesn't get
660                  * lost when the SPTE is marked for access tracking.
661                  */
662                 if (is_writable_pte(spte))
663                         kvm_set_pfn_dirty(spte_to_pfn(spte));
664
665                 spte = mark_spte_for_access_track(spte);
666                 mmu_spte_update_no_track(sptep, spte);
667         }
668
669         return true;
670 }
671
672 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
673 {
674         /*
675          * Prevent page table teardown by making anyone freeing page tables
676          * wait during the kvm_flush_remote_tlbs() IPI to all active vCPUs.
677          */
678         local_irq_disable();
679
680         /*
681          * Make sure a following spte read is not reordered ahead of the write
682          * to vcpu->mode.
683          */
684         smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
685 }
686
687 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
688 {
689         /*
690          * Make sure the write to vcpu->mode is not reordered in front of
691          * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
692          * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
693          */
694         smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
695         local_irq_enable();
696 }
697
698 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
699 {
700         int r;
701
702         /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
703         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
704                                        1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
705         if (r)
706                 return r;
707         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
708                                        PT64_ROOT_MAX_LEVEL);
709         if (r)
710                 return r;
711         if (maybe_indirect) {
712                 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
713                                                PT64_ROOT_MAX_LEVEL);
714                 if (r)
715                         return r;
716         }
717         return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
718                                           PT64_ROOT_MAX_LEVEL);
719 }
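/*
 * For illustration: the pte_list_desc cache above is topped up with
 * 1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM entries, i.e. 1 + 5 + 8 = 14
 * assuming PT64_ROOT_MAX_LEVEL is 5; the other caches only need one object
 * per paging level.
 */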
720
721 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
722 {
723         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
724         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
725         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
726         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
727 }
728
729 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
730 {
731         return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
732 }
733
734 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
735 {
736         kmem_cache_free(pte_list_desc_cache, pte_list_desc);
737 }
738
739 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
740 {
741         if (!sp->role.direct)
742                 return sp->gfns[index];
743
744         return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
745 }
746
747 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
748 {
749         if (!sp->role.direct) {
750                 sp->gfns[index] = gfn;
751                 return;
752         }
753
754         if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
755                 pr_err_ratelimited("gfn mismatch under direct page %llx "
756                                    "(expected %llx, got %llx)\n",
757                                    sp->gfn,
758                                    kvm_mmu_page_get_gfn(sp, index), gfn);
759 }
760
761 /*
762  * Return the pointer to the large page information for a given gfn,
763  * handling slots that are not large page aligned.
764  */
765 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
766                 const struct kvm_memory_slot *slot, int level)
767 {
768         unsigned long idx;
769
770         idx = gfn_to_index(gfn, slot->base_gfn, level);
771         return &slot->arch.lpage_info[level - 2][idx];
772 }
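/*
 * For example, for level == PG_LEVEL_2M this uses lpage_info[level - 2], i.e.
 * index [0], and gfn_to_index() reduces the gfn to 2M-page granularity
 * relative to the slot's base gfn (512 4K pages per 2M page on x86).
 */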
773
774 static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
775                                             gfn_t gfn, int count)
776 {
777         struct kvm_lpage_info *linfo;
778         int i;
779
780         for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
781                 linfo = lpage_info_slot(gfn, slot, i);
782                 linfo->disallow_lpage += count;
783                 WARN_ON(linfo->disallow_lpage < 0);
784         }
785 }
786
787 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
788 {
789         update_gfn_disallow_lpage_count(slot, gfn, 1);
790 }
791
792 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
793 {
794         update_gfn_disallow_lpage_count(slot, gfn, -1);
795 }
796
797 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
798 {
799         struct kvm_memslots *slots;
800         struct kvm_memory_slot *slot;
801         gfn_t gfn;
802
803         kvm->arch.indirect_shadow_pages++;
804         gfn = sp->gfn;
805         slots = kvm_memslots_for_spte_role(kvm, sp->role);
806         slot = __gfn_to_memslot(slots, gfn);
807
808         /* Non-leaf shadow pages are kept read-only. */
809         if (sp->role.level > PG_LEVEL_4K)
810                 return kvm_slot_page_track_add_page(kvm, slot, gfn,
811                                                     KVM_PAGE_TRACK_WRITE);
812
813         kvm_mmu_gfn_disallow_lpage(slot, gfn);
814 }
815
816 void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
817 {
818         if (sp->lpage_disallowed)
819                 return;
820
821         ++kvm->stat.nx_lpage_splits;
822         list_add_tail(&sp->lpage_disallowed_link,
823                       &kvm->arch.lpage_disallowed_mmu_pages);
824         sp->lpage_disallowed = true;
825 }
826
827 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
828 {
829         struct kvm_memslots *slots;
830         struct kvm_memory_slot *slot;
831         gfn_t gfn;
832
833         kvm->arch.indirect_shadow_pages--;
834         gfn = sp->gfn;
835         slots = kvm_memslots_for_spte_role(kvm, sp->role);
836         slot = __gfn_to_memslot(slots, gfn);
837         if (sp->role.level > PG_LEVEL_4K)
838                 return kvm_slot_page_track_remove_page(kvm, slot, gfn,
839                                                        KVM_PAGE_TRACK_WRITE);
840
841         kvm_mmu_gfn_allow_lpage(slot, gfn);
842 }
843
844 void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
845 {
846         --kvm->stat.nx_lpage_splits;
847         sp->lpage_disallowed = false;
848         list_del(&sp->lpage_disallowed_link);
849 }
850
851 static struct kvm_memory_slot *
852 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
853                             bool no_dirty_log)
854 {
855         struct kvm_memory_slot *slot;
856
857         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
858         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
859                 return NULL;
860         if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
861                 return NULL;
862
863         return slot;
864 }
865
866 /*
867  * About rmap_head encoding:
868  *
869  * If the bit zero of rmap_head->val is clear, then it points to the only spte
870  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
871  * pte_list_desc containing more mappings.
872  */
873
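/*
 * A minimal decoding sketch of the encoding described above (not an existing
 * helper; rmap_head, sptep and desc are placeholders):
 *
 *      if (!rmap_head->val)
 *              ;                               // no sptes map this gfn
 *      else if (!(rmap_head->val & 1))
 *              sptep = (u64 *)rmap_head->val;  // exactly one spte
 *      else
 *              desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 *                                              // chain of pte_list_desc
 */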
874 /*
875  * Returns the number of pointers in the rmap chain, not counting the new one.
876  */
877 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
878                         struct kvm_rmap_head *rmap_head)
879 {
880         struct pte_list_desc *desc;
881         int i, count = 0;
882
883         if (!rmap_head->val) {
884                 rmap_printk("%p %llx 0->1\n", spte, *spte);
885                 rmap_head->val = (unsigned long)spte;
886         } else if (!(rmap_head->val & 1)) {
887                 rmap_printk("%p %llx 1->many\n", spte, *spte);
888                 desc = mmu_alloc_pte_list_desc(vcpu);
889                 desc->sptes[0] = (u64 *)rmap_head->val;
890                 desc->sptes[1] = spte;
891                 rmap_head->val = (unsigned long)desc | 1;
892                 ++count;
893         } else {
894                 rmap_printk("%p %llx many->many\n", spte, *spte);
895                 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
896                 while (desc->sptes[PTE_LIST_EXT-1]) {
897                         count += PTE_LIST_EXT;
898
899                         if (!desc->more) {
900                                 desc->more = mmu_alloc_pte_list_desc(vcpu);
901                                 desc = desc->more;
902                                 break;
903                         }
904                         desc = desc->more;
905                 }
906                 for (i = 0; desc->sptes[i]; ++i)
907                         ++count;
908                 desc->sptes[i] = spte;
909         }
910         return count;
911 }
912
913 static void
914 pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
915                            struct pte_list_desc *desc, int i,
916                            struct pte_list_desc *prev_desc)
917 {
918         int j;
919
920         for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
921                 ;
922         desc->sptes[i] = desc->sptes[j];
923         desc->sptes[j] = NULL;
924         if (j != 0)
925                 return;
926         if (!prev_desc && !desc->more)
927                 rmap_head->val = 0;
928         else
929                 if (prev_desc)
930                         prev_desc->more = desc->more;
931                 else
932                         rmap_head->val = (unsigned long)desc->more | 1;
933         mmu_free_pte_list_desc(desc);
934 }
935
936 static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
937 {
938         struct pte_list_desc *desc;
939         struct pte_list_desc *prev_desc;
940         int i;
941
942         if (!rmap_head->val) {
943                 pr_err("%s: %p 0->BUG\n", __func__, spte);
944                 BUG();
945         } else if (!(rmap_head->val & 1)) {
946                 rmap_printk("%p 1->0\n", spte);
947                 if ((u64 *)rmap_head->val != spte) {
948                         pr_err("%s:  %p 1->BUG\n", __func__, spte);
949                         BUG();
950                 }
951                 rmap_head->val = 0;
952         } else {
953                 rmap_printk("%p many->many\n", spte);
954                 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
955                 prev_desc = NULL;
956                 while (desc) {
957                         for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
958                                 if (desc->sptes[i] == spte) {
959                                         pte_list_desc_remove_entry(rmap_head,
960                                                         desc, i, prev_desc);
961                                         return;
962                                 }
963                         }
964                         prev_desc = desc;
965                         desc = desc->more;
966                 }
967                 pr_err("%s: %p many->many\n", __func__, spte);
968                 BUG();
969         }
970 }
971
972 static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
973 {
974         mmu_spte_clear_track_bits(sptep);
975         __pte_list_remove(sptep, rmap_head);
976 }
977
978 static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
979                                            struct kvm_memory_slot *slot)
980 {
981         unsigned long idx;
982
983         idx = gfn_to_index(gfn, slot->base_gfn, level);
984         return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
985 }
986
987 static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
988                                          struct kvm_mmu_page *sp)
989 {
990         struct kvm_memslots *slots;
991         struct kvm_memory_slot *slot;
992
993         slots = kvm_memslots_for_spte_role(kvm, sp->role);
994         slot = __gfn_to_memslot(slots, gfn);
995         return __gfn_to_rmap(gfn, sp->role.level, slot);
996 }
997
998 static bool rmap_can_add(struct kvm_vcpu *vcpu)
999 {
1000         struct kvm_mmu_memory_cache *mc;
1001
1002         mc = &vcpu->arch.mmu_pte_list_desc_cache;
1003         return kvm_mmu_memory_cache_nr_free_objects(mc);
1004 }
1005
1006 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1007 {
1008         struct kvm_mmu_page *sp;
1009         struct kvm_rmap_head *rmap_head;
1010
1011         sp = sptep_to_sp(spte);
1012         kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1013         rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1014         return pte_list_add(vcpu, spte, rmap_head);
1015 }
1016
1017 static void rmap_remove(struct kvm *kvm, u64 *spte)
1018 {
1019         struct kvm_mmu_page *sp;
1020         gfn_t gfn;
1021         struct kvm_rmap_head *rmap_head;
1022
1023         sp = sptep_to_sp(spte);
1024         gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1025         rmap_head = gfn_to_rmap(kvm, gfn, sp);
1026         __pte_list_remove(spte, rmap_head);
1027 }
1028
1029 /*
1030  * Used by the following functions to iterate through the sptes linked by a
1031  * rmap.  All fields are private and not assumed to be used outside.
1032  */
1033 struct rmap_iterator {
1034         /* private fields */
1035         struct pte_list_desc *desc;     /* holds the sptep if not NULL */
1036         int pos;                        /* index of the sptep */
1037 };
1038
1039 /*
1040  * Iteration must be started by this function.  This should also be used after
1041  * removing/dropping sptes from the rmap link because in such cases the
1042  * information in the iterator may not be valid.
1043  *
1044  * Returns sptep if found, NULL otherwise.
1045  */
1046 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1047                            struct rmap_iterator *iter)
1048 {
1049         u64 *sptep;
1050
1051         if (!rmap_head->val)
1052                 return NULL;
1053
1054         if (!(rmap_head->val & 1)) {
1055                 iter->desc = NULL;
1056                 sptep = (u64 *)rmap_head->val;
1057                 goto out;
1058         }
1059
1060         iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1061         iter->pos = 0;
1062         sptep = iter->desc->sptes[iter->pos];
1063 out:
1064         BUG_ON(!is_shadow_present_pte(*sptep));
1065         return sptep;
1066 }
1067
1068 /*
1069  * Must be used with a valid iterator: e.g. after rmap_get_first().
1070  *
1071  * Returns sptep if found, NULL otherwise.
1072  */
1073 static u64 *rmap_get_next(struct rmap_iterator *iter)
1074 {
1075         u64 *sptep;
1076
1077         if (iter->desc) {
1078                 if (iter->pos < PTE_LIST_EXT - 1) {
1079                         ++iter->pos;
1080                         sptep = iter->desc->sptes[iter->pos];
1081                         if (sptep)
1082                                 goto out;
1083                 }
1084
1085                 iter->desc = iter->desc->more;
1086
1087                 if (iter->desc) {
1088                         iter->pos = 0;
1089                         /* desc->sptes[0] cannot be NULL */
1090                         sptep = iter->desc->sptes[iter->pos];
1091                         goto out;
1092                 }
1093         }
1094
1095         return NULL;
1096 out:
1097         BUG_ON(!is_shadow_present_pte(*sptep));
1098         return sptep;
1099 }
1100
1101 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)                 \
1102         for (_spte_ = rmap_get_first(_rmap_head_, _iter_);              \
1103              _spte_; _spte_ = rmap_get_next(_iter_))
1104
1105 static void drop_spte(struct kvm *kvm, u64 *sptep)
1106 {
1107         if (mmu_spte_clear_track_bits(sptep))
1108                 rmap_remove(kvm, sptep);
1109 }
1110
1111
1112 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1113 {
1114         if (is_large_pte(*sptep)) {
1115                 WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1116                 drop_spte(kvm, sptep);
1117                 --kvm->stat.lpages;
1118                 return true;
1119         }
1120
1121         return false;
1122 }
1123
1124 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1125 {
1126         if (__drop_large_spte(vcpu->kvm, sptep)) {
1127                 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1128
1129                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1130                         KVM_PAGES_PER_HPAGE(sp->role.level));
1131         }
1132 }
1133
1134 /*
1135  * Write-protect the specified @sptep; @pt_protect indicates whether the
1136  * spte write-protection is caused by protecting a shadow page table.
1137  *
1138  * Note: write protection differs between dirty logging and spte
1139  * protection:
1140  * - for dirty logging, the spte can be made writable at any time if
1141  *   its dirty bitmap is properly set.
1142  * - for spte protection, the spte can be made writable only after the
1143  *   shadow page is unsynced.
1144  *
1145  * Return true if the TLB needs to be flushed.
1146  */
1147 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1148 {
1149         u64 spte = *sptep;
1150
1151         if (!is_writable_pte(spte) &&
1152               !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1153                 return false;
1154
1155         rmap_printk("spte %p %llx\n", sptep, *sptep);
1156
1157         if (pt_protect)
1158                 spte &= ~shadow_mmu_writable_mask;
1159         spte = spte & ~PT_WRITABLE_MASK;
1160
1161         return mmu_spte_update(sptep, spte);
1162 }
1163
1164 static bool __rmap_write_protect(struct kvm *kvm,
1165                                  struct kvm_rmap_head *rmap_head,
1166                                  bool pt_protect)
1167 {
1168         u64 *sptep;
1169         struct rmap_iterator iter;
1170         bool flush = false;
1171
1172         for_each_rmap_spte(rmap_head, &iter, sptep)
1173                 flush |= spte_write_protect(sptep, pt_protect);
1174
1175         return flush;
1176 }
1177
1178 static bool spte_clear_dirty(u64 *sptep)
1179 {
1180         u64 spte = *sptep;
1181
1182         rmap_printk("spte %p %llx\n", sptep, *sptep);
1183
1184         MMU_WARN_ON(!spte_ad_enabled(spte));
1185         spte &= ~shadow_dirty_mask;
1186         return mmu_spte_update(sptep, spte);
1187 }
1188
1189 static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1190 {
1191         bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1192                                                (unsigned long *)sptep);
1193         if (was_writable && !spte_ad_enabled(*sptep))
1194                 kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1195
1196         return was_writable;
1197 }
1198
1199 /*
1200  * Gets the GFN ready for another round of dirty logging by clearing the
1201  *      - D bit on ad-enabled SPTEs, and
1202  *      - W bit on ad-disabled SPTEs.
1203  * Returns true iff any D or W bits were cleared.
1204  */
1205 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1206                                struct kvm_memory_slot *slot)
1207 {
1208         u64 *sptep;
1209         struct rmap_iterator iter;
1210         bool flush = false;
1211
1212         for_each_rmap_spte(rmap_head, &iter, sptep)
1213                 if (spte_ad_need_write_protect(*sptep))
1214                         flush |= spte_wrprot_for_clear_dirty(sptep);
1215                 else
1216                         flush |= spte_clear_dirty(sptep);
1217
1218         return flush;
1219 }
1220
1221 /**
1222  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1223  * @kvm: kvm instance
1224  * @slot: slot to protect
1225  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1226  * @mask: indicates which pages we should protect
1227  *
1228  * Used when we do not need to care about huge page mappings.
1229  */
1230 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1231                                      struct kvm_memory_slot *slot,
1232                                      gfn_t gfn_offset, unsigned long mask)
1233 {
1234         struct kvm_rmap_head *rmap_head;
1235
1236         if (is_tdp_mmu_enabled(kvm))
1237                 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1238                                 slot->base_gfn + gfn_offset, mask, true);
1239
1240         if (!kvm_memslots_have_rmaps(kvm))
1241                 return;
1242
1243         while (mask) {
1244                 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1245                                           PG_LEVEL_4K, slot);
1246                 __rmap_write_protect(kvm, rmap_head, false);
1247
1248                 /* clear the first set bit */
1249                 mask &= mask - 1;
1250         }
1251 }
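/*
 * For illustration: bit N set in @mask means the 4K page at gfn
 * slot->base_gfn + gfn_offset + N is write-protected; "mask &= mask - 1"
 * clears the lowest set bit so each set bit is visited exactly once.
 */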
1252
1253 /**
1254  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1255  * protect the page if the D-bit isn't supported.
1256  * @kvm: kvm instance
1257  * @slot: slot to clear D-bit
1258  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1259  * @mask: indicates which pages we should clear D-bit
1260  *
1261  * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1262  */
1263 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1264                                          struct kvm_memory_slot *slot,
1265                                          gfn_t gfn_offset, unsigned long mask)
1266 {
1267         struct kvm_rmap_head *rmap_head;
1268
1269         if (is_tdp_mmu_enabled(kvm))
1270                 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1271                                 slot->base_gfn + gfn_offset, mask, false);
1272
1273         if (!kvm_memslots_have_rmaps(kvm))
1274                 return;
1275
1276         while (mask) {
1277                 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1278                                           PG_LEVEL_4K, slot);
1279                 __rmap_clear_dirty(kvm, rmap_head, slot);
1280
1281                 /* clear the first set bit */
1282                 mask &= mask - 1;
1283         }
1284 }
1285
1286 /**
1287  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1288  * PT level pages.
1289  *
1290  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1291  * enable dirty logging for them.
1292  *
1293  * We need to care about huge page mappings: e.g. during dirty logging we may
1294  * have such mappings.
1295  */
1296 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1297                                 struct kvm_memory_slot *slot,
1298                                 gfn_t gfn_offset, unsigned long mask)
1299 {
1300         /*
1301          * Huge pages are NOT write protected when we start dirty logging in
1302  * initially-all-set mode; we must write protect them here so that they
1303          * are split to 4K on the first write.
1304          *
1305          * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
1306          * of memslot has no such restriction, so the range can cross two large
1307          * pages.
1308          */
1309         if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1310                 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1311                 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1312
1313                 kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1314
1315                 /* Cross two large pages? */
1316                 if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1317                     ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1318                         kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1319                                                        PG_LEVEL_2M);
1320         }
1321
1322         /* Now handle 4K PTEs.  */
1323         if (kvm_x86_ops.cpu_dirty_log_size)
1324                 kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1325         else
1326                 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1327 }
1328
1329 int kvm_cpu_dirty_log_size(void)
1330 {
1331         return kvm_x86_ops.cpu_dirty_log_size;
1332 }
1333
1334 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1335                                     struct kvm_memory_slot *slot, u64 gfn,
1336                                     int min_level)
1337 {
1338         struct kvm_rmap_head *rmap_head;
1339         int i;
1340         bool write_protected = false;
1341
1342         if (kvm_memslots_have_rmaps(kvm)) {
1343                 for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1344                         rmap_head = __gfn_to_rmap(gfn, i, slot);
1345                         write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1346                 }
1347         }
1348
1349         if (is_tdp_mmu_enabled(kvm))
1350                 write_protected |=
1351                         kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1352
1353         return write_protected;
1354 }
1355
1356 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1357 {
1358         struct kvm_memory_slot *slot;
1359
1360         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1361         return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1362 }
1363
1364 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1365                           struct kvm_memory_slot *slot)
1366 {
1367         u64 *sptep;
1368         struct rmap_iterator iter;
1369         bool flush = false;
1370
1371         while ((sptep = rmap_get_first(rmap_head, &iter))) {
1372                 rmap_printk("spte %p %llx.\n", sptep, *sptep);
1373
1374                 pte_list_remove(rmap_head, sptep);
1375                 flush = true;
1376         }
1377
1378         return flush;
1379 }
1380
1381 static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1382                             struct kvm_memory_slot *slot, gfn_t gfn, int level,
1383                             pte_t unused)
1384 {
1385         return kvm_zap_rmapp(kvm, rmap_head, slot);
1386 }
1387
1388 static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1389                               struct kvm_memory_slot *slot, gfn_t gfn, int level,
1390                               pte_t pte)
1391 {
1392         u64 *sptep;
1393         struct rmap_iterator iter;
1394         int need_flush = 0;
1395         u64 new_spte;
1396         kvm_pfn_t new_pfn;
1397
1398         WARN_ON(pte_huge(pte));
1399         new_pfn = pte_pfn(pte);
1400
1401 restart:
1402         for_each_rmap_spte(rmap_head, &iter, sptep) {
1403                 rmap_printk("spte %p %llx gfn %llx (%d)\n",
1404                             sptep, *sptep, gfn, level);
1405
1406                 need_flush = 1;
1407
1408                 if (pte_write(pte)) {
1409                         pte_list_remove(rmap_head, sptep);
1410                         goto restart;
1411                 } else {
1412                         new_spte = kvm_mmu_changed_pte_notifier_make_spte(
1413                                         *sptep, new_pfn);
1414
1415                         mmu_spte_clear_track_bits(sptep);
1416                         mmu_spte_set(sptep, new_spte);
1417                 }
1418         }
1419
1420         if (need_flush && kvm_available_flush_tlb_with_range()) {
1421                 kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1422                 return 0;
1423         }
1424
1425         return need_flush;
1426 }
1427
1428 struct slot_rmap_walk_iterator {
1429         /* input fields. */
1430         struct kvm_memory_slot *slot;
1431         gfn_t start_gfn;
1432         gfn_t end_gfn;
1433         int start_level;
1434         int end_level;
1435
1436         /* output fields. */
1437         gfn_t gfn;
1438         struct kvm_rmap_head *rmap;
1439         int level;
1440
1441         /* private field. */
1442         struct kvm_rmap_head *end_rmap;
1443 };
1444
1445 static void
1446 rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1447 {
1448         iterator->level = level;
1449         iterator->gfn = iterator->start_gfn;
1450         iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
1451         iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
1452                                            iterator->slot);
1453 }
1454
1455 static void
1456 slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1457                     struct kvm_memory_slot *slot, int start_level,
1458                     int end_level, gfn_t start_gfn, gfn_t end_gfn)
1459 {
1460         iterator->slot = slot;
1461         iterator->start_level = start_level;
1462         iterator->end_level = end_level;
1463         iterator->start_gfn = start_gfn;
1464         iterator->end_gfn = end_gfn;
1465
1466         rmap_walk_init_level(iterator, iterator->start_level);
1467 }
1468
1469 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1470 {
1471         return !!iterator->rmap;
1472 }
1473
1474 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1475 {
1476         if (++iterator->rmap <= iterator->end_rmap) {
1477                 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1478                 return;
1479         }
1480
1481         if (++iterator->level > iterator->end_level) {
1482                 iterator->rmap = NULL;
1483                 return;
1484         }
1485
1486         rmap_walk_init_level(iterator, iterator->level);
1487 }
1488
1489 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,    \
1490            _start_gfn, _end_gfn, _iter_)                                \
1491         for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,         \
1492                                  _end_level_, _start_gfn, _end_gfn);    \
1493              slot_rmap_walk_okay(_iter_);                               \
1494              slot_rmap_walk_next(_iter_))
1495
1496 typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1497                                struct kvm_memory_slot *slot, gfn_t gfn,
1498                                int level, pte_t pte);
1499
1500 static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
1501                                                  struct kvm_gfn_range *range,
1502                                                  rmap_handler_t handler)
1503 {
1504         struct slot_rmap_walk_iterator iterator;
1505         bool ret = false;
1506
1507         for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1508                                  range->start, range->end - 1, &iterator)
1509                 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1510                                iterator.level, range->pte);
1511
1512         return ret;
1513 }
1514
1515 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1516 {
1517         bool flush = false;
1518
1519         if (kvm_memslots_have_rmaps(kvm))
1520                 flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
1521
1522         if (is_tdp_mmu_enabled(kvm))
1523                 flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1524
1525         return flush;
1526 }
1527
1528 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1529 {
1530         bool flush = false;
1531
1532         if (kvm_memslots_have_rmaps(kvm))
1533                 flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
1534
1535         if (is_tdp_mmu_enabled(kvm))
1536                 flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
1537
1538         return flush;
1539 }
1540
1541 static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1542                           struct kvm_memory_slot *slot, gfn_t gfn, int level,
1543                           pte_t unused)
1544 {
1545         u64 *sptep;
1546         struct rmap_iterator iter;
1547         int young = 0;
1548
1549         for_each_rmap_spte(rmap_head, &iter, sptep)
1550                 young |= mmu_spte_age(sptep);
1551
1552         return young;
1553 }
1554
1555 static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1556                                struct kvm_memory_slot *slot, gfn_t gfn,
1557                                int level, pte_t unused)
1558 {
1559         u64 *sptep;
1560         struct rmap_iterator iter;
1561
1562         for_each_rmap_spte(rmap_head, &iter, sptep)
1563                 if (is_accessed_spte(*sptep))
1564                         return 1;
1565         return 0;
1566 }
1567
1568 #define RMAP_RECYCLE_THRESHOLD 1000
1569
1570 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1571 {
1572         struct kvm_rmap_head *rmap_head;
1573         struct kvm_mmu_page *sp;
1574
1575         sp = sptep_to_sp(spte);
1576
1577         rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1578
1579         kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
1580         kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1581                         KVM_PAGES_PER_HPAGE(sp->role.level));
1582 }
1583
1584 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1585 {
1586         bool young = false;
1587
1588         if (kvm_memslots_have_rmaps(kvm))
1589                 young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
1590
1591         if (is_tdp_mmu_enabled(kvm))
1592                 young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1593
1594         return young;
1595 }
1596
1597 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1598 {
1599         bool young = false;
1600
1601         if (kvm_memslots_have_rmaps(kvm))
1602                 young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
1603
1604         if (is_tdp_mmu_enabled(kvm))
1605                 young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1606
1607         return young;
1608 }
1609
1610 #ifdef MMU_DEBUG
1611 static int is_empty_shadow_page(u64 *spt)
1612 {
1613         u64 *pos;
1614         u64 *end;
1615
1616         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1617                 if (is_shadow_present_pte(*pos)) {
1618                         printk(KERN_ERR "%s: %p %llx\n", __func__,
1619                                pos, *pos);
1620                         return 0;
1621                 }
1622         return 1;
1623 }
1624 #endif
1625
1626 /*
1627  * This value is the sum of all of the kvm instances'
1628  * kvm->arch.n_used_mmu_pages values.  We need a global,
1629  * aggregate version in order to make the slab shrinker
1630  * faster.
1631  */
1632 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
1633 {
1634         kvm->arch.n_used_mmu_pages += nr;
1635         percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1636 }
1637
1638 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1639 {
1640         MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1641         hlist_del(&sp->hash_link);
1642         list_del(&sp->link);
1643         free_page((unsigned long)sp->spt);
1644         if (!sp->role.direct)
1645                 free_page((unsigned long)sp->gfns);
1646         kmem_cache_free(mmu_page_header_cache, sp);
1647 }
1648
1649 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1650 {
1651         return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1652 }
1653
1654 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1655                                     struct kvm_mmu_page *sp, u64 *parent_pte)
1656 {
1657         if (!parent_pte)
1658                 return;
1659
1660         pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1661 }
1662
1663 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1664                                        u64 *parent_pte)
1665 {
1666         __pte_list_remove(parent_pte, &sp->parent_ptes);
1667 }
1668
1669 static void drop_parent_pte(struct kvm_mmu_page *sp,
1670                             u64 *parent_pte)
1671 {
1672         mmu_page_remove_parent_pte(sp, parent_pte);
1673         mmu_spte_clear_no_track(parent_pte);
1674 }
1675
1676 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1677 {
1678         struct kvm_mmu_page *sp;
1679
1680         sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
1681         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1682         if (!direct)
1683                 sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1684         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1685
1686         /*
1687          * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
1688          * depends on valid pages being added to the head of the list.  See
1689          * comments in kvm_zap_obsolete_pages().
1690          */
1691         sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1692         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1693         kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1694         return sp;
1695 }
1696
1697 static void mark_unsync(u64 *spte);
1698 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1699 {
1700         u64 *sptep;
1701         struct rmap_iterator iter;
1702
1703         for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1704                 mark_unsync(sptep);
1705         }
1706 }
1707
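/*
 * Mark the child slot for @spte in its shadow page's unsync_child_bitmap.
 * The first time a page gains an unsync child, the marking is propagated up
 * through all of its parent SPTEs so that the unsync walk can reach it.
 */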
1708 static void mark_unsync(u64 *spte)
1709 {
1710         struct kvm_mmu_page *sp;
1711         unsigned int index;
1712
1713         sp = sptep_to_sp(spte);
1714         index = spte - sp->spt;
1715         if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1716                 return;
1717         if (sp->unsync_children++)
1718                 return;
1719         kvm_mmu_mark_parents_unsync(sp);
1720 }
1721
1722 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1723                                struct kvm_mmu_page *sp)
1724 {
1725         return 0;
1726 }
1727
1728 #define KVM_PAGE_ARRAY_NR 16
1729
1730 struct kvm_mmu_pages {
1731         struct mmu_page_and_offset {
1732                 struct kvm_mmu_page *sp;
1733                 unsigned int idx;
1734         } page[KVM_PAGE_ARRAY_NR];
1735         unsigned int nr;
1736 };
1737
1738 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1739                          int idx)
1740 {
1741         int i;
1742
1743         if (sp->unsync)
1744                 for (i = 0; i < pvec->nr; i++)
1745                         if (pvec->page[i].sp == sp)
1746                                 return 0;
1747
1748         pvec->page[pvec->nr].sp = sp;
1749         pvec->page[pvec->nr].idx = idx;
1750         pvec->nr++;
1751         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1752 }
1753
1754 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1755 {
1756         --sp->unsync_children;
1757         WARN_ON((int)sp->unsync_children < 0);
1758         __clear_bit(idx, sp->unsync_child_bitmap);
1759 }
1760
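/*
 * Recursively walk the unsync_child_bitmap, adding every reachable unsync
 * page (and the intermediate pages leading to it) to @pvec.  Returns the
 * number of unsync leaf pages found, or -ENOSPC if @pvec filled up.
 */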
1761 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1762                            struct kvm_mmu_pages *pvec)
1763 {
1764         int i, ret, nr_unsync_leaf = 0;
1765
1766         for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1767                 struct kvm_mmu_page *child;
1768                 u64 ent = sp->spt[i];
1769
1770                 if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1771                         clear_unsync_child_bit(sp, i);
1772                         continue;
1773                 }
1774
1775                 child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1776
1777                 if (child->unsync_children) {
1778                         if (mmu_pages_add(pvec, child, i))
1779                                 return -ENOSPC;
1780
1781                         ret = __mmu_unsync_walk(child, pvec);
1782                         if (!ret) {
1783                                 clear_unsync_child_bit(sp, i);
1784                                 continue;
1785                         } else if (ret > 0) {
1786                                 nr_unsync_leaf += ret;
1787                         } else
1788                                 return ret;
1789                 } else if (child->unsync) {
1790                         nr_unsync_leaf++;
1791                         if (mmu_pages_add(pvec, child, i))
1792                                 return -ENOSPC;
1793                 } else
1794                         clear_unsync_child_bit(sp, i);
1795         }
1796
1797         return nr_unsync_leaf;
1798 }
1799
1800 #define INVALID_INDEX (-1)
1801
1802 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1803                            struct kvm_mmu_pages *pvec)
1804 {
1805         pvec->nr = 0;
1806         if (!sp->unsync_children)
1807                 return 0;
1808
1809         mmu_pages_add(pvec, sp, INVALID_INDEX);
1810         return __mmu_unsync_walk(sp, pvec);
1811 }
1812
1813 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1814 {
1815         WARN_ON(!sp->unsync);
1816         trace_kvm_mmu_sync_page(sp);
1817         sp->unsync = 0;
1818         --kvm->stat.mmu_unsync;
1819 }
1820
1821 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1822                                      struct list_head *invalid_list);
1823 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1824                                     struct list_head *invalid_list);
1825
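/*
 * Iterate over the shadow pages in a hash bucket, skipping pages that are
 * obsolete or invalid.  The _gfn_indirect_ variant additionally skips direct
 * pages and pages mapping a different gfn.
 */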
1826 #define for_each_valid_sp(_kvm, _sp, _list)                             \
1827         hlist_for_each_entry(_sp, _list, hash_link)                     \
1828                 if (is_obsolete_sp((_kvm), (_sp))) {                    \
1829                 } else
1830
1831 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                 \
1832         for_each_valid_sp(_kvm, _sp,                                    \
1833           &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])     \
1834                 if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
1835
1836 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1837                          struct list_head *invalid_list)
1838 {
1839         if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
1840                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1841                 return false;
1842         }
1843
1844         return true;
1845 }
1846
1847 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1848                                         struct list_head *invalid_list,
1849                                         bool remote_flush)
1850 {
1851         if (!remote_flush && list_empty(invalid_list))
1852                 return false;
1853
1854         if (!list_empty(invalid_list))
1855                 kvm_mmu_commit_zap_page(kvm, invalid_list);
1856         else
1857                 kvm_flush_remote_tlbs(kvm);
1858         return true;
1859 }
1860
1861 static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
1862                                  struct list_head *invalid_list,
1863                                  bool remote_flush, bool local_flush)
1864 {
1865         if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
1866                 return;
1867
1868         if (local_flush)
1869                 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1870 }
1871
1872 #ifdef CONFIG_KVM_MMU_AUDIT
1873 #include "mmu_audit.c"
1874 #else
1875 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
1876 static void mmu_audit_disable(void) { }
1877 #endif
1878
1879 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1880 {
1881         return sp->role.invalid ||
1882                unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1883 }
1884
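/*
 * mmu_page_path records, for each level, the parent page and the index of
 * the child entry traversed during an unsync walk, so that
 * mmu_pages_clear_parents() can clear the unsync_child_bitmap bits of every
 * ancestor once a child has been processed.
 */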
1885 struct mmu_page_path {
1886         struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1887         unsigned int idx[PT64_ROOT_MAX_LEVEL];
1888 };
1889
1890 #define for_each_sp(pvec, sp, parents, i)                       \
1891                 for (i = mmu_pages_first(&pvec, &parents);      \
1892                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1893                         i = mmu_pages_next(&pvec, &parents, i))
1894
1895 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1896                           struct mmu_page_path *parents,
1897                           int i)
1898 {
1899         int n;
1900
1901         for (n = i+1; n < pvec->nr; n++) {
1902                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1903                 unsigned idx = pvec->page[n].idx;
1904                 int level = sp->role.level;
1905
1906                 parents->idx[level-1] = idx;
1907                 if (level == PG_LEVEL_4K)
1908                         break;
1909
1910                 parents->parent[level-2] = sp;
1911         }
1912
1913         return n;
1914 }
1915
1916 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
1917                            struct mmu_page_path *parents)
1918 {
1919         struct kvm_mmu_page *sp;
1920         int level;
1921
1922         if (pvec->nr == 0)
1923                 return 0;
1924
1925         WARN_ON(pvec->page[0].idx != INVALID_INDEX);
1926
1927         sp = pvec->page[0].sp;
1928         level = sp->role.level;
1929         WARN_ON(level == PG_LEVEL_4K);
1930
1931         parents->parent[level-2] = sp;
1932
1933         /* Also set up a sentinel.  Further entries in pvec are all
1934          * children of sp, so this element is never overwritten.
1935          */
1936         parents->parent[level-1] = NULL;
1937         return mmu_pages_next(pvec, parents, 0);
1938 }
1939
1940 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1941 {
1942         struct kvm_mmu_page *sp;
1943         unsigned int level = 0;
1944
1945         do {
1946                 unsigned int idx = parents->idx[level];
1947                 sp = parents->parent[level];
1948                 if (!sp)
1949                         return;
1950
1951                 WARN_ON(idx == INVALID_INDEX);
1952                 clear_unsync_child_bit(sp, idx);
1953                 level++;
1954         } while (!sp->unsync_children);
1955 }
1956
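/*
 * Sync all unsync descendants of @parent: write-protect their gfns, flush
 * TLBs if any write-protection was added, then sync (or zap) each page.  The
 * MMU lock may be dropped to reschedule, after which the outer walk restarts.
 */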
1957 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1958                               struct kvm_mmu_page *parent)
1959 {
1960         int i;
1961         struct kvm_mmu_page *sp;
1962         struct mmu_page_path parents;
1963         struct kvm_mmu_pages pages;
1964         LIST_HEAD(invalid_list);
1965         bool flush = false;
1966
1967         while (mmu_unsync_walk(parent, &pages)) {
1968                 bool protected = false;
1969
1970                 for_each_sp(pages, sp, parents, i)
1971                         protected |= rmap_write_protect(vcpu, sp->gfn);
1972
1973                 if (protected) {
1974                         kvm_flush_remote_tlbs(vcpu->kvm);
1975                         flush = false;
1976                 }
1977
1978                 for_each_sp(pages, sp, parents, i) {
1979                         kvm_unlink_unsync_page(vcpu->kvm, sp);
1980                         flush |= kvm_sync_page(vcpu, sp, &invalid_list);
1981                         mmu_pages_clear_parents(&parents);
1982                 }
1983                 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
1984                         kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
1985                         cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
1986                         flush = false;
1987                 }
1988         }
1989
1990         kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
1991 }
1992
1993 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
1994 {
1995         atomic_set(&sp->write_flooding_count, 0);
1996 }
1997
1998 static void clear_sp_write_flooding_count(u64 *spte)
1999 {
2000         __clear_sp_write_flooding_count(sptep_to_sp(spte));
2001 }
2002
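/*
 * Get the shadow page for the given gfn and role, reusing an existing page
 * from the hash (syncing it first if it is unsync) or allocating a new one.
 * New indirect pages are accounted and their gfn is write-protected as
 * needed.
 */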
2003 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2004                                              gfn_t gfn,
2005                                              gva_t gaddr,
2006                                              unsigned level,
2007                                              int direct,
2008                                              unsigned int access)
2009 {
2010         bool direct_mmu = vcpu->arch.mmu->direct_map;
2011         union kvm_mmu_page_role role;
2012         struct hlist_head *sp_list;
2013         unsigned quadrant;
2014         struct kvm_mmu_page *sp;
2015         int collisions = 0;
2016         LIST_HEAD(invalid_list);
2017
2018         role = vcpu->arch.mmu->mmu_role.base;
2019         role.level = level;
2020         role.direct = direct;
2021         if (role.direct)
2022                 role.gpte_is_8_bytes = true;
2023         role.access = access;
2024         if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2025                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2026                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2027                 role.quadrant = quadrant;
2028         }
2029
2030         sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2031         for_each_valid_sp(vcpu->kvm, sp, sp_list) {
2032                 if (sp->gfn != gfn) {
2033                         collisions++;
2034                         continue;
2035                 }
2036
2037                 if (sp->role.word != role.word) {
2038                         /*
2039                          * If the guest is creating an upper-level page, zap
2040                          * unsync pages for the same gfn.  While it's possible
2041                          * the guest is using recursive page tables, in all
2042                          * likelihood the guest has stopped using the unsync
2043                          * page and is installing a completely unrelated page.
2044                          * Unsync pages must not be left as is, because the new
2045                          * upper-level page will be write-protected.
2046                          */
2047                         if (level > PG_LEVEL_4K && sp->unsync)
2048                                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2049                                                          &invalid_list);
2050                         continue;
2051                 }
2052
2053                 if (direct_mmu)
2054                         goto trace_get_page;
2055
2056                 if (sp->unsync) {
2057                         /*
2058                          * The page is good, but is stale.  kvm_sync_page does
2059                          * get the latest guest state, but (unlike mmu_unsync_children)
2060                          * it doesn't write-protect the page or mark it synchronized!
2061                          * This way the validity of the mapping is ensured, but the
2062                          * overhead of write protection is not incurred until the
2063                          * guest invalidates the TLB mapping.  This allows multiple
2064                          * SPs for a single gfn to be unsync.
2065                          *
2066                          * If the sync fails, the page is zapped.  If so, break
2067                          * in order to rebuild it.
2068                          */
2069                         if (!kvm_sync_page(vcpu, sp, &invalid_list))
2070                                 break;
2071
2072                         WARN_ON(!list_empty(&invalid_list));
2073                         kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2074                 }
2075
2076                 if (sp->unsync_children)
2077                         kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2078
2079                 __clear_sp_write_flooding_count(sp);
2080
2081 trace_get_page:
2082                 trace_kvm_mmu_get_page(sp, false);
2083                 goto out;
2084         }
2085
2086         ++vcpu->kvm->stat.mmu_cache_miss;
2087
2088         sp = kvm_mmu_alloc_page(vcpu, direct);
2089
2090         sp->gfn = gfn;
2091         sp->role = role;
2092         hlist_add_head(&sp->hash_link, sp_list);
2093         if (!direct) {
2094                 account_shadowed(vcpu->kvm, sp);
2095                 if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
2096                         kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2097         }
2098         trace_kvm_mmu_get_page(sp, true);
2099 out:
2100         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2101
2102         if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2103                 vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2104         return sp;
2105 }
2106
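/*
 * Initialize an iterator for walking the shadow page table rooted at @root
 * down to the leaf entry for @addr.  With a PAE root, the walk starts from
 * the appropriate entry of the four-entry pae_root table instead.
 */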
2107 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2108                                         struct kvm_vcpu *vcpu, hpa_t root,
2109                                         u64 addr)
2110 {
2111         iterator->addr = addr;
2112         iterator->shadow_addr = root;
2113         iterator->level = vcpu->arch.mmu->shadow_root_level;
2114
2115         if (iterator->level == PT64_ROOT_4LEVEL &&
2116             vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2117             !vcpu->arch.mmu->direct_map)
2118                 --iterator->level;
2119
2120         if (iterator->level == PT32E_ROOT_LEVEL) {
2121                 /*
2122                  * prev_root is currently only used for 64-bit hosts. So only
2123                  * the active root_hpa is valid here.
2124                  */
2125                 BUG_ON(root != vcpu->arch.mmu->root_hpa);
2126
2127                 iterator->shadow_addr
2128                         = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2129                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2130                 --iterator->level;
2131                 if (!iterator->shadow_addr)
2132                         iterator->level = 0;
2133         }
2134 }
2135
2136 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2137                              struct kvm_vcpu *vcpu, u64 addr)
2138 {
2139         shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2140                                     addr);
2141 }
2142
2143 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2144 {
2145         if (iterator->level < PG_LEVEL_4K)
2146                 return false;
2147
2148         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2149         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2150         return true;
2151 }
2152
2153 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2154                                u64 spte)
2155 {
2156         if (is_last_spte(spte, iterator->level)) {
2157                 iterator->level = 0;
2158                 return;
2159         }
2160
2161         iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2162         --iterator->level;
2163 }
2164
2165 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2166 {
2167         __shadow_walk_next(iterator, *iterator->sptep);
2168 }
2169
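/*
 * Point @sptep at the new child page @sp, record @sptep as one of @sp's
 * parent PTEs, and propagate any unsync state in @sp up to the new parent.
 */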
2170 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2171                              struct kvm_mmu_page *sp)
2172 {
2173         u64 spte;
2174
2175         BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2176
2177         spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2178
2179         mmu_spte_set(sptep, spte);
2180
2181         mmu_page_add_parent_pte(vcpu, sp, sptep);
2182
2183         if (sp->unsync_children || sp->unsync)
2184                 mark_unsync(sptep);
2185 }
2186
2187 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2188                                    unsigned direct_access)
2189 {
2190         if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2191                 struct kvm_mmu_page *child;
2192
2193                 /*
2194                  * For the direct sp, if the guest pte's dirty bit
2195                  * changed from clean to dirty, it will corrupt the
2196                  * sp's access, i.e. allow writes via a read-only sp,
2197                  * so we should update the spte at this point to get
2198                  * a new sp with the correct access.
2199                  */
2200                 child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2201                 if (child->role.access == direct_access)
2202                         return;
2203
2204                 drop_parent_pte(child, sptep);
2205                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2206         }
2207 }
2208
2209 /* Returns the number of zapped non-leaf child shadow pages. */
2210 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2211                             u64 *spte, struct list_head *invalid_list)
2212 {
2213         u64 pte;
2214         struct kvm_mmu_page *child;
2215
2216         pte = *spte;
2217         if (is_shadow_present_pte(pte)) {
2218                 if (is_last_spte(pte, sp->role.level)) {
2219                         drop_spte(kvm, spte);
2220                         if (is_large_pte(pte))
2221                                 --kvm->stat.lpages;
2222                 } else {
2223                         child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2224                         drop_parent_pte(child, spte);
2225
2226                         /*
2227                          * Recursively zap nested TDP SPs; parentless SPs are
2228                          * unlikely to be used again in the near future.  This
2229                          * avoids retaining a large number of stale nested SPs.
2230                          */
2231                         if (tdp_enabled && invalid_list &&
2232                             child->role.guest_mode && !child->parent_ptes.val)
2233                                 return kvm_mmu_prepare_zap_page(kvm, child,
2234                                                                 invalid_list);
2235                 }
2236         } else if (is_mmio_spte(pte)) {
2237                 mmu_spte_clear_no_track(spte);
2238         }
2239         return 0;
2240 }
2241
2242 static int kvm_mmu_page_unlink_children(struct kvm *kvm,
2243                                         struct kvm_mmu_page *sp,
2244                                         struct list_head *invalid_list)
2245 {
2246         int zapped = 0;
2247         unsigned i;
2248
2249         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2250                 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
2251
2252         return zapped;
2253 }
2254
2255 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2256 {
2257         u64 *sptep;
2258         struct rmap_iterator iter;
2259
2260         while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2261                 drop_parent_pte(sp, sptep);
2262 }
2263
2264 static int mmu_zap_unsync_children(struct kvm *kvm,
2265                                    struct kvm_mmu_page *parent,
2266                                    struct list_head *invalid_list)
2267 {
2268         int i, zapped = 0;
2269         struct mmu_page_path parents;
2270         struct kvm_mmu_pages pages;
2271
2272         if (parent->role.level == PG_LEVEL_4K)
2273                 return 0;
2274
2275         while (mmu_unsync_walk(parent, &pages)) {
2276                 struct kvm_mmu_page *sp;
2277
2278                 for_each_sp(pages, sp, parents, i) {
2279                         kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2280                         mmu_pages_clear_parents(&parents);
2281                         zapped++;
2282                 }
2283         }
2284
2285         return zapped;
2286 }
2287
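/*
 * Unlink a shadow page (and its unsync children) from the MMU and queue the
 * zapped pages on @invalid_list for kvm_mmu_commit_zap_page().  Returns true
 * if zapping children made the active page list unstable, in which case any
 * ongoing list walk must be restarted.
 */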
2288 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2289                                        struct kvm_mmu_page *sp,
2290                                        struct list_head *invalid_list,
2291                                        int *nr_zapped)
2292 {
2293         bool list_unstable;
2294
2295         trace_kvm_mmu_prepare_zap_page(sp);
2296         ++kvm->stat.mmu_shadow_zapped;
2297         *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2298         *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2299         kvm_mmu_unlink_parents(kvm, sp);
2300
2301         /* Zapping children means active_mmu_pages has become unstable. */
2302         list_unstable = *nr_zapped;
2303
2304         if (!sp->role.invalid && !sp->role.direct)
2305                 unaccount_shadowed(kvm, sp);
2306
2307         if (sp->unsync)
2308                 kvm_unlink_unsync_page(kvm, sp);
2309         if (!sp->root_count) {
2310                 /* Count self */
2311                 (*nr_zapped)++;
2312
2313                 /*
2314                  * Already invalid pages (previously active roots) are not on
2315                  * the active page list.  See list_del() in the "else" case of
2316                  * !sp->root_count.
2317                  */
2318                 if (sp->role.invalid)
2319                         list_add(&sp->link, invalid_list);
2320                 else
2321                         list_move(&sp->link, invalid_list);
2322                 kvm_mod_used_mmu_pages(kvm, -1);
2323         } else {
2324                 /*
2325                  * Remove the active root from the active page list, the root
2326                  * will be explicitly freed when the root_count hits zero.
2327                  */
2328                 list_del(&sp->link);
2329
2330                 /*
2331                  * Obsolete pages cannot be used on any vCPUs, see the comment
2332                  * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2333                  * treats invalid shadow pages as being obsolete.
2334                  */
2335                 if (!is_obsolete_sp(kvm, sp))
2336                         kvm_reload_remote_mmus(kvm);
2337         }
2338
2339         if (sp->lpage_disallowed)
2340                 unaccount_huge_nx_page(kvm, sp);
2341
2342         sp->role.invalid = 1;
2343         return list_unstable;
2344 }
2345
2346 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2347                                      struct list_head *invalid_list)
2348 {
2349         int nr_zapped;
2350
2351         __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2352         return nr_zapped;
2353 }
2354
2355 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2356                                     struct list_head *invalid_list)
2357 {
2358         struct kvm_mmu_page *sp, *nsp;
2359
2360         if (list_empty(invalid_list))
2361                 return;
2362
2363         /*
2364          * We need to make sure everyone sees our modifications to
2365          * the page tables and sees changes to vcpu->mode here. The barrier
2366          * in the kvm_flush_remote_tlbs() achieves this. This pairs
2367          * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2368          *
2369          * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2370          * guest mode and/or lockless shadow page table walks.
2371          */
2372         kvm_flush_remote_tlbs(kvm);
2373
2374         list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2375                 WARN_ON(!sp->role.invalid || sp->root_count);
2376                 kvm_mmu_free_page(sp);
2377         }
2378 }
2379
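/*
 * Zap shadow pages from the tail of the active page list, i.e. the oldest
 * pages, until at least @nr_to_zap pages have been zapped.  Active root
 * pages are skipped.  Returns the number of pages actually zapped.
 */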
2380 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
2381                                                   unsigned long nr_to_zap)
2382 {
2383         unsigned long total_zapped = 0;
2384         struct kvm_mmu_page *sp, *tmp;
2385         LIST_HEAD(invalid_list);
2386         bool unstable;
2387         int nr_zapped;
2388
2389         if (list_empty(&kvm->arch.active_mmu_pages))
2390                 return 0;
2391
2392 restart:
2393         list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2394                 /*
2395                  * Don't zap active root pages, the page itself can't be freed
2396                  * and zapping it will just force vCPUs to realloc and reload.
2397                  */
2398                 if (sp->root_count)
2399                         continue;
2400
2401                 unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
2402                                                       &nr_zapped);
2403                 total_zapped += nr_zapped;
2404                 if (total_zapped >= nr_to_zap)
2405                         break;
2406
2407                 if (unstable)
2408                         goto restart;
2409         }
2410
2411         kvm_mmu_commit_zap_page(kvm, &invalid_list);
2412
2413         kvm->stat.mmu_recycled += total_zapped;
2414         return total_zapped;
2415 }
2416
2417 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2418 {
2419         if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2420                 return kvm->arch.n_max_mmu_pages -
2421                         kvm->arch.n_used_mmu_pages;
2422
2423         return 0;
2424 }
2425
2426 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2427 {
2428         unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2429
2430         if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2431                 return 0;
2432
2433         kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2434
2435         /*
2436          * Note, this check is intentionally soft, it only guarantees that one
2437          * page is available, while the caller may end up allocating as many as
2438          * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
2439          * exceeding the (arbitrary by default) limit will not harm the host,
2440          * being too aggressive may unnecessarily kill the guest, and getting an
2441          * exact count is far more trouble than it's worth, especially in the
2442          * page fault paths.
2443          */
2444         if (!kvm_mmu_available_pages(vcpu->kvm))
2445                 return -ENOSPC;
2446         return 0;
2447 }
2448
2449 /*
2450  * Change the number of MMU pages allocated to the VM.
2451  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2452  */
2453 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2454 {
2455         write_lock(&kvm->mmu_lock);
2456
2457         if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2458                 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2459                                                   goal_nr_mmu_pages);
2460
2461                 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2462         }
2463
2464         kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2465
2466         write_unlock(&kvm->mmu_lock);
2467 }
2468
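/*
 * Zap all indirect shadow pages for @gfn so that the gfn is no longer
 * write-protected.  Returns 1 if at least one page was zapped.
 */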
2469 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2470 {
2471         struct kvm_mmu_page *sp;
2472         LIST_HEAD(invalid_list);
2473         int r;
2474
2475         pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2476         r = 0;
2477         write_lock(&kvm->mmu_lock);
2478         for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2479                 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2480                          sp->role.word);
2481                 r = 1;
2482                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2483         }
2484         kvm_mmu_commit_zap_page(kvm, &invalid_list);
2485         write_unlock(&kvm->mmu_lock);
2486
2487         return r;
2488 }
2489
2490 static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2491 {
2492         gpa_t gpa;
2493         int r;
2494
2495         if (vcpu->arch.mmu->direct_map)
2496                 return 0;
2497
2498         gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
2499
2500         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2501
2502         return r;
2503 }
2504
2505 static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2506 {
2507         trace_kvm_mmu_unsync_page(sp);
2508         ++vcpu->kvm->stat.mmu_unsync;
2509         sp->unsync = 1;
2510
2511         kvm_mmu_mark_parents_unsync(sp);
2512 }
2513
2514 /*
2515  * Attempt to unsync any shadow pages that can be reached by the specified gfn,
2516  * for which KVM is creating a writable mapping.  Returns 0 if all pages
2517  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
2518  * be write-protected.
2519  */
2520 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
2521 {
2522         struct kvm_mmu_page *sp;
2523
2524         /*
2525          * Force write-protection if the page is being tracked.  Note, the page
2526          * track machinery is used to write-protect upper-level shadow pages,
2527          * i.e. this guards the role.level == 4K assertion below!
2528          */
2529         if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
2530                 return -EPERM;
2531
2532         /*
2533          * The page is not write-tracked, mark existing shadow pages unsync
2534          * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
2535          * that case, KVM must complete emulation of the guest TLB flush before
2536          * allowing shadow pages to become unsync (writable by the guest).
2537          */
2538         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2539                 if (!can_unsync)
2540                         return -EPERM;
2541
2542                 if (sp->unsync)
2543                         continue;
2544
2545                 WARN_ON(sp->role.level != PG_LEVEL_4K);
2546                 kvm_unsync_page(vcpu, sp);
2547         }
2548
2549         /*
2550          * We need to ensure that the marking of unsync pages is visible
2551          * before the SPTE is updated to allow writes because
2552          * kvm_mmu_sync_roots() checks the unsync flags without holding
2553          * the MMU lock and so can race with this. If the SPTE was updated
2554          * before the page had been marked as unsync-ed, something like the
2555          * following could happen:
2556          *
2557          * CPU 1                    CPU 2
2558          * ---------------------------------------------------------------------
2559          * 1.2 Host updates SPTE
2560          *     to be writable
2561          *                      2.1 Guest writes a GPTE for GVA X.
2562          *                          (GPTE being in the guest page table shadowed
2563          *                           by the SP from CPU 1.)
2564          *                          This reads SPTE during the page table walk.
2565          *                          Since SPTE.W is read as 1, there is no
2566          *                          fault.
2567          *
2568          *                      2.2 Guest issues TLB flush.
2569          *                          That causes a VM Exit.
2570          *
2571          *                      2.3 Walking of unsync pages sees sp->unsync is
2572          *                          false and skips the page.
2573          *
2574          *                      2.4 Guest accesses GVA X.
2575          *                          Since the mapping in the SP was not
2576          *                          updated, the old mapping for GVA X is
2577          *                          incorrectly used.
2578          * 1.1 Host marks SP
2579          *     as unsync
2580          *     (sp->unsync = true)
2581          *
2582          * The write barrier below ensures that 1.1 happens before 1.2 and thus
2583          * the situation in 2.4 does not arise. The implicit barrier in 2.2
2584          * pairs with this write barrier.
2585          */
2586         smp_wmb();
2587
2588         return 0;
2589 }
2590
2591 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2592                     unsigned int pte_access, int level,
2593                     gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2594                     bool can_unsync, bool host_writable)
2595 {
2596         u64 spte;
2597         struct kvm_mmu_page *sp;
2598         int ret;
2599
2600         sp = sptep_to_sp(sptep);
2601
2602         ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
2603                         can_unsync, host_writable, sp_ad_disabled(sp), &spte);
2604
2605         if (spte & PT_WRITABLE_MASK)
2606                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
2607
2608         if (*sptep == spte)
2609                 ret |= SET_SPTE_SPURIOUS;
2610         else if (mmu_spte_update(sptep, spte))
2611                 ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
2612         return ret;
2613 }
2614
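/*
 * Create or update the leaf SPTE for @gfn: MMIO pfns get an MMIO SPTE, stale
 * mappings to a different child page or pfn are dropped first, and the rmap
 * and large-page statistics are updated.  Returns a RET_PF_* value.
 */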
2615 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2616                         unsigned int pte_access, bool write_fault, int level,
2617                         gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2618                         bool host_writable)
2619 {
2620         int was_rmapped = 0;
2621         int rmap_count;
2622         int set_spte_ret;
2623         int ret = RET_PF_FIXED;
2624         bool flush = false;
2625
2626         pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2627                  *sptep, write_fault, gfn);
2628
2629         if (unlikely(is_noslot_pfn(pfn))) {
2630                 mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2631                 return RET_PF_EMULATE;
2632         }
2633
2634         if (is_shadow_present_pte(*sptep)) {
2635                 /*
2636                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2637                  * the parent of the now unreachable PTE.
2638                  */
2639                 if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2640                         struct kvm_mmu_page *child;
2641                         u64 pte = *sptep;
2642
2643                         child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2644                         drop_parent_pte(child, sptep);
2645                         flush = true;
2646                 } else if (pfn != spte_to_pfn(*sptep)) {
2647                         pgprintk("hfn old %llx new %llx\n",
2648                                  spte_to_pfn(*sptep), pfn);
2649                         drop_spte(vcpu->kvm, sptep);
2650                         flush = true;
2651                 } else
2652                         was_rmapped = 1;
2653         }
2654
2655         set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
2656                                 speculative, true, host_writable);
2657         if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
2658                 if (write_fault)
2659                         ret = RET_PF_EMULATE;
2660                 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2661         }
2662
2663         if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
2664                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
2665                                 KVM_PAGES_PER_HPAGE(level));
2666
2667         /*
2668          * The fault is fully spurious if and only if the new SPTE and old SPTE
2669          * are identical, and emulation is not required.
2670          */
2671         if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
2672                 WARN_ON_ONCE(!was_rmapped);
2673                 return RET_PF_SPURIOUS;
2674         }
2675
2676         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2677         trace_kvm_mmu_set_spte(level, gfn, sptep);
2678         if (!was_rmapped && is_large_pte(*sptep))
2679                 ++vcpu->kvm->stat.lpages;
2680
2681         if (is_shadow_present_pte(*sptep)) {
2682                 if (!was_rmapped) {
2683                         rmap_count = rmap_add(vcpu, sptep, gfn);
2684                         if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2685                                 rmap_recycle(vcpu, sptep, gfn);
2686                 }
2687         }
2688
2689         return ret;
2690 }
2691
2692 static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2693                                      bool no_dirty_log)
2694 {
2695         struct kvm_memory_slot *slot;
2696
2697         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2698         if (!slot)
2699                 return KVM_PFN_ERR_FAULT;
2700
2701         return gfn_to_pfn_memslot_atomic(slot, gfn);
2702 }
2703
2704 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2705                                     struct kvm_mmu_page *sp,
2706                                     u64 *start, u64 *end)
2707 {
2708         struct page *pages[PTE_PREFETCH_NUM];
2709         struct kvm_memory_slot *slot;
2710         unsigned int access = sp->role.access;
2711         int i, ret;
2712         gfn_t gfn;
2713
2714         gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2715         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2716         if (!slot)
2717                 return -1;
2718
2719         ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2720         if (ret <= 0)
2721                 return -1;
2722
2723         for (i = 0; i < ret; i++, gfn++, start++) {
2724                 mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
2725                              page_to_pfn(pages[i]), true, true);
2726                 put_page(pages[i]);
2727         }
2728
2729         return 0;
2730 }
2731
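/*
 * Prefetch mappings for the not-yet-present SPTEs in the aligned,
 * PTE_PREFETCH_NUM-sized window around @sptep, skipping SPTEs that are
 * already present as well as the faulting SPTE itself.
 */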
2732 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2733                                   struct kvm_mmu_page *sp, u64 *sptep)
2734 {
2735         u64 *spte, *start = NULL;
2736         int i;
2737
2738         WARN_ON(!sp->role.direct);
2739
2740         i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2741         spte = sp->spt + i;
2742
2743         for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2744                 if (is_shadow_present_pte(*spte) || spte == sptep) {
2745                         if (!start)
2746                                 continue;
2747                         if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2748                                 break;
2749                         start = NULL;
2750                 } else if (!start)
2751                         start = spte;
2752         }
2753 }
2754
2755 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2756 {
2757         struct kvm_mmu_page *sp;
2758
2759         sp = sptep_to_sp(sptep);
2760
2761         /*
2762          * Without accessed bits, there's no way to distinguish between
2763          * actually accessed translations and prefetched ones, so disable pte
2764          * prefetch if accessed bits aren't available.
2765          */
2766         if (sp_ad_disabled(sp))
2767                 return;
2768
2769         if (sp->role.level > PG_LEVEL_4K)
2770                 return;
2771
2772         /*
2773          * If addresses are being invalidated, skip prefetching to avoid
2774          * accidentally prefetching those addresses.
2775          */
2776         if (unlikely(vcpu->kvm->mmu_notifier_count))
2777                 return;
2778
2779         __direct_pte_prefetch(vcpu, sp, sptep);
2780 }
2781
2782 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
2783                                   const struct kvm_memory_slot *slot)
2784 {
2785         unsigned long hva;
2786         pte_t *pte;
2787         int level;
2788
2789         if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
2790                 return PG_LEVEL_4K;
2791
2792         /*
2793          * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
2794          * is not solely for performance; it's also necessary to avoid the
2795          * "writable" check in __gfn_to_hva_many(), which will always fail on
2796          * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
2797          * page fault steps have already verified the guest isn't writing a
2798          * read-only memslot.
2799          */
2800         hva = __gfn_to_hva_memslot(slot, gfn);
2801
2802         pte = lookup_address_in_mm(kvm->mm, hva, &level);
2803         if (unlikely(!pte))
2804                 return PG_LEVEL_4K;
2805
2806         return level;
2807 }
2808
2809 int kvm_mmu_max_mapping_level(struct kvm *kvm,
2810                               const struct kvm_memory_slot *slot, gfn_t gfn,
2811                               kvm_pfn_t pfn, int max_level)
2812 {
2813         struct kvm_lpage_info *linfo;
2814
2815         max_level = min(max_level, max_huge_page_level);
2816         for ( ; max_level > PG_LEVEL_4K; max_level--) {
2817                 linfo = lpage_info_slot(gfn, slot, max_level);
2818                 if (!linfo->disallow_lpage)
2819                         break;
2820         }
2821
2822         if (max_level == PG_LEVEL_4K)
2823                 return PG_LEVEL_4K;
2824
2825         return host_pfn_mapping_level(kvm, gfn, pfn, slot);
2826 }
2827
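/*
 * Determine the level at which the fault should be mapped: the level is
 * capped by the memslot's large-page restrictions and by the host mapping
 * size, the pre-workaround result is reported via @req_level, and *pfnp is
 * aligned to the chosen hugepage size.  A 4K mapping is forced when
 * @huge_page_disallowed is set (iTLB multihit workaround).
 */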
2828 int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
2829                             int max_level, kvm_pfn_t *pfnp,
2830                             bool huge_page_disallowed, int *req_level)
2831 {
2832         struct kvm_memory_slot *slot;
2833         kvm_pfn_t pfn = *pfnp;
2834         kvm_pfn_t mask;
2835         int level;
2836
2837         *req_level = PG_LEVEL_4K;
2838
2839         if (unlikely(max_level == PG_LEVEL_4K))
2840                 return PG_LEVEL_4K;
2841
2842         if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
2843                 return PG_LEVEL_4K;
2844
2845         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
2846         if (!slot)
2847                 return PG_LEVEL_4K;
2848
2849         level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
2850         if (level == PG_LEVEL_4K)
2851                 return level;
2852
2853         *req_level = level = min(level, max_level);
2854
2855         /*
2856          * Enforce the iTLB multihit workaround after capturing the requested
2857          * level, which will be used to do precise, accurate accounting.
2858          */
2859         if (huge_page_disallowed)
2860                 return PG_LEVEL_4K;
2861
2862         /*
2863          * mmu_notifier_retry() was successful and mmu_lock is held, so
2864          * the pmd can't be split from under us.
2865          */
2866         mask = KVM_PAGES_PER_HPAGE(level) - 1;
2867         VM_BUG_ON((gfn & mask) != (pfn & mask));
2868         *pfnp = pfn & ~mask;
2869
2870         return level;
2871 }
2872
2873 void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
2874                                 kvm_pfn_t *pfnp, int *goal_levelp)
2875 {
2876         int level = *goal_levelp;
2877
2878         if (cur_level == level && level > PG_LEVEL_4K &&
2879             is_shadow_present_pte(spte) &&
2880             !is_large_pte(spte)) {
2881                 /*
2882                  * A small SPTE exists for this pfn, but FNAME(fetch)
2883                  * and __direct_map would like to create a large PTE
2884                  * instead: just force them to go down another level,
2885                  * patching the next 9 bits of the address back into
2886                  * the pfn.
2887                  */
2888                 u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
2889                                 KVM_PAGES_PER_HPAGE(level - 1);
2890                 *pfnp |= gfn & page_mask;
2891                 (*goal_levelp)--;
2892         }
2893 }
2894
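/*
 * Handle a fault for the direct MMU: pick the final mapping level, allocate
 * and link any missing intermediate shadow pages along the walk, then
 * install the leaf SPTE via mmu_set_spte() and prefetch nearby PTEs.
 */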
2895 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
2896                         int map_writable, int max_level, kvm_pfn_t pfn,
2897                         bool prefault, bool is_tdp)
2898 {
2899         bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
2900         bool write = error_code & PFERR_WRITE_MASK;
2901         bool exec = error_code & PFERR_FETCH_MASK;
2902         bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
2903         struct kvm_shadow_walk_iterator it;
2904         struct kvm_mmu_page *sp;
2905         int level, req_level, ret;
2906         gfn_t gfn = gpa >> PAGE_SHIFT;
2907         gfn_t base_gfn = gfn;
2908
2909         level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
2910                                         huge_page_disallowed, &req_level);
2911
2912         trace_kvm_mmu_spte_requested(gpa, level, pfn);
2913         for_each_shadow_entry(vcpu, gpa, it) {
2914                 /*
2915                  * We cannot overwrite existing page tables with an NX
2916                  * large page, as the leaf could be executable.
2917                  */
2918                 if (nx_huge_page_workaround_enabled)
2919                         disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
2920                                                    &pfn, &level);
2921
2922                 base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
2923                 if (it.level == level)
2924                         break;
2925
2926                 drop_large_spte(vcpu, it.sptep);
2927                 if (!is_shadow_present_pte(*it.sptep)) {
2928                         sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
2929                                               it.level - 1, true, ACC_ALL);
2930
2931                         link_shadow_page(vcpu, it.sptep, sp);
2932                         if (is_tdp && huge_page_disallowed &&
2933                             req_level >= it.level)
2934                                 account_huge_nx_page(vcpu->kvm, sp);
2935                 }
2936         }
2937
2938         ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
2939                            write, level, base_gfn, pfn, prefault,
2940                            map_writable);
2941         if (ret == RET_PF_SPURIOUS)
2942                 return ret;
2943
2944         direct_pte_prefetch(vcpu, it.sptep);
2945         ++vcpu->stat.pf_fixed;
2946         return ret;
2947 }
2948
2949 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2950 {
2951         send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
2952 }
2953
2954 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2955 {
2956         /*
2957          * Do not cache the mmio info caused by writing the readonly gfn
2958          * into the spte, otherwise a read access on the readonly gfn would
2959          * also cause an mmio page fault and be treated as an mmio access.
2960          */
2961         if (pfn == KVM_PFN_ERR_RO_FAULT)
2962                 return RET_PF_EMULATE;
2963
2964         if (pfn == KVM_PFN_ERR_HWPOISON) {
2965                 kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
2966                 return RET_PF_RETRY;
2967         }
2968
2969         return -EFAULT;
2970 }
2971
2972 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
2973                                 kvm_pfn_t pfn, unsigned int access,
2974                                 int *ret_val)
2975 {
2976         /* The pfn is invalid, report the error! */
2977         if (unlikely(is_error_pfn(pfn))) {
2978                 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
2979                 return true;
2980         }
2981
2982         if (unlikely(is_noslot_pfn(pfn))) {
2983                 vcpu_cache_mmio_info(vcpu, gva, gfn,
2984                                      access & shadow_mmio_access_mask);
2985                 /*
2986                  * If MMIO caching is disabled, emulate immediately without
2987                  * touching the shadow page tables as attempting to install an
2988                  * MMIO SPTE will just be an expensive nop.
2989                  */
2990                 if (unlikely(!shadow_mmio_value)) {
2991                         *ret_val = RET_PF_EMULATE;
2992                         return true;
2993                 }
2994         }
2995
2996         return false;
2997 }
2998
2999 static bool page_fault_can_be_fast(u32 error_code)
3000 {
3001         /*
3002          * Do not fix an mmio spte with an invalid generation number, which
3003          * needs to be updated by the slow page fault path.
3004          */
3005         if (unlikely(error_code & PFERR_RSVD_MASK))
3006                 return false;
3007
3008         /* See if the page fault is due to an NX violation */
3009         if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
3010                       == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
3011                 return false;
3012
3013         /*
3014          * #PF can be fast if:
3015          * 1. The shadow page table entry is not present, which could mean that
3016          *    the fault is potentially caused by access tracking (if enabled).
3017          * 2. The shadow page table entry is present and the fault
3018          *    is caused by write-protect, that means we just need change the W
3019          *    bit of the spte which can be done out of mmu-lock.
3020          *
3021          * However, if access tracking is disabled we know that a non-present
3022          * page must be a genuine page fault where we have to create a new SPTE.
3023          * So, if access tracking is disabled, we return true only for write
3024          * accesses to a present page.
3025          */
3026
3027         return shadow_acc_track_mask != 0 ||
3028                ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
3029                 == (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3030 }
3031
3032 /*
3033  * Returns true if the SPTE was fixed successfully. Otherwise,
3034  * someone else modified the SPTE from its original value.
3035  */
3036 static bool
3037 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3038                         u64 *sptep, u64 old_spte, u64 new_spte)
3039 {
3040         gfn_t gfn;
3041
3042         WARN_ON(!sp->role.direct);
3043
3044         /*
3045          * Theoretically we could also set the dirty bit (and flush the TLB)
3046          * here in order to eliminate unnecessary PML logging. See the comments
3047          * in set_spte. But fast_page_fault is very unlikely to happen with PML
3048          * enabled, so we do not do this. This might result in the same GPA
3049          * being logged in the PML buffer again when the write really happens,
3050          * and eventually being passed to mark_page_dirty twice, but that is
3051          * harmless. Skipping it also avoids the TLB flush needed after setting
3052          * the dirty bit, so non-PML cases won't be impacted.
3053          *
3054          * Compare with set_spte, where shadow_dirty_mask is set instead.
3055          */
3056         if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3057                 return false;
3058
3059         if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3060                 /*
3061                  * The gfn of a direct SPTE is stable since it is
3062                  * calculated from sp->gfn.
3063                  */
3064                 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
3065                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3066         }
3067
3068         return true;
3069 }
3070
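/*
 * Check whether an access described by the page fault error code (fetch,
 * write or read) is permitted by the given SPTE.
 */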
3071 static bool is_access_allowed(u32 fault_err_code, u64 spte)
3072 {
3073         if (fault_err_code & PFERR_FETCH_MASK)
3074                 return is_executable_pte(spte);
3075
3076         if (fault_err_code & PFERR_WRITE_MASK)
3077                 return is_writable_pte(spte);
3078
3079         /* Fault was on Read access */
3080         return spte & PT_PRESENT_MASK;
3081 }
3082
3083 /*
3084  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3085  */
3086 static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3087                            u32 error_code)
3088 {
3089         struct kvm_shadow_walk_iterator iterator;
3090         struct kvm_mmu_page *sp;
3091         int ret = RET_PF_INVALID;
3092         u64 spte = 0ull;
3093         uint retry_count = 0;
3094
3095         if (!page_fault_can_be_fast(error_code))
3096                 return ret;
3097
3098         walk_shadow_page_lockless_begin(vcpu);
3099
3100         do {
3101                 u64 new_spte;
3102
3103                 for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
3104                         if (!is_shadow_present_pte(spte))
3105                                 break;
3106
3107                 if (!is_shadow_present_pte(spte))
3108                         break;
3109
3110                 sp = sptep_to_sp(iterator.sptep);
3111                 if (!is_last_spte(spte, sp->role.level))
3112                         break;
3113
3114                 /*
3115                  * Check whether the memory access that caused the fault would
3116                  * still cause it if it were to be performed right now. If not,
3117                  * then this is a spurious fault caused by a lazily flushed TLB
3118                  * entry, or some other CPU has already fixed the PTE after the
3119                  * current CPU took the fault.
3120                  *
3121                  * There is no need to check the access of upper level table
3122                  * entries since they are always ACC_ALL.
3123                  */
3124                 if (is_access_allowed(error_code, spte)) {
3125                         ret = RET_PF_SPURIOUS;
3126                         break;
3127                 }
3128
3129                 new_spte = spte;
3130
3131                 if (is_access_track_spte(spte))
3132                         new_spte = restore_acc_track_spte(new_spte);
3133
3134                 /*
3135                  * Currently, to simplify the code, write-protection can
3136                  * be removed in the fast path only if the SPTE was
3137                  * write-protected for dirty-logging or access tracking.
3138                  */
3139                 if ((error_code & PFERR_WRITE_MASK) &&
3140                     spte_can_locklessly_be_made_writable(spte)) {
3141                         new_spte |= PT_WRITABLE_MASK;
3142
3143                         /*
3144                          * Do not fix write permission on a large SPTE.  Since
3145                          * fast_pf_fix_direct_spte() only marks the first page
3146                          * in the dirty bitmap, the other pages would be missed
3147                          * if the slot has dirty logging enabled.
3148                          *
3149                          * Instead, we let the slow page fault path create a
3150                          * normal spte to fix the access.
3151                          *
3152                          * See the comments in kvm_arch_commit_memory_region().
3153                          */
3154                         if (sp->role.level > PG_LEVEL_4K)
3155                                 break;
3156                 }
3157
3158                 /* Verify that the fault can be handled in the fast path */
3159                 if (new_spte == spte ||
3160                     !is_access_allowed(error_code, new_spte))
3161                         break;
3162
3163                 /*
3164                  * Currently, fast page fault only works for direct mapping
3165                  * since the gfn is not stable for indirect shadow page. See
3166                  * since the gfn is not stable for indirect shadow pages. See
3167                  * Documentation/virt/kvm/locking.rst for more details.
3168                 if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
3169                                             new_spte)) {
3170                         ret = RET_PF_FIXED;
3171                         break;
3172                 }
3173
3174                 if (++retry_count > 4) {
3175                         printk_once(KERN_WARNING
3176                                 "kvm: Fast #PF retrying more than 4 times.\n");
3177                         break;
3178                 }
3179
3180         } while (true);
3181
3182         trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
3183                               spte, ret);
3184         walk_shadow_page_lockless_end(vcpu);
3185
3186         return ret;
3187 }
3188
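/*
 * Drop a reference on the root page.  TDP MMU roots are put directly; for
 * shadow roots, the page is queued on @invalid_list for zapping once the
 * last reference is gone and the root has been invalidated.
 */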
3189 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3190                                struct list_head *invalid_list)
3191 {
3192         struct kvm_mmu_page *sp;
3193
3194         if (!VALID_PAGE(*root_hpa))
3195                 return;
3196
3197         sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
3198
3199         if (is_tdp_mmu_page(sp))
3200                 kvm_tdp_mmu_put_root(kvm, sp, false);
3201         else if (!--sp->root_count && sp->role.invalid)
3202                 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3203
3204         *root_hpa = INVALID_PAGE;
3205 }
3206
3207 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3208 void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3209                         ulong roots_to_free)
3210 {
3211         struct kvm *kvm = vcpu->kvm;
3212         int i;
3213         LIST_HEAD(invalid_list);
3214         bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3215
3216         BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3217
3218         /* Before acquiring the MMU lock, see if we need to do any real work. */
3219         if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
3220                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3221                         if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3222                             VALID_PAGE(mmu->prev_roots[i].hpa))
3223                                 break;
3224
3225                 if (i == KVM_MMU_NUM_PREV_ROOTS)
3226                         return;
3227         }
3228
3229         write_lock(&kvm->mmu_lock);
3230
3231         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3232                 if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3233                         mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3234                                            &invalid_list);
3235
3236         if (free_active_root) {
3237                 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3238                     (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
3239                         mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
3240                 } else if (mmu->pae_root) {
3241                         for (i = 0; i < 4; ++i) {
3242                                 if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3243                                         continue;
3244
3245                                 mmu_free_root_page(kvm, &mmu->pae_root[i],
3246                                                    &invalid_list);
3247                                 mmu->pae_root[i] = INVALID_PAE_ROOT;
3248                         }
3249                 }
3250                 mmu->root_hpa = INVALID_PAGE;
3251                 mmu->root_pgd = 0;
3252         }
3253
3254         kvm_mmu_commit_zap_page(kvm, &invalid_list);
3255         write_unlock(&kvm->mmu_lock);
3256 }
3257 EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3258
3259 void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
3260 {
3261         unsigned long roots_to_free = 0;
3262         hpa_t root_hpa;
3263         int i;
3264
3265         /*
3266          * This should not be called while L2 is active; L2 can't invalidate
3267          * _only_ its own roots, e.g. INVVPID unconditionally exits.
3268          */
3269         WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
3270
3271         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3272                 root_hpa = mmu->prev_roots[i].hpa;
3273                 if (!VALID_PAGE(root_hpa))
3274                         continue;
3275
3276                 if (!to_shadow_page(root_hpa) ||
3277                         to_shadow_page(root_hpa)->role.guest_mode)
3278                         roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3279         }
3280
3281         kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
3282 }
3283 EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
3284
3285
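/*
 * Check that the root gfn is backed by a memslot visible to the vCPU; if
 * not, request a triple fault as the root cannot be shadowed.
 */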
3286 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3287 {
3288         int ret = 0;
3289
3290         if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3291                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3292                 ret = 1;
3293         }
3294
3295         return ret;
3296 }
3297
3298 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
3299                             u8 level, bool direct)
3300 {
3301         struct kvm_mmu_page *sp;
3302
3303         sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
3304         ++sp->root_count;
3305
3306         return __pa(sp->spt);
3307 }
3308
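/*
 * Allocate the root(s) for a direct MMU (TDP or nonpaging), where the root
 * is not tied to a guest page table.
 */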
3309 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3310 {
3311         struct kvm_mmu *mmu = vcpu->arch.mmu;
3312         u8 shadow_root_level = mmu->shadow_root_level;
3313         hpa_t root;
3314         unsigned i;
3315         int r;
3316
3317         write_lock(&vcpu->kvm->mmu_lock);
3318         r = make_mmu_pages_available(vcpu);
3319         if (r < 0)
3320                 goto out_unlock;
3321
3322         if (is_tdp_mmu_enabled(vcpu->kvm)) {
3323                 root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3324                 mmu->root_hpa = root;
3325         } else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3326                 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3327                 mmu->root_hpa = root;
3328         } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3329                 if (WARN_ON_ONCE(!mmu->pae_root)) {
3330                         r = -EIO;
3331                         goto out_unlock;
3332                 }
3333
3334                 for (i = 0; i < 4; ++i) {
3335                         WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3336
3337                         root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
3338                                               i << 30, PT32_ROOT_LEVEL, true);
3339                         mmu->pae_root[i] = root | PT_PRESENT_MASK |
3340                                            shadow_me_mask;
3341                 }
3342                 mmu->root_hpa = __pa(mmu->pae_root);
3343         } else {
3344                 WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3345                 r = -EIO;
3346                 goto out_unlock;
3347         }
3348
3349         /* root_pgd is ignored for direct MMUs. */
3350         mmu->root_pgd = 0;
3351 out_unlock:
3352         write_unlock(&vcpu->kvm->mmu_lock);
3353         return r;
3354 }
3355
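/*
 * Allocate and load the shadow root(s) for a guest that is using paging:
 * the guest's root (and, for PAE guests, its PDPTEs) are shadowed, using
 * the per-MMU pae_root/pml4_root tables when the shadow paging level is
 * deeper than the guest's.
 */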
3356 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3357 {
3358         struct kvm_mmu *mmu = vcpu->arch.mmu;
3359         u64 pdptrs[4], pm_mask;
3360         gfn_t root_gfn, root_pgd;
3361         hpa_t root;
3362         unsigned i;
3363         int r;
3364
3365         root_pgd = mmu->get_guest_pgd(vcpu);
3366         root_gfn = root_pgd >> PAGE_SHIFT;
3367
3368         if (mmu_check_root(vcpu, root_gfn))
3369                 return 1;
3370
3371         /*
3372          * On SVM, reading PDPTRs might access guest memory, which might fault
3373          * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
3374          */
3375         if (mmu->root_level == PT32E_ROOT_LEVEL) {
3376                 for (i = 0; i < 4; ++i) {
3377                         pdptrs[i] = mmu->get_pdptr(vcpu, i);
3378                         if (!(pdptrs[i] & PT_PRESENT_MASK))
3379                                 continue;
3380
3381                         if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
3382                                 return 1;
3383                 }
3384         }
3385
3386         r = alloc_all_memslots_rmaps(vcpu->kvm);
3387         if (r)
3388                 return r;
3389
3390         write_lock(&vcpu->kvm->mmu_lock);
3391         r = make_mmu_pages_available(vcpu);
3392         if (r < 0)
3393                 goto out_unlock;
3394
3395         /*
3396          * Do we shadow a long mode page table? If so we need to
3397          * write-protect the guest's page table root.
3398          */
3399         if (mmu->root_level >= PT64_ROOT_4LEVEL) {
3400                 root = mmu_alloc_root(vcpu, root_gfn, 0,
3401                                       mmu->shadow_root_level, false);
3402                 mmu->root_hpa = root;
3403                 goto set_root_pgd;
3404         }
3405
3406         if (WARN_ON_ONCE(!mmu->pae_root)) {
3407                 r = -EIO;
3408                 goto out_unlock;
3409         }
3410
3411         /*
3412          * We shadow a 32 bit page table. This may be a legacy 2-level
3413          * or a PAE 3-level page table. In either case we need to be aware that
3414          * the shadow page table may be a PAE or a long mode page table.
3415          */
3416         pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3417         if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
3418                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3419
3420                 if (WARN_ON_ONCE(!mmu->pml4_root)) {
3421                         r = -EIO;
3422                         goto out_unlock;
3423                 }
3424
3425                 mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3426         }
3427
3428         for (i = 0; i < 4; ++i) {
3429                 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3430
3431                 if (mmu->root_level == PT32E_ROOT_LEVEL) {
3432                         if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3433                                 mmu->pae_root[i] = INVALID_PAE_ROOT;
3434                                 continue;
3435                         }
3436                         root_gfn = pdptrs[i] >> PAGE_SHIFT;
3437                 }
3438
3439                 root = mmu_alloc_root(vcpu, root_gfn, i << 30,
3440                                       PT32_ROOT_LEVEL, false);
3441                 mmu->pae_root[i] = root | pm_mask;
3442         }
3443
3444         if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
3445                 mmu->root_hpa = __pa(mmu->pml4_root);
3446         else
3447                 mmu->root_hpa = __pa(mmu->pae_root);
3448
3449 set_root_pgd:
3450         mmu->root_pgd = root_pgd;
3451 out_unlock:
3452         write_unlock(&vcpu->kvm->mmu_lock);
3453
3454         return r;
3455 }
3456
3457 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3458 {
3459         struct kvm_mmu *mmu = vcpu->arch.mmu;
3460         u64 *pml4_root, *pae_root;
3461
3462         /*
3463          * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3464          * tables are allocated and initialized at root creation as there is no
3465          * equivalent level in the guest's NPT to shadow.  Allocate the tables
3466          * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3467          */
3468         if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
3469             mmu->shadow_root_level < PT64_ROOT_4LEVEL)
3470                 return 0;
3471
3472         /*
3473          * This mess only works with 4-level paging and needs to be updated to
3474          * work with 5-level paging.
3475          */
3476         if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
3477                 return -EIO;
3478
3479         if (mmu->pae_root && mmu->pml4_root)
3480                 return 0;
3481
3482         /*
3483          * The special roots should always be allocated in concert.  Yell and
3484          * bail if KVM ends up in a state where only one of the roots is valid.
3485          */
3486         if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root))
3487                 return -EIO;
3488
3489         /*
3490          * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
3491          * doesn't need to be decrypted.
3492          */
3493         pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3494         if (!pae_root)
3495                 return -ENOMEM;
3496
3497         pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3498         if (!pml4_root) {
3499                 free_page((unsigned long)pae_root);
3500                 return -ENOMEM;
3501         }
3502
3503         mmu->pae_root = pae_root;
3504         mmu->pml4_root = pml4_root;
3505
3506         return 0;
3507 }
3508
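/*
 * Bring the shadow pages reachable from the active root(s) back in sync
 * with the guest's page tables by processing any unsync children.
 */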
3509 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3510 {
3511         int i;
3512         struct kvm_mmu_page *sp;
3513
3514         if (vcpu->arch.mmu->direct_map)
3515                 return;
3516
3517         if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3518                 return;
3519
3520         vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3521
3522         if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3523                 hpa_t root = vcpu->arch.mmu->root_hpa;
3524                 sp = to_shadow_page(root);
3525
3526                 /*
3527                  * Even if another CPU was marking the SP as unsync-ed
3528                  * simultaneously, any guest page table changes are not
3529                  * guaranteed to be visible anyway until this VCPU issues a TLB
3530                  * flush strictly after those changes are made. We only need to
3531                  * ensure that the other CPU sets these flags before any actual
3532                  * changes to the page tables are made. The comments in
3533                  * mmu_try_to_unsync_pages() describe what could go wrong if
3534                  * this requirement isn't satisfied.
3535                  */
3536                 if (!smp_load_acquire(&sp->unsync) &&
3537                     !smp_load_acquire(&sp->unsync_children))
3538                         return;
3539
3540                 write_lock(&vcpu->kvm->mmu_lock);
3541                 kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3542
3543                 mmu_sync_children(vcpu, sp);
3544
3545                 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3546                 write_unlock(&vcpu->kvm->mmu_lock);
3547                 return;
3548         }
3549
3550         write_lock(&vcpu->kvm->mmu_lock);
3551         kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3552
3553         for (i = 0; i < 4; ++i) {
3554                 hpa_t root = vcpu->arch.mmu->pae_root[i];
3555
3556                 if (IS_VALID_PAE_ROOT(root)) {
3557                         root &= PT64_BASE_ADDR_MASK;
3558                         sp = to_shadow_page(root);
3559                         mmu_sync_children(vcpu, sp);
3560                 }
3561         }
3562
3563         kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3564         write_unlock(&vcpu->kvm->mmu_lock);
3565 }
3566
3567 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3568                                   u32 access, struct x86_exception *exception)
3569 {
3570         if (exception)
3571                 exception->error_code = 0;
3572         return vaddr;
3573 }
3574
3575 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3576                                          u32 access,
3577                                          struct x86_exception *exception)
3578 {
3579         if (exception)
3580                 exception->error_code = 0;
3581         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3582 }
3583
3584 static bool
3585 __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
3586 {
3587         int bit7 = (pte >> 7) & 1;
3588
3589         return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
3590 }
3591
3592 static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
3593 {
3594         return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
3595 }
3596
3597 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3598 {
3599         /*
3600          * A nested guest cannot use the MMIO cache if it is using nested
3601          * page tables, because cr2 is a nGPA while the cache stores GPAs.
3602          */
3603         if (mmu_is_nested(vcpu))
3604                 return false;
3605
3606         if (direct)
3607                 return vcpu_match_mmio_gpa(vcpu, addr);
3608
3609         return vcpu_match_mmio_gva(vcpu, addr);
3610 }
3611
3612 /*
3613  * Return the level of the lowest level SPTE added to sptes.
3614  * That SPTE may be non-present.
3615  */
3616 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3617 {
3618         struct kvm_shadow_walk_iterator iterator;
3619         int leaf = -1;
3620         u64 spte;
3621
3622         walk_shadow_page_lockless_begin(vcpu);
3623
3624         for (shadow_walk_init(&iterator, vcpu, addr),
3625              *root_level = iterator.level;
3626              shadow_walk_okay(&iterator);
3627              __shadow_walk_next(&iterator, spte)) {
3628                 leaf = iterator.level;
3629                 spte = mmu_spte_get_lockless(iterator.sptep);
3630
3631                 sptes[leaf] = spte;
3632
3633                 if (!is_shadow_present_pte(spte))
3634                         break;
3635         }
3636
3637         walk_shadow_page_lockless_end(vcpu);
3638
3639         return leaf;
3640 }
3641
3642 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
3643 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3644 {
3645         u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
3646         struct rsvd_bits_validate *rsvd_check;
3647         int root, leaf, level;
3648         bool reserved = false;
3649
3650         if (is_tdp_mmu(vcpu->arch.mmu))
3651                 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
3652         else
3653                 leaf = get_walk(vcpu, addr, sptes, &root);
3654
3655         if (unlikely(leaf < 0)) {
3656                 *sptep = 0ull;
3657                 return reserved;
3658         }
3659
3660         *sptep = sptes[leaf];
3661
3662         /*
3663          * Skip reserved bits checks on the terminal leaf if it's not a valid
3664          * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
3665          * design, always have reserved bits set.  The purpose of the checks is
3666          * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
3667          */
3668         if (!is_shadow_present_pte(sptes[leaf]))
3669                 leaf++;
3670
3671         rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
3672
3673         for (level = root; level >= leaf; level--)
3674                 /*
3675                  * Use a bitwise-OR instead of a logical-OR to aggregate the
3676                  * reserved bit and EPT's invalid memtype/XWR checks to avoid
3677                  * adding a Jcc in the loop.
3678                  */
3679                 reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
3680                             __is_rsvd_bits_set(rsvd_check, sptes[level], level);
3681
3682         if (reserved) {
3683                 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3684                        __func__, addr);
3685                 for (level = root; level >= leaf; level--)
3686                         pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx\n",
3687                                sptes[level], level,
3688                                rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]);
3689         }
3690
3691         return reserved;
3692 }
3693
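/*
 * Handle a fault flagged as MMIO, i.e. one whose SPTE has reserved bits
 * set: emulate the access if a valid MMIO SPTE or cached MMIO info is
 * found, otherwise retry the fault or report the corrupted SPTE.
 */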
3694 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3695 {
3696         u64 spte;
3697         bool reserved;
3698
3699         if (mmio_info_in_cache(vcpu, addr, direct))
3700                 return RET_PF_EMULATE;
3701
3702         reserved = get_mmio_spte(vcpu, addr, &spte);
3703         if (WARN_ON(reserved))
3704                 return -EINVAL;
3705
3706         if (is_mmio_spte(spte)) {
3707                 gfn_t gfn = get_mmio_spte_gfn(spte);
3708                 unsigned int access = get_mmio_spte_access(spte);
3709
3710                 if (!check_mmio_spte(vcpu, spte))
3711                         return RET_PF_INVALID;
3712
3713                 if (direct)
3714                         addr = 0;
3715
3716                 trace_handle_mmio_page_fault(addr, gfn, access);
3717                 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3718                 return RET_PF_EMULATE;
3719         }
3720
3721         /*
3722          * If the page table has been zapped by another CPU, let the CPU
3723          * fault again on the address.
3724          */
3725         return RET_PF_RETRY;
3726 }
3727
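/*
 * Returns true if the faulting write hits a gfn that is write-tracked;
 * such faults cannot be fixed by the fault handler and are sent to the
 * emulator instead.
 */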
3728 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3729                                          u32 error_code, gfn_t gfn)
3730 {
3731         if (unlikely(error_code & PFERR_RSVD_MASK))
3732                 return false;
3733
3734         if (!(error_code & PFERR_PRESENT_MASK) ||
3735               !(error_code & PFERR_WRITE_MASK))
3736                 return false;
3737
3738         /*
3739          * The guest is writing a page that is write-tracked, which cannot
3740          * be fixed by the page fault handler.
3741          */
3742         if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
3743                 return true;
3744
3745         return false;
3746 }
3747
3748 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3749 {
3750         struct kvm_shadow_walk_iterator iterator;
3751         u64 spte;
3752
3753         walk_shadow_page_lockless_begin(vcpu);
3754         for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3755                 clear_sp_write_flooding_count(iterator.sptep);
3756                 if (!is_shadow_present_pte(spte))
3757                         break;
3758         }
3759         walk_shadow_page_lockless_end(vcpu);
3760 }
3761
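/*
 * Queue an async page fault for the gfn.  The token encodes the apf id and
 * the vcpu id so the completion can later be matched back to this fault.
 */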
3762 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3763                                     gfn_t gfn)
3764 {
3765         struct kvm_arch_async_pf arch;
3766
3767         arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3768         arch.gfn = gfn;
3769         arch.direct_map = vcpu->arch.mmu->direct_map;
3770         arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3771
3772         return kvm_setup_async_pf(vcpu, cr2_or_gpa,
3773                                   kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3774 }
3775
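/*
 * Resolve the pfn for @gfn, preferring a non-blocking lookup.  Returns true
 * if the fault should simply be retried, either because the memslot is
 * going away or because an async page fault was queued to bring the page
 * in; returns false with *pfn set otherwise.
 */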
3776 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3777                          gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
3778                          bool write, bool *writable)
3779 {
3780         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3781         bool async;
3782
3783         /*
3784          * Retry the page fault if the gfn hit a memslot that is being deleted
3785          * or moved.  This ensures any existing SPTEs for the old memslot will
3786          * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3787          */
3788         if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
3789                 return true;
3790
3791         /* Don't expose private memslots to L2. */
3792         if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
3793                 *pfn = KVM_PFN_NOSLOT;
3794                 *writable = false;
3795                 return false;
3796         }
3797
3798         async = false;
3799         *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async,
3800                                     write, writable, hva);
3801         if (!async)
3802                 return false; /* *pfn has correct page already */
3803
3804         if (!prefault && kvm_can_do_async_pf(vcpu)) {
3805                 trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
3806                 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
3807                         trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
3808                         kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3809                         return true;
3810                 } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
3811                         return true;
3812         }
3813
3814         *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
3815                                     write, writable, hva);
3816         return false;
3817 }
3818
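/*
 * Common fault handler for direct MMUs (TDP and nonpaging): try the
 * lockless fast path first, resolve the pfn, then install the mapping
 * under the appropriate flavor of mmu_lock.
 */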
3819 static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
3820                              bool prefault, int max_level, bool is_tdp)
3821 {
3822         bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
3823         bool write = error_code & PFERR_WRITE_MASK;
3824         bool map_writable;
3825
3826         gfn_t gfn = gpa >> PAGE_SHIFT;
3827         unsigned long mmu_seq;
3828         kvm_pfn_t pfn;
3829         hva_t hva;
3830         int r;
3831
3832         if (page_fault_handle_page_track(vcpu, error_code, gfn))
3833                 return RET_PF_EMULATE;
3834
3835         if (!is_tdp_mmu_fault) {
3836                 r = fast_page_fault(vcpu, gpa, error_code);
3837                 if (r != RET_PF_INVALID)
3838                         return r;
3839         }
3840
3841         r = mmu_topup_memory_caches(vcpu, false);
3842         if (r)
3843                 return r;
3844
3845         mmu_seq = vcpu->kvm->mmu_notifier_seq;
3846         smp_rmb();
3847
3848         if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva,
3849                          write, &map_writable))
3850                 return RET_PF_RETRY;
3851
3852         if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
3853                 return r;
3854
3855         r = RET_PF_RETRY;
3856
3857         if (is_tdp_mmu_fault)
3858                 read_lock(&vcpu->kvm->mmu_lock);
3859         else
3860                 write_lock(&vcpu->kvm->mmu_lock);
3861
3862         if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
3863                 goto out_unlock;
3864         r = make_mmu_pages_available(vcpu);
3865         if (r)
3866                 goto out_unlock;
3867
3868         if (is_tdp_mmu_fault)
3869                 r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
3870                                     pfn, prefault);
3871         else
3872                 r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
3873                                  prefault, is_tdp);
3874
3875 out_unlock:
3876         if (is_tdp_mmu_fault)
3877                 read_unlock(&vcpu->kvm->mmu_lock);
3878         else
3879                 write_unlock(&vcpu->kvm->mmu_lock);
3880         kvm_release_pfn_clean(pfn);
3881         return r;
3882 }
3883
3884 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
3885                                 u32 error_code, bool prefault)
3886 {
3887         pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
3888
3889         /* This path builds a PAE page table, so 2MB pages are the largest we can map. */
3890         return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
3891                                  PG_LEVEL_2M, false);
3892 }
3893
3894 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3895                                 u64 fault_address, char *insn, int insn_len)
3896 {
3897         int r = 1;
3898         u32 flags = vcpu->arch.apf.host_apf_flags;
3899
3900 #ifndef CONFIG_X86_64
3901         /* A 64-bit CR2 should be impossible on 32-bit KVM. */
3902         if (WARN_ON_ONCE(fault_address >> 32))
3903                 return -EFAULT;
3904 #endif
3905
3906         vcpu->arch.l1tf_flush_l1d = true;
3907         if (!flags) {
3908                 trace_kvm_page_fault(fault_address, error_code);
3909
3910                 if (kvm_event_needs_reinjection(vcpu))
3911                         kvm_mmu_unprotect_page_virt(vcpu, fault_address);
3912                 r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
3913                                 insn_len);
3914         } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
3915                 vcpu->arch.apf.host_apf_flags = 0;
3916                 local_irq_disable();
3917                 kvm_async_pf_task_wait_schedule(fault_address);
3918                 local_irq_enable();
3919         } else {
3920                 WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
3921         }
3922
3923         return r;
3924 }
3925 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
3926
3927 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
3928                        bool prefault)
3929 {
3930         int max_level;
3931
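        /*
         * Shrink max_level until the MTRR memory type is consistent across
         * the whole candidate huge page; a range with mixed memory types
         * must be mapped at a smaller granularity.
         */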
3932         for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
3933              max_level > PG_LEVEL_4K;
3934              max_level--) {
3935                 int page_num = KVM_PAGES_PER_HPAGE(max_level);
3936                 gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
3937
3938                 if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
3939                         break;
3940         }
3941
3942         return direct_page_fault(vcpu, gpa, error_code, prefault,
3943                                  max_level, true);
3944 }
3945
3946 static void nonpaging_init_context(struct kvm_mmu *context)
3947 {
3948         context->page_fault = nonpaging_page_fault;
3949         context->gva_to_gpa = nonpaging_gva_to_gpa;
3950         context->sync_page = nonpaging_sync_page;
3951         context->invlpg = NULL;
3952         context->root_level = 0;
3953         context->direct_map = true;
3954 }
3955
3956 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
3957                                   union kvm_mmu_page_role role)
3958 {
3959         return (role.direct || pgd == root->pgd) &&
3960                VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
3961                role.word == to_shadow_page(root->hpa)->role.word;
3962 }
3963
3964 /*
3965  * Find out if a previously cached root matching the new pgd/role is available.
3966  * The current root is also inserted into the cache.
3967  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
3968  * returned.
3969  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
3970  * false is returned. This root should now be freed by the caller.
3971  */
3972 static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3973                                   union kvm_mmu_page_role new_role)
3974 {
3975         uint i;
3976         struct kvm_mmu_root_info root;
3977         struct kvm_mmu *mmu = vcpu->arch.mmu;
3978
3979         root.pgd = mmu->root_pgd;
3980         root.hpa = mmu->root_hpa;
3981
3982         if (is_root_usable(&root, new_pgd, new_role))
3983                 return true;
3984
3985         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3986                 swap(root, mmu->prev_roots[i]);
3987
3988                 if (is_root_usable(&root, new_pgd, new_role))
3989                         break;
3990         }
3991
3992         mmu->root_hpa = root.hpa;
3993         mmu->root_pgd = root.pgd;
3994
3995         return i < KVM_MMU_NUM_PREV_ROOTS;
3996 }
3997
3998 static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3999                             union kvm_mmu_page_role new_role)
4000 {
4001         struct kvm_mmu *mmu = vcpu->arch.mmu;
4002
4003         /*
4004          * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
4005          * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
4006          * later if necessary.
4007          */
4008         if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
4009             mmu->root_level >= PT64_ROOT_4LEVEL)
4010                 return cached_root_available(vcpu, new_pgd, new_role);
4011
4012         return false;
4013 }
4014
4015 static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4016                               union kvm_mmu_page_role new_role)
4017 {
4018         if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
4019                 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
4020                 return;
4021         }
4022
4023         /*
4024          * It's possible that the cached previous root page is obsolete because
4025          * of a change in the MMU generation number. However, changing the
4026          * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
4027          * free the root set here and allocate a new one.
4028          */
4029         kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4030
4031         if (force_flush_and_sync_on_reuse) {
4032                 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4033                 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4034         }
4035
4036         /*
4037          * The last MMIO access's GVA and GPA are cached in the VCPU. When
4038          * switching to a new CR3, that GVA->GPA mapping may no longer be
4039          * valid. So clear any cached MMIO info even when we don't need to sync
4040          * the shadow page tables.
4041          */
4042         vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4043
4044         /*
4045          * If this is a direct root page, it doesn't have a write flooding
4046          * count. Otherwise, clear the write flooding count.
4047          */
4048         if (!new_role.direct)
4049                 __clear_sp_write_flooding_count(
4050                                 to_shadow_page(vcpu->arch.mmu->root_hpa));
4051 }
4052
4053 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4054 {
4055         __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
4056 }
4057 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4058
4059 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
4060 {
4061         return kvm_read_cr3(vcpu);
4062 }
4063
4064 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4065                            unsigned int access, int *nr_present)
4066 {
4067         if (unlikely(is_mmio_spte(*sptep))) {
4068                 if (gfn != get_mmio_spte_gfn(*sptep)) {
4069                         mmu_spte_clear_no_track(sptep);
4070                         return true;
4071                 }
4072
4073                 (*nr_present)++;
4074                 mark_mmio_spte(vcpu, sptep, gfn, access);
4075                 return true;
4076         }
4077
4078         return false;
4079 }
4080
4081 static inline bool is_last_gpte(struct kvm_mmu *mmu,
4082                                 unsigned level, unsigned gpte)
4083 {
4084         /*
4085          * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
4086          * If it is clear, there are no large pages at this level, so clear
4087          * PT_PAGE_SIZE_MASK in gpte if that is the case.
4088          */
4089         gpte &= level - mmu->last_nonleaf_level;
4090
4091         /*
4092          * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
4093          * iff level <= PG_LEVEL_4K, which for our purpose means
4094          * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
4095          */
4096         gpte |= level - PG_LEVEL_4K - 1;
4097
4098         return gpte & PT_PAGE_SIZE_MASK;
4099 }
4100
4101 #define PTTYPE_EPT 18 /* arbitrary */
4102 #define PTTYPE PTTYPE_EPT
4103 #include "paging_tmpl.h"
4104 #undef PTTYPE
4105
4106 #define PTTYPE 64
4107 #include "paging_tmpl.h"
4108 #undef PTTYPE
4109
4110 #define PTTYPE 32
4111 #include "paging_tmpl.h"
4112 #undef PTTYPE
4113
4114 static void
4115 __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
4116                         u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4117                         bool pse, bool amd)
4118 {
4119         u64 gbpages_bit_rsvd = 0;
4120         u64 nonleaf_bit8_rsvd = 0;
4121         u64 high_bits_rsvd;
4122
4123         rsvd_check->bad_mt_xwr = 0;
4124
4125         if (!gbpages)
4126                 gbpages_bit_rsvd = rsvd_bits(7, 7);
4127
4128         if (level == PT32E_ROOT_LEVEL)
4129                 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
4130         else
4131                 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4132
4133         /* Note, NX doesn't exist in PDPTEs, this is handled below. */
4134         if (!nx)
4135                 high_bits_rsvd |= rsvd_bits(63, 63);
4136
4137         /*
4138          * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4139          * leaf entries) on AMD CPUs only.
4140          */
4141         if (amd)
4142                 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4143
4144         switch (level) {
4145         case PT32_ROOT_LEVEL:
4146                 /* no rsvd bits for 2 level 4K page table entries */
4147                 rsvd_check->rsvd_bits_mask[0][1] = 0;
4148                 rsvd_check->rsvd_bits_mask[0][0] = 0;
4149                 rsvd_check->rsvd_bits_mask[1][0] =
4150                         rsvd_check->rsvd_bits_mask[0][0];
4151
4152                 if (!pse) {
4153                         rsvd_check->rsvd_bits_mask[1][1] = 0;
4154                         break;
4155                 }
4156
4157                 if (is_cpuid_PSE36())
4158                         /* 36bits PSE 4MB page */
4159                         rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4160                 else
4161                         /* 32 bits PSE 4MB page */
4162                         rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4163                 break;
4164         case PT32E_ROOT_LEVEL:
4165                 rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
4166                                                    high_bits_rsvd |
4167                                                    rsvd_bits(5, 8) |
4168                                                    rsvd_bits(1, 2);     /* PDPTE */
4169                 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;      /* PDE */
4170                 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;      /* PTE */
4171                 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4172                                                    rsvd_bits(13, 20);   /* large page */
4173                 rsvd_check->rsvd_bits_mask[1][0] =
4174                         rsvd_check->rsvd_bits_mask[0][0];
4175                 break;
4176         case PT64_ROOT_5LEVEL:
4177                 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
4178                                                    nonleaf_bit8_rsvd |
4179                                                    rsvd_bits(7, 7);
4180                 rsvd_check->rsvd_bits_mask[1][4] =
4181                         rsvd_check->rsvd_bits_mask[0][4];
4182                 fallthrough;
4183         case PT64_ROOT_4LEVEL:
4184                 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
4185                                                    nonleaf_bit8_rsvd |
4186                                                    rsvd_bits(7, 7);
4187                 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
4188                                                    gbpages_bit_rsvd;
4189                 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
4190                 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4191                 rsvd_check->rsvd_bits_mask[1][3] =
4192                         rsvd_check->rsvd_bits_mask[0][3];
4193                 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
4194                                                    gbpages_bit_rsvd |
4195                                                    rsvd_bits(13, 29);
4196                 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4197                                                    rsvd_bits(13, 20); /* large page */
4198                 rsvd_check->rsvd_bits_mask[1][0] =
4199                         rsvd_check->rsvd_bits_mask[0][0];
4200                 break;
4201         }
4202 }
4203
4204 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4205                                   struct kvm_mmu *context)
4206 {
4207         __reset_rsvds_bits_mask(&context->guest_rsvd_check,
4208                                 vcpu->arch.reserved_gpa_bits,
4209                                 context->root_level, is_efer_nx(context),
4210                                 guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4211                                 is_cr4_pse(context),
4212                                 guest_cpuid_is_amd_or_hygon(vcpu));
4213 }
4214
4215 static void
4216 __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
4217                             u64 pa_bits_rsvd, bool execonly)
4218 {
4219         u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4220         u64 bad_mt_xwr;
4221
4222         rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
4223         rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
4224         rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
4225         rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
4226         rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4227
4228         /* large page */
4229         rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4230         rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4231         rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
4232         rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
4233         rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4234
4235         bad_mt_xwr = 0xFFull << (2 * 8);        /* bits 3..5 must not be 2 */
4236         bad_mt_xwr |= 0xFFull << (3 * 8);       /* bits 3..5 must not be 3 */
4237         bad_mt_xwr |= 0xFFull << (7 * 8);       /* bits 3..5 must not be 7 */
4238         bad_mt_xwr |= REPEAT_BYTE(1ull << 2);   /* bits 0..2 must not be 010 */
4239         bad_mt_xwr |= REPEAT_BYTE(1ull << 6);   /* bits 0..2 must not be 110 */
4240         if (!execonly) {
4241                 /* bits 0..2 must not be 100 unless VMX capabilities allow it */
4242                 bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4243         }
4244         rsvd_check->bad_mt_xwr = bad_mt_xwr;
4245 }
4246
4247 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4248                 struct kvm_mmu *context, bool execonly)
4249 {
4250         __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4251                                     vcpu->arch.reserved_gpa_bits, execonly);
4252 }
4253
4254 static inline u64 reserved_hpa_bits(void)
4255 {
4256         return rsvd_bits(shadow_phys_bits, 63);
4257 }
4258
4259 /*
4260  * The page tables on the host are shadow page tables for the page
4261  * tables in the guest (or an AMD nested guest); their MMU features
4262  * completely follow the features of the guest.
4263  */
4264 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4265                                         struct kvm_mmu *context)
4266 {
4267         /*
4268          * KVM uses NX when TDP is disabled to handle a variety of scenarios,
4269          * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
4270          * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
4271          * The iTLB multi-hit workaround can be toggled at any time, so assume
4272          * NX can be used by any non-nested shadow MMU to avoid having to reset
4273          * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
4274          */
4275         bool uses_nx = is_efer_nx(context) || !tdp_enabled;
4276
4277         /* @amd adds a check on bit 8 of SPTEs, which KVM shouldn't use anyway. */
4278         bool is_amd = true;
4279         /* KVM doesn't use 2-level page tables for the shadow MMU. */
4280         bool is_pse = false;
4281         struct rsvd_bits_validate *shadow_zero_check;
4282         int i;
4283
4284         WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
4285
4286         shadow_zero_check = &context->shadow_zero_check;
4287         __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4288                                 context->shadow_root_level, uses_nx,
4289                                 guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4290                                 is_pse, is_amd);
4291
4292         if (!shadow_me_mask)
4293                 return;
4294
4295         for (i = context->shadow_root_level; --i >= 0;) {
4296                 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4297                 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4298         }
4299
4300 }
4301
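/*
 * This relies on shadow_x_mask being non-zero only when EPT is in use, as
 * only EPT has a dedicated executable bit in its PTEs; with TDP enabled, a
 * zero mask therefore implies AMD NPT.
 */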
4302 static inline bool boot_cpu_is_amd(void)
4303 {
4304         WARN_ON_ONCE(!tdp_enabled);
4305         return shadow_x_mask == 0;
4306 }
4307
4308 /*
4309  * For the direct page tables on the host, use as many MMU features as
4310  * possible; however, KVM currently does not do execution-protection.
4311  */
4312 static void
4313 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4314                                 struct kvm_mmu *context)
4315 {
4316         struct rsvd_bits_validate *shadow_zero_check;
4317         int i;
4318
4319         shadow_zero_check = &context->shadow_zero_check;
4320
4321         if (boot_cpu_is_amd())
4322                 __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4323                                         context->shadow_root_level, false,
4324                                         boot_cpu_has(X86_FEATURE_GBPAGES),
4325                                         false, true);
4326         else
4327                 __reset_rsvds_bits_mask_ept(shadow_zero_check,
4328                                             reserved_hpa_bits(), false);
4329
4330         if (!shadow_me_mask)
4331                 return;
4332
4333         for (i = context->shadow_root_level; --i >= 0;) {
4334                 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4335                 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4336         }
4337 }
4338
4339 /*
4340  * As in reset_shadow_zero_bits_mask(), except this applies to the shadow
4341  * page tables for an Intel (EPT) nested guest.
4342  */
4343 static void
4344 reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4345                                 struct kvm_mmu *context, bool execonly)
4346 {
4347         __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
4348                                     reserved_hpa_bits(), execonly);
4349 }
4350
4351 #define BYTE_MASK(access) \
4352         ((1 & (access) ? 2 : 0) | \
4353          (2 & (access) ? 4 : 0) | \
4354          (3 & (access) ? 8 : 0) | \
4355          (4 & (access) ? 16 : 0) | \
4356          (5 & (access) ? 32 : 0) | \
4357          (6 & (access) ? 64 : 0) | \
4358          (7 & (access) ? 128 : 0))
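/*
 * BYTE_MASK(access) sets bit i (1 <= i <= 7) iff permission combination i
 * includes @access; e.g. with ACC_EXEC_MASK being bit 0, BYTE_MASK(ACC_EXEC_MASK)
 * evaluates to 0xaa.  Indexing such a mask by a page's combined U/W/X bits
 * then tests whether the page has that permission.
 */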
4359
4360
4361 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
4362 {
4363         unsigned byte;
4364
4365         const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4366         const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4367         const u8 u = BYTE_MASK(ACC_USER_MASK);
4368
4369         bool cr4_smep = is_cr4_smep(mmu);
4370         bool cr4_smap = is_cr4_smap(mmu);
4371         bool cr0_wp = is_cr0_wp(mmu);
4372         bool efer_nx = is_efer_nx(mmu);
4373
4374         for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4375                 unsigned pfec = byte << 1;
4376
4377                 /*
4378                  * Each "*f" variable has a 1 bit for each UWX value
4379                  * that causes a fault with the given PFEC.
4380                  */
4381
4382                 /* Faults from writes to non-writable pages */
4383                 u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4384                 /* Faults from user mode accesses to supervisor pages */
4385                 u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4386                 /* Faults from fetches of non-executable pages */
4387                 u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4388                 /* Faults from kernel mode fetches of user pages */
4389                 u8 smepf = 0;
4390                 /* Faults from kernel mode accesses of user pages */
4391                 u8 smapf = 0;
4392
4393                 if (!ept) {
4394                         /* Faults from kernel mode accesses to user pages */
4395                         u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4396
4397                         /* Not really needed: !nx will cause pte.nx to fault */
4398                         if (!efer_nx)
4399                                 ff = 0;
4400
4401                         /* Allow supervisor writes if !cr0.wp */
4402                         if (!cr0_wp)
4403                                 wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4404
4405                         /* Disallow supervisor fetches of user code if cr4.smep */
4406                         if (cr4_smep)
4407                                 smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4408
4409                         /*
4410                          * SMAP: kernel-mode data accesses from user-mode
4411                          * mappings should fault. A fault is considered
4412                          * as a SMAP violation if all of the following
4413                          * conditions are true:
4414                          *   - X86_CR4_SMAP is set in CR4
4415                          *   - A user page is accessed
4416                          *   - The access is not a fetch
4417                          *   - Page fault in kernel mode
4418                          *   - if CPL = 3 or X86_EFLAGS_AC is clear
4419                          *
4420                          * Here, we cover the first three conditions.
4421                          * The fourth is computed dynamically in permission_fault();
4422                          * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4423                          * *not* subject to SMAP restrictions.
4424                          */
4425                         if (cr4_smap)
4426                                 smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4427                 }
4428
4429                 mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4430         }
4431 }
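
/*
 * Illustrative sketch (editor's note) of how the bitmap built above is
 * consumed at fault time.  The real lookup lives in permission_fault() in
 * mmu.h and additionally folds the dynamic SMAP state into the index via
 * the PFERR_RSVD_MASK trick described above; roughly:
 *
 *      int index  = pfec >> 1;
 *      bool fault = (mmu->permissions[index] >> pte_access) & 1;
 *
 * where pte_access is the 3-bit UWX value gathered while walking the
 * guest page tables.
 */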
4432
4433 /*
4434  * PKU is an additional mechanism by which paging controls access to
4435  * user-mode addresses based on the value in the PKRU register.  Protection
4436  * key violations are reported through a bit in the page fault error code.
4437  * Unlike other bits of the error code, the PK bit is not known at the
4438  * call site of e.g. gva_to_gpa; it must be computed directly in
4439  * permission_fault based on two bits of PKRU, on some machine state (CR4,
4440  * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4441  *
4442  * In particular the following conditions come from the error code, the
4443  * page tables and the machine state:
4444  * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4445  * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4446  * - PK is always zero if U=0 in the page tables
4447  * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4448  *
4449  * The PKRU bitmask caches the result of these four conditions.  The error
4450  * code (minus the P bit) and the page table's U bit form an index into the
4451  * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4452  * with the two bits of the PKRU register corresponding to the protection key.
4453  * For the first three conditions above the bits will be 00, thus masking
4454  * away both AD and WD.  For all reads or if the last condition holds, only
4455  * WD will be masked away.
4456  */
4457 static void update_pkru_bitmask(struct kvm_mmu *mmu)
4458 {
4459         unsigned bit;
4460         bool wp;
4461
4462         if (!is_cr4_pke(mmu)) {
4463                 mmu->pkru_mask = 0;
4464                 return;
4465         }
4466
4467         wp = is_cr0_wp(mmu);
4468
4469         for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4470                 unsigned pfec, pkey_bits;
4471                 bool check_pkey, check_write, ff, uf, wf, pte_user;
4472
4473                 pfec = bit << 1;
4474                 ff = pfec & PFERR_FETCH_MASK;
4475                 uf = pfec & PFERR_USER_MASK;
4476                 wf = pfec & PFERR_WRITE_MASK;
4477
4478                 /* PFEC.RSVD is replaced by ACC_USER_MASK. */
4479                 pte_user = pfec & PFERR_RSVD_MASK;
4480
4481                 /*
4482                  * We only need to check accesses that are not
4483                  * instruction fetches and that are to a user page.
4484                  */
4485                 check_pkey = (!ff && pte_user);
4486                 /*
4487                  * write access is controlled by PKRU if it is a
4488                  * user access or CR0.WP = 1.
4489                  */
4490                 check_write = check_pkey && wf && (uf || wp);
4491
4492                 /* PKRU.AD stops both read and write access. */
4493                 pkey_bits = !!check_pkey;
4494                 /* PKRU.WD stops write access. */
4495                 pkey_bits |= (!!check_write) << 1;
4496
4497                 mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4498         }
4499 }
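
/*
 * Illustrative sketch (editor's note, paraphrasing the logic that consumes
 * pkru_mask in permission_fault(); exact variable names may differ):
 *
 *      u32 pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
 *      u32 offset    = (pfec & ~1) +
 *                      ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
 *
 *      pkru_bits &= mmu->pkru_mask >> offset;
 *      fault     |= (pkru_bits != 0);
 *
 * i.e. the gpte's U bit is shifted into the PFERR_RSVD position of the
 * index (matching the "PFEC.RSVD is replaced by ACC_USER_MASK" comment
 * above), and the two extracted bits then mask PKRU.AD/PKRU.WD for the
 * page's protection key.
 */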
4500
4501 static void update_last_nonleaf_level(struct kvm_mmu *mmu)
4502 {
4503         unsigned root_level = mmu->root_level;
4504
4505         mmu->last_nonleaf_level = root_level;
4506         if (root_level == PT32_ROOT_LEVEL && is_cr4_pse(mmu))
4507                 mmu->last_nonleaf_level++;
4508 }
4509
4510 static void paging64_init_context_common(struct kvm_mmu *context,
4511                                          int root_level)
4512 {
4513         context->root_level = root_level;
4514
4515         WARN_ON_ONCE(!is_cr4_pae(context));
4516         context->page_fault = paging64_page_fault;
4517         context->gva_to_gpa = paging64_gva_to_gpa;
4518         context->sync_page = paging64_sync_page;
4519         context->invlpg = paging64_invlpg;
4520         context->direct_map = false;
4521 }
4522
4523 static void paging64_init_context(struct kvm_mmu *context,
4524                                   struct kvm_mmu_role_regs *regs)
4525 {
4526         int root_level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
4527                                                  PT64_ROOT_4LEVEL;
4528
4529         paging64_init_context_common(context, root_level);
4530 }
4531
4532 static void paging32_init_context(struct kvm_mmu *context)
4533 {
4534         context->root_level = PT32_ROOT_LEVEL;
4535         context->page_fault = paging32_page_fault;
4536         context->gva_to_gpa = paging32_gva_to_gpa;
4537         context->sync_page = paging32_sync_page;
4538         context->invlpg = paging32_invlpg;
4539         context->direct_map = false;
4540 }
4541
4542 static void paging32E_init_context(struct kvm_mmu *context)
4543 {
4544         paging64_init_context_common(context, PT32E_ROOT_LEVEL);
4545 }
4546
4547 static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
4548                                                          struct kvm_mmu_role_regs *regs)
4549 {
4550         union kvm_mmu_extended_role ext = {0};
4551
4552         if (____is_cr0_pg(regs)) {
4553                 ext.cr0_pg = 1;
4554                 ext.cr4_pae = ____is_cr4_pae(regs);
4555                 ext.cr4_smep = ____is_cr4_smep(regs);
4556                 ext.cr4_smap = ____is_cr4_smap(regs);
4557                 ext.cr4_pse = ____is_cr4_pse(regs);
4558
4559                 /* PKEY and LA57 are active iff long mode is active. */
4560                 ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
4561                 ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
4562         }
4563
4564         ext.valid = 1;
4565
4566         return ext;
4567 }
4568
4569 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
4570                                                    struct kvm_mmu_role_regs *regs,
4571                                                    bool base_only)
4572 {
4573         union kvm_mmu_role role = {0};
4574
4575         role.base.access = ACC_ALL;
4576         if (____is_cr0_pg(regs)) {
4577                 role.base.efer_nx = ____is_efer_nx(regs);
4578                 role.base.cr0_wp = ____is_cr0_wp(regs);
4579         }
4580         role.base.smm = is_smm(vcpu);
4581         role.base.guest_mode = is_guest_mode(vcpu);
4582
4583         if (base_only)
4584                 return role;
4585
4586         role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
4587
4588         return role;
4589 }
4590
4591 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4592 {
4593         /* Use 5-level TDP if and only if it's useful/necessary. */
4594         if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4595                 return 4;
4596
4597         return max_tdp_level;
4598 }
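
/*
 * Editor's note: a 4-level EPT/NPT walk already covers a 48-bit
 * guest-physical address space (4 * 9 + 12 bits), so when the vCPU reports
 * MAXPHYADDR <= 48 a fifth level would only add walk latency without
 * mapping any additional memory.
 */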
4599
4600 static union kvm_mmu_role
4601 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
4602                                 struct kvm_mmu_role_regs *regs, bool base_only)
4603 {
4604         union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4605
4606         role.base.ad_disabled = (shadow_accessed_mask == 0);
4607         role.base.level = kvm_mmu_get_tdp_level(vcpu);
4608         role.base.direct = true;
4609         role.base.gpte_is_8_bytes = true;
4610
4611         return role;
4612 }
4613
4614 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4615 {
4616         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4617         struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4618         union kvm_mmu_role new_role =
4619                 kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
4620
4621         if (new_role.as_u64 == context->mmu_role.as_u64)
4622                 return;
4623
4624         context->mmu_role.as_u64 = new_role.as_u64;
4625         context->page_fault = kvm_tdp_page_fault;
4626         context->sync_page = nonpaging_sync_page;
4627         context->invlpg = NULL;
4628         context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4629         context->direct_map = true;
4630         context->get_guest_pgd = get_cr3;
4631         context->get_pdptr = kvm_pdptr_read;
4632         context->inject_page_fault = kvm_inject_page_fault;
4633
4634         if (!is_paging(vcpu)) {
4635                 context->gva_to_gpa = nonpaging_gva_to_gpa;
4636                 context->root_level = 0;
4637         } else if (is_long_mode(vcpu)) {
4638                 context->root_level = is_la57_mode(vcpu) ?
4639                                 PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4640                 reset_rsvds_bits_mask(vcpu, context);
4641                 context->gva_to_gpa = paging64_gva_to_gpa;
4642         } else if (is_pae(vcpu)) {
4643                 context->root_level = PT32E_ROOT_LEVEL;
4644                 reset_rsvds_bits_mask(vcpu, context);
4645                 context->gva_to_gpa = paging64_gva_to_gpa;
4646         } else {
4647                 context->root_level = PT32_ROOT_LEVEL;
4648                 reset_rsvds_bits_mask(vcpu, context);
4649                 context->gva_to_gpa = paging32_gva_to_gpa;
4650         }
4651
4652         update_permission_bitmask(context, false);
4653         update_pkru_bitmask(context);
4654         update_last_nonleaf_level(context);
4655         reset_tdp_shadow_zero_bits_mask(vcpu, context);
4656 }
4657
4658 static union kvm_mmu_role
4659 kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
4660                                       struct kvm_mmu_role_regs *regs, bool base_only)
4661 {
4662         union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4663
4664         role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
4665         role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
4666         role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
4667
4668         return role;
4669 }
4670
4671 static union kvm_mmu_role
4672 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
4673                                    struct kvm_mmu_role_regs *regs, bool base_only)
4674 {
4675         union kvm_mmu_role role =
4676                 kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
4677
4678         role.base.direct = !____is_cr0_pg(regs);
4679
4680         if (!____is_efer_lma(regs))
4681                 role.base.level = PT32E_ROOT_LEVEL;
4682         else if (____is_cr4_la57(regs))
4683                 role.base.level = PT64_ROOT_5LEVEL;
4684         else
4685                 role.base.level = PT64_ROOT_4LEVEL;
4686
4687         return role;
4688 }
4689
4690 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4691                                     struct kvm_mmu_role_regs *regs,
4692                                     union kvm_mmu_role new_role)
4693 {
4694         if (new_role.as_u64 == context->mmu_role.as_u64)
4695                 return;
4696
4697         context->mmu_role.as_u64 = new_role.as_u64;
4698
4699         if (!____is_cr0_pg(regs))
4700                 nonpaging_init_context(context);
4701         else if (____is_efer_lma(regs))
4702                 paging64_init_context(context, regs);
4703         else if (____is_cr4_pae(regs))
4704                 paging32E_init_context(context);
4705         else
4706                 paging32_init_context(context);
4707
4708         if (____is_cr0_pg(regs)) {
4709                 reset_rsvds_bits_mask(vcpu, context);
4710                 update_permission_bitmask(context, false);
4711                 update_pkru_bitmask(context);
4712                 update_last_nonleaf_level(context);
4713         }
4714         context->shadow_root_level = new_role.base.level;
4715
4716         reset_shadow_zero_bits_mask(vcpu, context);
4717 }
4718
4719 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
4720                                 struct kvm_mmu_role_regs *regs)
4721 {
4722         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4723         union kvm_mmu_role new_role =
4724                 kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
4725
4726         shadow_mmu_init_context(vcpu, context, regs, new_role);
4727 }
4728
4729 static union kvm_mmu_role
4730 kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
4731                                    struct kvm_mmu_role_regs *regs)
4732 {
4733         union kvm_mmu_role role =
4734                 kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4735
4736         role.base.direct = false;
4737         role.base.level = kvm_mmu_get_tdp_level(vcpu);
4738
4739         return role;
4740 }
4741
4742 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
4743                              unsigned long cr4, u64 efer, gpa_t nested_cr3)
4744 {
4745         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4746         struct kvm_mmu_role_regs regs = {
4747                 .cr0 = cr0,
4748                 .cr4 = cr4,
4749                 .efer = efer,
4750         };
4751         union kvm_mmu_role new_role;
4752
4753         new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
4754
4755         __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
4756
4757         shadow_mmu_init_context(vcpu, context, &regs, new_role);
4758
4759         /*
4760          * Redo the shadow bits; the reset done by shadow_mmu_init_context()
4761          * (above) may have used the wrong shadow_root_level.
4762          */
4763         reset_shadow_zero_bits_mask(vcpu, context);
4764 }
4765 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4766
4767 static union kvm_mmu_role
4768 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4769                                    bool execonly, u8 level)
4770 {
4771         union kvm_mmu_role role = {0};
4772
4773         /* SMM flag is inherited from root_mmu */
4774         role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4775
4776         role.base.level = level;
4777         role.base.gpte_is_8_bytes = true;
4778         role.base.direct = false;
4779         role.base.ad_disabled = !accessed_dirty;
4780         role.base.guest_mode = true;
4781         role.base.access = ACC_ALL;
4782
4783         /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
4784         role.ext.word = 0;
4785         role.ext.execonly = execonly;
4786         role.ext.valid = 1;
4787
4788         return role;
4789 }
4790
4791 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4792                              bool accessed_dirty, gpa_t new_eptp)
4793 {
4794         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4795         u8 level = vmx_eptp_page_walk_level(new_eptp);
4796         union kvm_mmu_role new_role =
4797                 kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4798                                                    execonly, level);
4799
4800         __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
4801
4802         if (new_role.as_u64 == context->mmu_role.as_u64)
4803                 return;
4804
4805         context->mmu_role.as_u64 = new_role.as_u64;
4806
4807         context->shadow_root_level = level;
4808
4809         context->ept_ad = accessed_dirty;
4810         context->page_fault = ept_page_fault;
4811         context->gva_to_gpa = ept_gva_to_gpa;
4812         context->sync_page = ept_sync_page;
4813         context->invlpg = ept_invlpg;
4814         context->root_level = level;
4815         context->direct_map = false;
4816
4817         update_permission_bitmask(context, true);
4818         update_last_nonleaf_level(context);
4819         update_pkru_bitmask(context);
4820         reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4821         reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4822 }
4823 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4824
4825 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4826 {
4827         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4828         struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4829
4830         kvm_init_shadow_mmu(vcpu, &regs);
4831
4832         context->get_guest_pgd     = get_cr3;
4833         context->get_pdptr         = kvm_pdptr_read;
4834         context->inject_page_fault = kvm_inject_page_fault;
4835 }
4836
4837 static union kvm_mmu_role
4838 kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
4839 {
4840         union kvm_mmu_role role;
4841
4842         role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4843
4844         /*
4845          * Nested MMUs are used only for walking L2's gva->gpa, they never have
4846          * shadow pages of their own and so "direct" has no meaning.  Set it
4847          * to "true" to try to detect bogus usage of the nested MMU.
4848          */
4849         role.base.direct = true;
4850
4851         if (!____is_cr0_pg(regs))
4852                 role.base.level = 0;
4853         else if (____is_efer_lma(regs))
4854                 role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
4855                                                           PT64_ROOT_4LEVEL;
4856         else if (____is_cr4_pae(regs))
4857                 role.base.level = PT32E_ROOT_LEVEL;
4858         else
4859                 role.base.level = PT32_ROOT_LEVEL;
4860
4861         return role;
4862 }
4863
4864 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4865 {
4866         struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4867         union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
4868         struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4869
4870         if (new_role.as_u64 == g_context->mmu_role.as_u64)
4871                 return;
4872
4873         g_context->mmu_role.as_u64 = new_role.as_u64;
4874         g_context->get_guest_pgd     = get_cr3;
4875         g_context->get_pdptr         = kvm_pdptr_read;
4876         g_context->inject_page_fault = kvm_inject_page_fault;
4877
4878         /*
4879          * L2 page tables are never shadowed, so there is no need to sync
4880          * SPTEs.
4881          */
4882         g_context->invlpg            = NULL;
4883
4884         /*
4885          * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4886          * L1's nested page tables (e.g. EPT12). The nested translation
4887          * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4888          * L2's page tables as the first level of translation and L1's
4889          * nested page tables as the second level of translation. Basically
4890          * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4891          */
4892         if (!is_paging(vcpu)) {
4893                 g_context->root_level = 0;
4894                 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4895         } else if (is_long_mode(vcpu)) {
4896                 g_context->root_level = is_la57_mode(vcpu) ?
4897                                         PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4898                 reset_rsvds_bits_mask(vcpu, g_context);
4899                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4900         } else if (is_pae(vcpu)) {
4901                 g_context->root_level = PT32E_ROOT_LEVEL;
4902                 reset_rsvds_bits_mask(vcpu, g_context);
4903                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4904         } else {
4905                 g_context->root_level = PT32_ROOT_LEVEL;
4906                 reset_rsvds_bits_mask(vcpu, g_context);
4907                 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4908         }
4909
4910         update_permission_bitmask(g_context, false);
4911         update_pkru_bitmask(g_context);
4912         update_last_nonleaf_level(g_context);
4913 }
4914
4915 void kvm_init_mmu(struct kvm_vcpu *vcpu)
4916 {
4917         if (mmu_is_nested(vcpu))
4918                 init_kvm_nested_mmu(vcpu);
4919         else if (tdp_enabled)
4920                 init_kvm_tdp_mmu(vcpu);
4921         else
4922                 init_kvm_softmmu(vcpu);
4923 }
4924 EXPORT_SYMBOL_GPL(kvm_init_mmu);
4925
4926 static union kvm_mmu_page_role
4927 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
4928 {
4929         struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4930         union kvm_mmu_role role;
4931
4932         if (tdp_enabled)
4933                 role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
4934         else
4935                 role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
4936
4937         return role.base;
4938 }
4939
4940 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
4941 {
4942         /*
4943          * Invalidate all MMU roles to force them to reinitialize as CPUID
4944          * information is factored into reserved bit calculations.
4945          */
4946         vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
4947         vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
4948         vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
4949         kvm_mmu_reset_context(vcpu);
4950
4951         /*
4952          * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
4953          * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
4954          * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
4955          * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
4956          * sweep the problem under the rug.
4957          *
4958          * KVM's horrific CPUID ABI makes the problem all but impossible to
4959          * solve, as correctly handling multiple vCPU models (with respect to
4960          * paging and physical address properties) in a single VM would require
4961          * tracking all relevant CPUID information in kvm_mmu_page_role.  That
4962          * is very undesirable as it would double the memory requirements for
4963          * gfn_track (see struct kvm_mmu_page_role comments), and in practice
4964          * no sane VMM mucks with the core vCPU model on the fly.
4965          */
4966         if (vcpu->arch.last_vmentry_cpu != -1) {
4967                 pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
4968                 pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
4969         }
4970 }
4971
4972 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
4973 {
4974         kvm_mmu_unload(vcpu);
4975         kvm_init_mmu(vcpu);
4976 }
4977 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
4978
4979 int kvm_mmu_load(struct kvm_vcpu *vcpu)
4980 {
4981         int r;
4982
4983         r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
4984         if (r)
4985                 goto out;
4986         r = mmu_alloc_special_roots(vcpu);
4987         if (r)
4988                 goto out;
4989         if (vcpu->arch.mmu->direct_map)
4990                 r = mmu_alloc_direct_roots(vcpu);
4991         else
4992                 r = mmu_alloc_shadow_roots(vcpu);
4993         if (r)
4994                 goto out;
4995
4996         kvm_mmu_sync_roots(vcpu);
4997
4998         kvm_mmu_load_pgd(vcpu);
4999         static_call(kvm_x86_tlb_flush_current)(vcpu);
5000 out:
5001         return r;
5002 }
5003
5004 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5005 {
5006         kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5007         WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
5008         kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5009         WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
5010 }
5011
5012 static bool need_remote_flush(u64 old, u64 new)
5013 {
5014         if (!is_shadow_present_pte(old))
5015                 return false;
5016         if (!is_shadow_present_pte(new))
5017                 return true;
5018         if ((old ^ new) & PT64_BASE_ADDR_MASK)
5019                 return true;
5020         old ^= shadow_nx_mask;
5021         new ^= shadow_nx_mask;
5022         return (old & ~new & PT64_PERM_MASK) != 0;
5023 }
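
/*
 * Editor's note (illustration): XOR-ing both sptes with shadow_nx_mask
 * flips NX (where a set bit means *less* permissive) into a positive
 * "executable" bit, so the final test uniformly reads "some permission the
 * old spte granted is missing from the new one".  E.g. downgrading a
 * writable spte to read-only leaves the writable bit set in (old & ~new)
 * and forces a remote flush, whereas merely adding permissions does not.
 */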
5024
5025 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5026                                     int *bytes)
5027 {
5028         u64 gentry = 0;
5029         int r;
5030
5031         /*
5032          * Assume that the pte write is on a page table of the same type
5033          * as the current vcpu paging mode, since we update the sptes only
5034          * when they have the same mode.
5035          */
5036         if (is_pae(vcpu) && *bytes == 4) {
5037                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5038                 *gpa &= ~(gpa_t)7;
5039                 *bytes = 8;
5040         }
5041
5042         if (*bytes == 4 || *bytes == 8) {
5043                 r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5044                 if (r)
5045                         gentry = 0;
5046         }
5047
5048         return gentry;
5049 }
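
/*
 * Worked example (editor's illustration): a 32-bit PAE guest updates a
 * 64-bit gpte with two 4-byte writes.  For a 4-byte write at gpa 0x1004,
 * the helper above rounds gpa down to 0x1000 and widens *bytes to 8, so
 * the full (possibly half-updated) gpte is re-read atomically instead of
 * operating on a torn 32-bit half.
 */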
5050
5051 /*
5052  * If we're seeing too many writes to a page, it may no longer be a page table,
5053  * or we may be forking, in which case it is better to unmap the page.
5054  */
5055 static bool detect_write_flooding(struct kvm_mmu_page *sp)
5056 {
5057         /*
5058          * Skip write-flooding detection for the sp whose level is 1, because
5059          * it can become unsync and then the guest page is no longer write-protected.
5060          */
5061         if (sp->role.level == PG_LEVEL_4K)
5062                 return false;
5063
5064         atomic_inc(&sp->write_flooding_count);
5065         return atomic_read(&sp->write_flooding_count) >= 3;
5066 }
5067
5068 /*
5069  * Misaligned accesses are too much trouble to fix up; also, they usually
5070  * indicate a page is not used as a page table.
5071  */
5072 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5073                                     int bytes)
5074 {
5075         unsigned offset, pte_size, misaligned;
5076
5077         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
5078                  gpa, bytes, sp->role.word);
5079
5080         offset = offset_in_page(gpa);
5081         pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5082
5083         /*
5084          * Sometimes, the OS only writes the last byte to update status
5085          * bits; for example, Linux uses the andb instruction in clear_bit().
5086          */
5087         if (!(offset & (pte_size - 1)) && bytes == 1)
5088                 return false;
5089
5090         misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5091         misaligned |= bytes < 4;
5092
5093         return misaligned;
5094 }
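
/*
 * Worked example (editor's illustration): with 8-byte gptes, a 4-byte
 * write at page offset 0xc gives
 *
 *      (0xc ^ 0xf) & ~7 == 0, and bytes < 4 is false,
 *
 * so the write stays within a single gpte and is handled; a 4-byte write
 * at offset 0x6 gives (0x6 ^ 0x9) & ~7 == 0x8, i.e. it straddles two
 * gptes, and the sp is zapped instead of being patched.
 */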
5095
5096 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5097 {
5098         unsigned page_offset, quadrant;
5099         u64 *spte;
5100         int level;
5101
5102         page_offset = offset_in_page(gpa);
5103         level = sp->role.level;
5104         *nspte = 1;
5105         if (!sp->role.gpte_is_8_bytes) {
5106                 page_offset <<= 1;      /* 32->64 */
5107                 /*
5108                  * A 32-bit pde maps 4MB while the shadow pdes map
5109                  * only 2MB.  So we need to double the offset again
5110                  * and zap two pdes instead of one.
5111                  */
5112                 if (level == PT32_ROOT_LEVEL) {
5113                         page_offset &= ~7; /* kill rounding error */
5114                         page_offset <<= 1;
5115                         *nspte = 2;
5116                 }
5117                 quadrant = page_offset >> PAGE_SHIFT;
5118                 page_offset &= ~PAGE_MASK;
5119                 if (quadrant != sp->role.quadrant)
5120                         return NULL;
5121         }
5122
5123         spte = &sp->spt[page_offset / sizeof(*spte)];
5124         return spte;
5125 }
5126
5127 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5128                               const u8 *new, int bytes,
5129                               struct kvm_page_track_notifier_node *node)
5130 {
5131         gfn_t gfn = gpa >> PAGE_SHIFT;
5132         struct kvm_mmu_page *sp;
5133         LIST_HEAD(invalid_list);
5134         u64 entry, gentry, *spte;
5135         int npte;
5136         bool remote_flush, local_flush;
5137
5138         /*
5139          * If we don't have indirect shadow pages, it means no page is
5140          * write-protected, so we can simply exit.
5141          */
5142         if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5143                 return;
5144
5145         remote_flush = local_flush = false;
5146
5147         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5148
5149         /*
5150          * No need to care whether the memory allocation is successful
5151          * or not, since pte prefetch is skipped if the cache does not
5152          * have enough objects.
5153          */
5154         mmu_topup_memory_caches(vcpu, true);
5155
5156         write_lock(&vcpu->kvm->mmu_lock);
5157
5158         gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5159
5160         ++vcpu->kvm->stat.mmu_pte_write;
5161         kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5162
5163         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5164                 if (detect_write_misaligned(sp, gpa, bytes) ||
5165                       detect_write_flooding(sp)) {
5166                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5167                         ++vcpu->kvm->stat.mmu_flooded;
5168                         continue;
5169                 }
5170
5171                 spte = get_written_sptes(sp, gpa, &npte);
5172                 if (!spte)
5173                         continue;
5174
5175                 local_flush = true;
5176                 while (npte--) {
5177                         entry = *spte;
5178                         mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5179                         if (gentry && sp->role.level != PG_LEVEL_4K)
5180                                 ++vcpu->kvm->stat.mmu_pde_zapped;
5181                         if (need_remote_flush(entry, *spte))
5182                                 remote_flush = true;
5183                         ++spte;
5184                 }
5185         }
5186         kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5187         kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5188         write_unlock(&vcpu->kvm->mmu_lock);
5189 }
5190
5191 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5192                        void *insn, int insn_len)
5193 {
5194         int r, emulation_type = EMULTYPE_PF;
5195         bool direct = vcpu->arch.mmu->direct_map;
5196
5197         if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5198                 return RET_PF_RETRY;
5199
5200         r = RET_PF_INVALID;
5201         if (unlikely(error_code & PFERR_RSVD_MASK)) {
5202                 r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5203                 if (r == RET_PF_EMULATE)
5204                         goto emulate;
5205         }
5206
5207         if (r == RET_PF_INVALID) {
5208                 r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
5209                                           lower_32_bits(error_code), false);
5210                 if (WARN_ON_ONCE(r == RET_PF_INVALID))
5211                         return -EIO;
5212         }
5213
5214         if (r < 0)
5215                 return r;
5216         if (r != RET_PF_EMULATE)
5217                 return 1;
5218
5219         /*
5220          * Before emulating the instruction, check if the error code
5221          * was due to a RO violation while translating the guest page.
5222          * This can occur when using nested virtualization with nested
5223          * paging in both guests. If true, we simply unprotect the page
5224          * and resume the guest.
5225          */
5226         if (vcpu->arch.mmu->direct_map &&
5227             (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5228                 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5229                 return 1;
5230         }
5231
5232         /*
5233          * vcpu->arch.mmu->page_fault returned RET_PF_EMULATE, but we can still
5234          * optimistically try to just unprotect the page and let the processor
5235          * re-execute the instruction that caused the page fault.  Do not allow
5236          * retrying MMIO emulation, as it's not only pointless but could also
5237          * cause us to enter an infinite loop because the processor will keep
5238          * faulting on the non-existent MMIO address.  Retrying an instruction
5239          * from a nested guest is also pointless and dangerous as we are only
5240          * explicitly shadowing L1's page tables, i.e. unprotecting something
5241          * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5242          */
5243         if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
5244                 emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5245 emulate:
5246         return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5247                                        insn_len);
5248 }
5249 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5250
5251 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
5252                             gva_t gva, hpa_t root_hpa)
5253 {
5254         int i;
5255
5256         /* It's actually a GPA for vcpu->arch.guest_mmu.  */
5257         if (mmu != &vcpu->arch.guest_mmu) {
5258                 /* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5259                 if (is_noncanonical_address(gva, vcpu))
5260                         return;
5261
5262                 static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5263         }
5264
5265         if (!mmu->invlpg)
5266                 return;
5267
5268         if (root_hpa == INVALID_PAGE) {
5269                 mmu->invlpg(vcpu, gva, mmu->root_hpa);
5270
5271                 /*
5272                  * INVLPG is required to invalidate any global mappings for the VA,
5273                  * irrespective of PCID.  Determining whether any of the prev_root
5274                  * mappings of the VA is marked global would take roughly as much
5275                  * work as just syncing it blindly, so we might as well always
5276                  * sync it.
5277                  *
5278                  * Mappings not reachable via the current cr3 or the prev_roots will be
5279                  * synced when switching to that cr3, so nothing needs to be done here
5280                  * for them.
5281                  */
5282                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5283                         if (VALID_PAGE(mmu->prev_roots[i].hpa))
5284                                 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5285         } else {
5286                 mmu->invlpg(vcpu, gva, root_hpa);
5287         }
5288 }
5289
5290 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
5291 {
5292         kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
5293         ++vcpu->stat.invlpg;
5294 }
5295 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5296
5297
5298 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5299 {
5300         struct kvm_mmu *mmu = vcpu->arch.mmu;
5301         bool tlb_flush = false;
5302         uint i;
5303
5304         if (pcid == kvm_get_active_pcid(vcpu)) {
5305                 mmu->invlpg(vcpu, gva, mmu->root_hpa);
5306                 tlb_flush = true;
5307         }
5308
5309         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5310                 if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5311                     pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5312                         mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5313                         tlb_flush = true;
5314                 }
5315         }
5316
5317         if (tlb_flush)
5318                 static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5319
5320         ++vcpu->stat.invlpg;
5321
5322         /*
5323          * Mappings not reachable via the current cr3 or the prev_roots will be
5324          * synced when switching to that cr3, so nothing needs to be done here
5325          * for them.
5326          */
5327 }
5328
5329 void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
5330                        int tdp_huge_page_level)
5331 {
5332         tdp_enabled = enable_tdp;
5333         max_tdp_level = tdp_max_root_level;
5334
5335         /*
5336          * max_huge_page_level reflects KVM's MMU capabilities irrespective
5337          * of kernel support, e.g. KVM may be capable of using 1GB pages when
5338          * the kernel is not.  But, KVM never creates a page size greater than
5339          * what is used by the kernel for any given HVA, i.e. the kernel's
5340          * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5341          */
5342         if (tdp_enabled)
5343                 max_huge_page_level = tdp_huge_page_level;
5344         else if (boot_cpu_has(X86_FEATURE_GBPAGES))
5345                 max_huge_page_level = PG_LEVEL_1G;
5346         else
5347                 max_huge_page_level = PG_LEVEL_2M;
5348 }
5349 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
5350
5351 /* The return value indicates if tlb flush on all vcpus is needed. */
5352 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head,
5353                                     struct kvm_memory_slot *slot);
5354
5355 /* The caller should hold mmu-lock before calling this function. */
5356 static __always_inline bool
5357 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5358                         slot_level_handler fn, int start_level, int end_level,
5359                         gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
5360                         bool flush)
5361 {
5362         struct slot_rmap_walk_iterator iterator;
5363
5364         for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5365                         end_gfn, &iterator) {
5366                 if (iterator.rmap)
5367                         flush |= fn(kvm, iterator.rmap, memslot);
5368
5369                 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5370                         if (flush && flush_on_yield) {
5371                                 kvm_flush_remote_tlbs_with_address(kvm,
5372                                                 start_gfn,
5373                                                 iterator.gfn - start_gfn + 1);
5374                                 flush = false;
5375                         }
5376                         cond_resched_rwlock_write(&kvm->mmu_lock);
5377                 }
5378         }
5379
5380         return flush;
5381 }
5382
5383 static __always_inline bool
5384 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5385                   slot_level_handler fn, int start_level, int end_level,
5386                   bool flush_on_yield)
5387 {
5388         return slot_handle_level_range(kvm, memslot, fn, start_level,
5389                         end_level, memslot->base_gfn,
5390                         memslot->base_gfn + memslot->npages - 1,
5391                         flush_on_yield, false);
5392 }
5393
5394 static __always_inline bool
5395 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
5396                  slot_level_handler fn, bool flush_on_yield)
5397 {
5398         return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5399                                  PG_LEVEL_4K, flush_on_yield);
5400 }
5401
5402 static void free_mmu_pages(struct kvm_mmu *mmu)
5403 {
5404         if (!tdp_enabled && mmu->pae_root)
5405                 set_memory_encrypted((unsigned long)mmu->pae_root, 1);
5406         free_page((unsigned long)mmu->pae_root);
5407         free_page((unsigned long)mmu->pml4_root);
5408 }
5409
5410 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5411 {
5412         struct page *page;
5413         int i;
5414
5415         mmu->root_hpa = INVALID_PAGE;
5416         mmu->root_pgd = 0;
5417         mmu->translate_gpa = translate_gpa;
5418         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5419                 mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5420
5421         /*
5422          * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5423          * while the PDP table is a per-vCPU construct that's allocated at MMU
5424          * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5425          * x86_64.  Therefore we need to allocate the PDP table in the first
5426          * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
5427          * generally doesn't use PAE paging and can skip allocating the PDP
5428          * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
5429          * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
5430          * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
5431          */
5432         if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5433                 return 0;
5434
5435         page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5436         if (!page)
5437                 return -ENOMEM;
5438
5439         mmu->pae_root = page_address(page);
5440
5441         /*
5442          * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
5443          * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
5444          * that KVM's writes and the CPU's reads get along.  Note, this is
5445          * only necessary when using shadow paging, as 64-bit NPT can get at
5446          * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
5447          * by 32-bit kernels (when KVM itself uses 32-bit NPT).
5448          */
5449         if (!tdp_enabled)
5450                 set_memory_decrypted((unsigned long)mmu->pae_root, 1);
5451         else
5452                 WARN_ON_ONCE(shadow_me_mask);
5453
5454         for (i = 0; i < 4; ++i)
5455                 mmu->pae_root[i] = INVALID_PAE_ROOT;
5456
5457         return 0;
5458 }
5459
5460 int kvm_mmu_create(struct kvm_vcpu *vcpu)
5461 {
5462         int ret;
5463
5464         vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
5465         vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
5466
5467         vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
5468         vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
5469
5470         vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
5471
5472         vcpu->arch.mmu = &vcpu->arch.root_mmu;
5473         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5474
5475         vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5476
5477         ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5478         if (ret)
5479                 return ret;
5480
5481         ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5482         if (ret)
5483                 goto fail_allocate_root;
5484
5485         return ret;
5486  fail_allocate_root:
5487         free_mmu_pages(&vcpu->arch.guest_mmu);
5488         return ret;
5489 }
5490
5491 #define BATCH_ZAP_PAGES 10
5492 static void kvm_zap_obsolete_pages(struct kvm *kvm)
5493 {
5494         struct kvm_mmu_page *sp, *node;
5495         int nr_zapped, batch = 0;
5496
5497 restart:
5498         list_for_each_entry_safe_reverse(sp, node,
5499               &kvm->arch.active_mmu_pages, link) {
5500                 /*
5501                  * No obsolete valid page exists before a newly created page
5502                  * since active_mmu_pages is a FIFO list.
5503                  */
5504                 if (!is_obsolete_sp(kvm, sp))
5505                         break;
5506
5507                 /*
5508                  * Invalid pages should never land back on the list of active
5509                  * pages.  Skip the bogus page, otherwise we'll get stuck in an
5510                  * infinite loop if the page gets put back on the list (again).
5511                  */
5512                 if (WARN_ON(sp->role.invalid))
5513                         continue;
5514
5515                 /*
5516                  * No need to flush the TLB since we're only zapping shadow
5517                  * pages with an obsolete generation number and all vCPUs have
5518                  * loaded a new root, i.e. the shadow pages being zapped cannot
5519                  * be in active use by the guest.
5520                  */
5521                 if (batch >= BATCH_ZAP_PAGES &&
5522                     cond_resched_rwlock_write(&kvm->mmu_lock)) {
5523                         batch = 0;
5524                         goto restart;
5525                 }
5526
5527                 if (__kvm_mmu_prepare_zap_page(kvm, sp,
5528                                 &kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5529                         batch += nr_zapped;
5530                         goto restart;
5531                 }
5532         }
5533
5534         /*
5535          * Trigger a remote TLB flush before freeing the page tables to ensure
5536          * KVM is not in the middle of a lockless shadow page table walk, which
5537          * may reference the pages.
5538          */
5539         kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5540 }
5541
5542 /*
5543  * Fast invalidate all shadow pages and use lock-break technique
5544  * to zap obsolete pages.
5545  *
5546  * It's required when memslot is being deleted or VM is being
5547  * destroyed, in these cases, we should ensure that KVM MMU does
5548  * not use any resource of the being-deleted slot or all slots
5549  * after calling the function.
5550  */
5551 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5552 {
5553         lockdep_assert_held(&kvm->slots_lock);
5554
5555         write_lock(&kvm->mmu_lock);
5556         trace_kvm_mmu_zap_all_fast(kvm);
5557
5558         /*
5559          * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5560          * held for the entire duration of zapping obsolete pages, it's
5561          * impossible for there to be multiple invalid generations associated
5562          * with *valid* shadow pages at any given time, i.e. there is exactly
5563          * one valid generation and (at most) one invalid generation.
5564          */
5565         kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5566
5567         /* In order to ensure all threads see this change when
5568          * handling the MMU reload signal, this must happen in the
5569          * same critical section as kvm_reload_remote_mmus, and
5570          * before kvm_zap_obsolete_pages as kvm_zap_obsolete_pages
5571          * could drop the MMU lock and yield.
5572          */
5573         if (is_tdp_mmu_enabled(kvm))
5574                 kvm_tdp_mmu_invalidate_all_roots(kvm);
5575
5576         /*
5577          * Notify all vcpus to reload their shadow page tables and flush
5578          * their TLBs.  All vcpus will then switch to the new shadow page
5579          * tables with the new mmu_valid_gen.
5580          *
5581          * Note: we need to do this under the protection of mmu_lock;
5582          * otherwise, a vcpu could purge shadow pages but miss the tlb flush.
5583          */
5584         kvm_reload_remote_mmus(kvm);
5585
5586         kvm_zap_obsolete_pages(kvm);
5587
5588         write_unlock(&kvm->mmu_lock);
5589
5590         if (is_tdp_mmu_enabled(kvm)) {
5591                 read_lock(&kvm->mmu_lock);
5592                 kvm_tdp_mmu_zap_invalidated_roots(kvm);
5593                 read_unlock(&kvm->mmu_lock);
5594         }
5595 }
5596
5597 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5598 {
5599         return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5600 }
5601
5602 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5603                         struct kvm_memory_slot *slot,
5604                         struct kvm_page_track_notifier_node *node)
5605 {
5606         kvm_mmu_zap_all_fast(kvm);
5607 }
5608
5609 void kvm_mmu_init_vm(struct kvm *kvm)
5610 {
5611         struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5612
5613         if (!kvm_mmu_init_tdp_mmu(kvm))
5614                 /*
5615                  * No smp_load/store wrappers needed here as we are in
5616                  * VM init and there cannot be any memslots / other threads
5617                  * accessing this struct kvm yet.
5618                  */
5619                 kvm->arch.memslots_have_rmaps = true;
5620
5621         node->track_write = kvm_mmu_pte_write;
5622         node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5623         kvm_page_track_register_notifier(kvm, node);
5624 }
5625
5626 void kvm_mmu_uninit_vm(struct kvm *kvm)
5627 {
5628         struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5629
5630         kvm_page_track_unregister_notifier(kvm, node);
5631
5632         kvm_mmu_uninit_tdp_mmu(kvm);
5633 }
5634
5635 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5636 {
5637         struct kvm_memslots *slots;
5638         struct kvm_memory_slot *memslot;
5639         int i;
5640         bool flush = false;
5641
5642         if (kvm_memslots_have_rmaps(kvm)) {
5643                 write_lock(&kvm->mmu_lock);
5644                 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5645                         slots = __kvm_memslots(kvm, i);
5646                         kvm_for_each_memslot(memslot, slots) {
5647                                 gfn_t start, end;
5648
5649                                 start = max(gfn_start, memslot->base_gfn);
5650                                 end = min(gfn_end, memslot->base_gfn + memslot->npages);
5651                                 if (start >= end)
5652                                         continue;
5653
5654                                 flush = slot_handle_level_range(kvm, memslot,
5655                                                 kvm_zap_rmapp, PG_LEVEL_4K,
5656                                                 KVM_MAX_HUGEPAGE_LEVEL, start,
5657                                                 end - 1, true, flush);
5658                         }
5659                 }
5660                 if (flush)
5661                         kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
5662                 write_unlock(&kvm->mmu_lock);
5663         }
5664
5665         if (is_tdp_mmu_enabled(kvm)) {
5666                 flush = false;
5667
5668                 read_lock(&kvm->mmu_lock);
5669                 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
5670                         flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
5671                                                           gfn_end, flush, true);
5672                 if (flush)
5673                         kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
5674                                                            gfn_end);
5675
5676                 read_unlock(&kvm->mmu_lock);
5677         }
5678 }
5679
5680 static bool slot_rmap_write_protect(struct kvm *kvm,
5681                                     struct kvm_rmap_head *rmap_head,
5682                                     struct kvm_memory_slot *slot)
5683 {
5684         return __rmap_write_protect(kvm, rmap_head, false);
5685 }
5686
5687 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
5688                                       struct kvm_memory_slot *memslot,
5689                                       int start_level)
5690 {
5691         bool flush = false;
5692
5693         if (kvm_memslots_have_rmaps(kvm)) {
5694                 write_lock(&kvm->mmu_lock);
5695                 flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5696                                           start_level, KVM_MAX_HUGEPAGE_LEVEL,
5697                                           false);
5698                 write_unlock(&kvm->mmu_lock);
5699         }
5700
5701         if (is_tdp_mmu_enabled(kvm)) {
5702                 read_lock(&kvm->mmu_lock);
5703                 flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
5704                 read_unlock(&kvm->mmu_lock);
5705         }
5706
5707         /*
5708          * We can flush all the TLBs out of the mmu lock without TLB
5709          * corruption since we only change sptes from writable to
5710          * read-only, so the only case we need to care about is a
5711          * present-to-present change (changing an spte from present
5712          * to non-present flushes the TLBs immediately).  In other
5713          * words, the only case we care about is mmu_spte_update(),
5714          * which checks Host-writable | MMU-writable instead of
5715          * PT_WRITABLE_MASK and therefore no longer depends on
5716          * PT_WRITABLE_MASK.
5717          */
5718         if (flush)
5719                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5720 }
5721
5722 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
5723                                          struct kvm_rmap_head *rmap_head,
5724                                          struct kvm_memory_slot *slot)
5725 {
5726         u64 *sptep;
5727         struct rmap_iterator iter;
5728         int need_tlb_flush = 0;
5729         kvm_pfn_t pfn;
5730         struct kvm_mmu_page *sp;
5731
5732 restart:
5733         for_each_rmap_spte(rmap_head, &iter, sptep) {
5734                 sp = sptep_to_sp(sptep);
5735                 pfn = spte_to_pfn(*sptep);
5736
5737                 /*
5738                  * We cannot do huge page mapping for indirect shadow pages,
5739                  * which are found on the last rmap (level = 1) when not using
5740                  * tdp; such shadow pages are synced with the page table in
5741                  * the guest, and the guest page table uses 4K page size
5742                  * mappings if the indirect sp has level = 1.
5743                  */
5744                 if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
5745                     sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
5746                                                                pfn, PG_LEVEL_NUM)) {
5747                         pte_list_remove(rmap_head, sptep);
5748
5749                         if (kvm_available_flush_tlb_with_range())
5750                                 kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
5751                                         KVM_PAGES_PER_HPAGE(sp->role.level));
5752                         else
5753                                 need_tlb_flush = 1;
5754
5755                         goto restart;
5756                 }
5757         }
5758
5759         return need_tlb_flush;
5760 }
5761
5762 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5763                                    const struct kvm_memory_slot *memslot)
5764 {
5765         /* FIXME: const-ify all uses of struct kvm_memory_slot.  */
5766         struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
5767         bool flush = false;
5768
5769         if (kvm_memslots_have_rmaps(kvm)) {
5770                 write_lock(&kvm->mmu_lock);
5771                 flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
5772                 if (flush)
5773                         kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
5774                 write_unlock(&kvm->mmu_lock);
5775         }
5776
5777         if (is_tdp_mmu_enabled(kvm)) {
5778                 read_lock(&kvm->mmu_lock);
5779                 flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
5780                 if (flush)
5781                         kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
5782                 read_unlock(&kvm->mmu_lock);
5783         }
5784 }
5785
5786 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
5787                                         const struct kvm_memory_slot *memslot)
5788 {
5789         /*
5790          * All current use cases for flushing the TLBs for a specific memslot
5791          * are related to dirty logging, and many do the TLB flush out of
5792          * mmu_lock.  The interactions between the various operations on a
5793          * memslot must be serialized by slots_lock to ensure the TLB flush
5794          * from one operation is observed by any other operation on the same memslot.
5795          */
5796         lockdep_assert_held(&kvm->slots_lock);
5797         kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5798                                            memslot->npages);
5799 }
5800
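     /*
      * Clear the dirty status tracked in the leaf sptes of the given memslot,
      * for both the legacy MMU and the TDP MMU, and flush the TLBs if any
      * spte was modified.
      */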
5801 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5802                                    struct kvm_memory_slot *memslot)
5803 {
5804         bool flush = false;
5805
5806         if (kvm_memslots_have_rmaps(kvm)) {
5807                 write_lock(&kvm->mmu_lock);
5808                 flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
5809                                          false);
5810                 write_unlock(&kvm->mmu_lock);
5811         }
5812
5813         if (is_tdp_mmu_enabled(kvm)) {
5814                 read_lock(&kvm->mmu_lock);
5815                 flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
5816                 read_unlock(&kvm->mmu_lock);
5817         }
5818
5819         /*
5820          * It is safe to flush the TLBs out of mmu_lock here because this
5821          * function is currently only used for dirty logging, in which case
5822          * flushing out of mmu_lock still guarantees that no dirty pages
5823          * will be lost in the dirty_bitmap.
5824          */
5825         if (flush)
5826                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5827 }
5828
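     /*
      * Zap every shadow page in the VM.  The walk restarts whenever a page
      * is zapped or mmu_lock is dropped to reschedule, as either event can
      * invalidate the current position in active_mmu_pages.
      */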
5829 void kvm_mmu_zap_all(struct kvm *kvm)
5830 {
5831         struct kvm_mmu_page *sp, *node;
5832         LIST_HEAD(invalid_list);
5833         int ign;
5834
5835         write_lock(&kvm->mmu_lock);
5836 restart:
5837         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
5838                 if (WARN_ON(sp->role.invalid))
5839                         continue;
5840                 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
5841                         goto restart;
5842                 if (cond_resched_rwlock_write(&kvm->mmu_lock))
5843                         goto restart;
5844         }
5845
5846         kvm_mmu_commit_zap_page(kvm, &invalid_list);
5847
5848         if (is_tdp_mmu_enabled(kvm))
5849                 kvm_tdp_mmu_zap_all(kvm);
5850
5851         write_unlock(&kvm->mmu_lock);
5852 }
5853
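     /*
      * Called when the memslots generation changes.  MMIO sptes embed the
      * generation they were created with, so bumping the generation
      * invalidates them lazily; only a wrap of the MMIO generation (checked
      * below) requires immediately zapping the shadow pages.
      */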
5854 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5855 {
5856         WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
5857
5858         gen &= MMIO_SPTE_GEN_MASK;
5859
5860         /*
5861          * Generation numbers are incremented in multiples of the number of
5862          * address spaces in order to provide unique generations across all
5863          * address spaces.  Strip what is effectively the address space
5864          * modifier prior to checking for a wrap of the MMIO generation so
5865          * that a wrap in any address space is detected.
5866          */
5867         gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
5868
5869         /*
5870          * The very rare case: if the MMIO generation number has wrapped,
5871          * zap all shadow pages.
5872          */
5873         if (unlikely(gen == 0)) {
5874                 kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5875                 kvm_mmu_zap_all_fast(kvm);
5876         }
5877 }
5878
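     /*
      * Shrinker callback: reclaim shadow pages from at most one VM per
      * invocation, preferring to commit already-zapped obsolete pages
      * before zapping the oldest active pages.
      */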
5879 static unsigned long
5880 mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5881 {
5882         struct kvm *kvm;
5883         int nr_to_scan = sc->nr_to_scan;
5884         unsigned long freed = 0;
5885
5886         mutex_lock(&kvm_lock);
5887
5888         list_for_each_entry(kvm, &vm_list, vm_list) {
5889                 int idx;
5890                 LIST_HEAD(invalid_list);
5891
5892                 /*
5893                  * Never scan more than sc->nr_to_scan VM instances.
5894                  * In practice this condition is never hit, since we do not
5895                  * try to shrink more than one VM and it is very unlikely to
5896                  * see !n_used_mmu_pages that many times.
5897                  */
5898                 if (!nr_to_scan--)
5899                         break;
5900                 /*
5901                  * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5902                  * here.  We may skip a VM instance erroneously, but we do
5903                  * not want to shrink a VM that has only just started to
5904                  * populate its MMU anyway.
5905                  */
5906                 if (!kvm->arch.n_used_mmu_pages &&
5907                     !kvm_has_zapped_obsolete_pages(kvm))
5908                         continue;
5909
5910                 idx = srcu_read_lock(&kvm->srcu);
5911                 write_lock(&kvm->mmu_lock);
5912
5913                 if (kvm_has_zapped_obsolete_pages(kvm)) {
5914                         kvm_mmu_commit_zap_page(kvm,
5915                               &kvm->arch.zapped_obsolete_pages);
5916                         goto unlock;
5917                 }
5918
5919                 freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
5920
5921 unlock:
5922                 write_unlock(&kvm->mmu_lock);
5923                 srcu_read_unlock(&kvm->srcu, idx);
5924
5925                 /*
5926                  * unfair on small ones
5927                  * per-vm shrinkers cry out
5928                  * sadness comes quickly
5929                  */
5930                 list_move_tail(&kvm->vm_list, &vm_list);
5931                 break;
5932         }
5933
5934         mutex_unlock(&kvm_lock);
5935         return freed;
5936 }
5937
5938 static unsigned long
5939 mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5940 {
5941         return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
5942 }
5943
5944 static struct shrinker mmu_shrinker = {
5945         .count_objects = mmu_shrink_count,
5946         .scan_objects = mmu_shrink_scan,
5947         .seeks = DEFAULT_SEEKS * 10,
5948 };
5949
5950 static void mmu_destroy_caches(void)
5951 {
5952         kmem_cache_destroy(pte_list_desc_cache);
5953         kmem_cache_destroy(mmu_page_header_cache);
5954 }
5955
5956 static bool get_nx_auto_mode(void)
5957 {
5958         /* Return true when the CPU has the bug and mitigations are on. */
5959         return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
5960 }
5961
5962 static void __set_nx_huge_pages(bool val)
5963 {
5964         nx_huge_pages = itlb_multihit_kvm_mitigation = val;
5965 }
5966
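     /*
      * Module parameter handler for nx_huge_pages.  Accepts "off", "force",
      * "auto" or a plain boolean; when the effective value changes, zap all
      * shadow pages in every VM and wake its NX recovery thread.
      */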
5967 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
5968 {
5969         bool old_val = nx_huge_pages;
5970         bool new_val;
5971
5972         /* In "auto" mode, deploy the workaround only if the CPU has the bug. */
5973         if (sysfs_streq(val, "off"))
5974                 new_val = 0;
5975         else if (sysfs_streq(val, "force"))
5976                 new_val = 1;
5977         else if (sysfs_streq(val, "auto"))
5978                 new_val = get_nx_auto_mode();
5979         else if (strtobool(val, &new_val) < 0)
5980                 return -EINVAL;
5981
5982         __set_nx_huge_pages(new_val);
5983
5984         if (new_val != old_val) {
5985                 struct kvm *kvm;
5986
5987                 mutex_lock(&kvm_lock);
5988
5989                 list_for_each_entry(kvm, &vm_list, vm_list) {
5990                         mutex_lock(&kvm->slots_lock);
5991                         kvm_mmu_zap_all_fast(kvm);
5992                         mutex_unlock(&kvm->slots_lock);
5993
5994                         wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5995                 }
5996                 mutex_unlock(&kvm_lock);
5997         }
5998
5999         return 0;
6000 }
6001
6002 int kvm_mmu_module_init(void)
6003 {
6004         int ret = -ENOMEM;
6005
6006         if (nx_huge_pages == -1)
6007                 __set_nx_huge_pages(get_nx_auto_mode());
6008
6009         /*
6010          * MMU roles use union aliasing, which is, generally speaking,
6011          * undefined behavior.  However, we supposedly know how compilers
6012          * behave and the current status quo is unlikely to change.  The
6013          * guards below will let us know if that assumption becomes false.
6014          */
6015         BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
6016         BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
6017         BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
6018
6019         kvm_mmu_reset_all_pte_masks();
6020
6021         pte_list_desc_cache = kmem_cache_create("pte_list_desc",
6022                                             sizeof(struct pte_list_desc),
6023                                             0, SLAB_ACCOUNT, NULL);
6024         if (!pte_list_desc_cache)
6025                 goto out;
6026
6027         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
6028                                                   sizeof(struct kvm_mmu_page),
6029                                                   0, SLAB_ACCOUNT, NULL);
6030         if (!mmu_page_header_cache)
6031                 goto out;
6032
6033         if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
6034                 goto out;
6035
6036         ret = register_shrinker(&mmu_shrinker);
6037         if (ret)
6038                 goto out;
6039
6040         return 0;
6041
6042 out:
6043         mmu_destroy_caches();
6044         return ret;
6045 }
6046
6047 /*
6048  * Calculate the default number of MMU pages to allocate for this VM.
6049  */
6050 unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
6051 {
6052         unsigned long nr_mmu_pages;
6053         unsigned long nr_pages = 0;
6054         struct kvm_memslots *slots;
6055         struct kvm_memory_slot *memslot;
6056         int i;
6057
6058         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
6059                 slots = __kvm_memslots(kvm, i);
6060
6061                 kvm_for_each_memslot(memslot, slots)
6062                         nr_pages += memslot->npages;
6063         }
6064
6065         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
6066         nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
6067
6068         return nr_mmu_pages;
6069 }
6070
6071 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
6072 {
6073         kvm_mmu_unload(vcpu);
6074         free_mmu_pages(&vcpu->arch.root_mmu);
6075         free_mmu_pages(&vcpu->arch.guest_mmu);
6076         mmu_free_memory_caches(vcpu);
6077 }
6078
6079 void kvm_mmu_module_exit(void)
6080 {
6081         mmu_destroy_caches();
6082         percpu_counter_destroy(&kvm_total_used_mmu_pages);
6083         unregister_shrinker(&mmu_shrinker);
6084         mmu_audit_disable();
6085 }
6086
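     /*
      * Module parameter handler for nx_huge_pages_recovery_ratio.  When
      * recovery goes from disabled (ratio == 0) to enabled while the NX huge
      * page mitigation is active, wake each VM's recovery thread so that it
      * recomputes its sleep timeout.
      */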
6087 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
6088 {
6089         unsigned int old_val;
6090         int err;
6091
6092         old_val = nx_huge_pages_recovery_ratio;
6093         err = param_set_uint(val, kp);
6094         if (err)
6095                 return err;
6096
6097         if (READ_ONCE(nx_huge_pages) &&
6098             !old_val && nx_huge_pages_recovery_ratio) {
6099                 struct kvm *kvm;
6100
6101                 mutex_lock(&kvm_lock);
6102
6103                 list_for_each_entry(kvm, &vm_list, vm_list)
6104                         wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6105
6106                 mutex_unlock(&kvm_lock);
6107         }
6108
6109         return err;
6110 }
6111
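     /*
      * Zap roughly 1/nx_huge_pages_recovery_ratio of the shadow pages that
      * were forced to use small mappings by the NX huge page mitigation, so
      * that the affected gfns can be rebuilt as huge pages on a later fault.
      */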
6112 static void kvm_recover_nx_lpages(struct kvm *kvm)
6113 {
6114         unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
6115         int rcu_idx;
6116         struct kvm_mmu_page *sp;
6117         unsigned int ratio;
6118         LIST_HEAD(invalid_list);
6119         bool flush = false;
6120         ulong to_zap;
6121
6122         rcu_idx = srcu_read_lock(&kvm->srcu);
6123         write_lock(&kvm->mmu_lock);
6124
6125         ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
6126         to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
6127         for ( ; to_zap; --to_zap) {
6128                 if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
6129                         break;
6130
6131                 /*
6132                  * We use a separate list instead of just using active_mmu_pages
6133                  * because the number of lpage_disallowed pages is expected to
6134                  * be relatively small compared to the total.
6135                  */
6136                 sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
6137                                       struct kvm_mmu_page,
6138                                       lpage_disallowed_link);
6139                 WARN_ON_ONCE(!sp->lpage_disallowed);
6140                 if (is_tdp_mmu_page(sp)) {
6141                         flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
6142                 } else {
6143                         kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
6144                         WARN_ON_ONCE(sp->lpage_disallowed);
6145                 }
6146
6147                 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6148                         kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6149                         cond_resched_rwlock_write(&kvm->mmu_lock);
6150                         flush = false;
6151                 }
6152         }
6153         kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6154
6155         write_unlock(&kvm->mmu_lock);
6156         srcu_read_unlock(&kvm->srcu, rcu_idx);
6157 }
6158
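     /*
      * Return how long the recovery worker should sleep: the remainder of a
      * 60 second period when both the mitigation and the recovery ratio are
      * enabled, otherwise effectively forever (until explicitly woken).
      */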
6159 static long get_nx_lpage_recovery_timeout(u64 start_time)
6160 {
6161         return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
6162                 ? start_time + 60 * HZ - get_jiffies_64()
6163                 : MAX_SCHEDULE_TIMEOUT;
6164 }
6165
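     /*
      * Main loop of the per-VM "kvm-nx-lpage-recovery" thread: sleep until
      * the recovery period expires (or the thread is woken or stopped), then
      * run one recovery pass.
      */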
6166 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
6167 {
6168         u64 start_time;
6169         long remaining_time;
6170
6171         while (true) {
6172                 start_time = get_jiffies_64();
6173                 remaining_time = get_nx_lpage_recovery_timeout(start_time);
6174
6175                 set_current_state(TASK_INTERRUPTIBLE);
6176                 while (!kthread_should_stop() && remaining_time > 0) {
6177                         schedule_timeout(remaining_time);
6178                         remaining_time = get_nx_lpage_recovery_timeout(start_time);
6179                         set_current_state(TASK_INTERRUPTIBLE);
6180                 }
6181
6182                 set_current_state(TASK_RUNNING);
6183
6184                 if (kthread_should_stop())
6185                         return 0;
6186
6187                 kvm_recover_nx_lpages(kvm);
6188         }
6189 }
6190
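     /*
      * Create and start the per-VM NX huge page recovery thread once the VM
      * has been fully initialized.
      */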
6191 int kvm_mmu_post_init_vm(struct kvm *kvm)
6192 {
6193         int err;
6194
6195         err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6196                                           "kvm-nx-lpage-recovery",
6197                                           &kvm->arch.nx_lpage_recovery_thread);
6198         if (!err)
6199                 kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6200
6201         return err;
6202 }
6203
6204 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6205 {
6206         if (kvm->arch.nx_lpage_recovery_thread)
6207                 kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6208 }