1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * MMU support
9  *
10  * Copyright (C) 2006 Qumranet, Inc.
11  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12  *
13  * Authors:
14  *   Yaniv Kamay  <yaniv@qumranet.com>
15  *   Avi Kivity   <avi@qumranet.com>
16  */
17
18 #include "irq.h"
19 #include "ioapic.h"
20 #include "mmu.h"
21 #include "mmu_internal.h"
22 #include "tdp_mmu.h"
23 #include "x86.h"
24 #include "kvm_cache_regs.h"
25 #include "kvm_emulate.h"
26 #include "cpuid.h"
27 #include "spte.h"
28
29 #include <linux/kvm_host.h>
30 #include <linux/types.h>
31 #include <linux/string.h>
32 #include <linux/mm.h>
33 #include <linux/highmem.h>
34 #include <linux/moduleparam.h>
35 #include <linux/export.h>
36 #include <linux/swap.h>
37 #include <linux/hugetlb.h>
38 #include <linux/compiler.h>
39 #include <linux/srcu.h>
40 #include <linux/slab.h>
41 #include <linux/sched/signal.h>
42 #include <linux/uaccess.h>
43 #include <linux/hash.h>
44 #include <linux/kern_levels.h>
45 #include <linux/kthread.h>
46
47 #include <asm/page.h>
48 #include <asm/memtype.h>
49 #include <asm/cmpxchg.h>
50 #include <asm/io.h>
51 #include <asm/set_memory.h>
52 #include <asm/vmx.h>
53 #include <asm/kvm_page_track.h>
54 #include "trace.h"
55
56 extern bool itlb_multihit_kvm_mitigation;
57
58 static int __read_mostly nx_huge_pages = -1;
59 #ifdef CONFIG_PREEMPT_RT
60 /* Recovery can cause latency spikes, so disable it for PREEMPT_RT.  */
61 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
62 #else
63 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
64 #endif
65
66 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
67 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
68
69 static const struct kernel_param_ops nx_huge_pages_ops = {
70         .set = set_nx_huge_pages,
71         .get = param_get_bool,
72 };
73
74 static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
75         .set = set_nx_huge_pages_recovery_ratio,
76         .get = param_get_uint,
77 };
78
79 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
80 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
81 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
82                 &nx_huge_pages_recovery_ratio, 0644);
83 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
84
85 static bool __read_mostly force_flush_and_sync_on_reuse;
86 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
87
88 /*
89  * When this variable is set to true, Two-Dimensional Paging (TDP) is enabled,
90  * where the hardware walks two sets of page tables:
91  * 1. the guest-virtual to guest-physical translation, and
92  * 2. for each step of 1., the guest-physical to host-physical translation.
93  * If the hardware supports TDP, shadow paging is not needed.
94  */
95 bool tdp_enabled = false;
96
97 static int max_huge_page_level __read_mostly;
98 static int max_tdp_level __read_mostly;
99
100 enum {
101         AUDIT_PRE_PAGE_FAULT,
102         AUDIT_POST_PAGE_FAULT,
103         AUDIT_PRE_PTE_WRITE,
104         AUDIT_POST_PTE_WRITE,
105         AUDIT_PRE_SYNC,
106         AUDIT_POST_SYNC
107 };
108
109 #ifdef MMU_DEBUG
110 bool dbg = 0;
111 module_param(dbg, bool, 0644);
112 #endif
113
114 #define PTE_PREFETCH_NUM                8
115
116 #define PT32_LEVEL_BITS 10
117
118 #define PT32_LEVEL_SHIFT(level) \
119                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
120
121 #define PT32_LVL_OFFSET_MASK(level) \
122         (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
123                                                 * PT32_LEVEL_BITS))) - 1))
124
125 #define PT32_INDEX(address, level)\
126         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
127
128
129 #define PT32_BASE_ADDR_MASK PAGE_MASK
130 #define PT32_DIR_BASE_ADDR_MASK \
131         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
132 #define PT32_LVL_ADDR_MASK(level) \
133         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
134                                             * PT32_LEVEL_BITS))) - 1))
135
136 #include <trace/events/kvm.h>
137
138 /* make pte_list_desc fit well in a cache line */
139 #define PTE_LIST_EXT 3
140
141 struct pte_list_desc {
142         u64 *sptes[PTE_LIST_EXT];
143         struct pte_list_desc *more;
144 };
145
146 struct kvm_shadow_walk_iterator {
147         u64 addr;
148         hpa_t shadow_addr;
149         u64 *sptep;
150         int level;
151         unsigned index;
152 };
153
154 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
155         for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
156                                          (_root), (_addr));                \
157              shadow_walk_okay(&(_walker));                                 \
158              shadow_walk_next(&(_walker)))
159
160 #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
161         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
162              shadow_walk_okay(&(_walker));                      \
163              shadow_walk_next(&(_walker)))
164
165 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)     \
166         for (shadow_walk_init(&(_walker), _vcpu, _addr);                \
167              shadow_walk_okay(&(_walker)) &&                            \
168                 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });  \
169              __shadow_walk_next(&(_walker), spte))
170
171 static struct kmem_cache *pte_list_desc_cache;
172 struct kmem_cache *mmu_page_header_cache;
173 static struct percpu_counter kvm_total_used_mmu_pages;
174
175 static void mmu_spte_set(u64 *sptep, u64 spte);
176 static union kvm_mmu_page_role
177 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
178
179 #define CREATE_TRACE_POINTS
180 #include "mmutrace.h"
181
182
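/*
 * Returns true if the vendor module implements a ranged remote TLB flush;
 * used to choose between a targeted flush and a full kvm_flush_remote_tlbs().
 */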
183 static inline bool kvm_available_flush_tlb_with_range(void)
184 {
185         return kvm_x86_ops.tlb_remote_flush_with_range;
186 }
187
188 static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
189                 struct kvm_tlb_range *range)
190 {
191         int ret = -ENOTSUPP;
192
193         if (range && kvm_x86_ops.tlb_remote_flush_with_range)
194                 ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
195
196         if (ret)
197                 kvm_flush_remote_tlbs(kvm);
198 }
199
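/*
 * Flush remote TLBs for @pages pages starting at @start_gfn, falling back to
 * a full remote TLB flush if a ranged flush is unavailable or fails.
 */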
200 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
201                 u64 start_gfn, u64 pages)
202 {
203         struct kvm_tlb_range range;
204
205         range.start_gfn = start_gfn;
206         range.pages = pages;
207
208         kvm_flush_remote_tlbs_with_range(kvm, &range);
209 }
210
211 bool is_nx_huge_page_enabled(void)
212 {
213         return READ_ONCE(nx_huge_pages);
214 }
215
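/* Install an MMIO SPTE that caches @gfn and the access bits for fast MMIO emulation. */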
216 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
217                            unsigned int access)
218 {
219         u64 spte = make_mmio_spte(vcpu, gfn, access);
220
221         trace_mark_mmio_spte(sptep, gfn, spte);
222         mmu_spte_set(sptep, spte);
223 }
224
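/*
 * Extract the gfn cached in an MMIO SPTE; the gfn may be split across the
 * SPTE (see shadow_nonpresent_or_rsvd_mask), so reassemble the two pieces.
 */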
225 static gfn_t get_mmio_spte_gfn(u64 spte)
226 {
227         u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
228
229         gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
230                & shadow_nonpresent_or_rsvd_mask;
231
232         return gpa >> PAGE_SHIFT;
233 }
234
235 static unsigned get_mmio_spte_access(u64 spte)
236 {
237         return spte & shadow_mmio_access_mask;
238 }
239
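/*
 * Returns true if the MMIO SPTE was created in the current memslots
 * generation, i.e. the cached MMIO information is still valid.
 */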
240 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
241 {
242         u64 kvm_gen, spte_gen, gen;
243
244         gen = kvm_vcpu_memslots(vcpu)->generation;
245         if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
246                 return false;
247
248         kvm_gen = gen & MMIO_SPTE_GEN_MASK;
249         spte_gen = get_mmio_spte_generation(spte);
250
251         trace_check_mmio_spte(spte, kvm_gen, spte_gen);
252         return likely(kvm_gen == spte_gen);
253 }
254
255 static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
256                                   struct x86_exception *exception)
257 {
258         /* Check that the guest physical address does not exceed the guest maximum */
259         if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
260                 exception->error_code |= PFERR_RSVD_MASK;
261                 return UNMAPPED_GVA;
262         }
263
264         return gpa;
265 }
266
267 static int is_cpuid_PSE36(void)
268 {
269         return 1;
270 }
271
272 static int is_nx(struct kvm_vcpu *vcpu)
273 {
274         return vcpu->arch.efer & EFER_NX;
275 }
276
277 static gfn_t pse36_gfn_delta(u32 gpte)
278 {
279         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
280
281         return (gpte & PT32_DIR_PSE36_MASK) << shift;
282 }
283
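/*
 * On 64-bit hosts an spte is read and written atomically as a single u64.
 * The 32-bit implementations below must split each access into two halves
 * and guard against racing lockless readers (see __get_spte_lockless()).
 */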
284 #ifdef CONFIG_X86_64
285 static void __set_spte(u64 *sptep, u64 spte)
286 {
287         WRITE_ONCE(*sptep, spte);
288 }
289
290 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
291 {
292         WRITE_ONCE(*sptep, spte);
293 }
294
295 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
296 {
297         return xchg(sptep, spte);
298 }
299
300 static u64 __get_spte_lockless(u64 *sptep)
301 {
302         return READ_ONCE(*sptep);
303 }
304 #else
305 union split_spte {
306         struct {
307                 u32 spte_low;
308                 u32 spte_high;
309         };
310         u64 spte;
311 };
312
313 static void count_spte_clear(u64 *sptep, u64 spte)
314 {
315         struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
316
317         if (is_shadow_present_pte(spte))
318                 return;
319
320         /* Ensure the spte is completely set before we increase the count */
321         smp_wmb();
322         sp->clear_spte_count++;
323 }
324
325 static void __set_spte(u64 *sptep, u64 spte)
326 {
327         union split_spte *ssptep, sspte;
328
329         ssptep = (union split_spte *)sptep;
330         sspte = (union split_spte)spte;
331
332         ssptep->spte_high = sspte.spte_high;
333
334         /*
335          * If we map the spte from nonpresent to present, we should store
336          * the high bits first and only then set the present bit, so the
337          * CPU cannot fetch this spte while we are still setting it.
338          */
339         smp_wmb();
340
341         WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
342 }
343
344 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
345 {
346         union split_spte *ssptep, sspte;
347
348         ssptep = (union split_spte *)sptep;
349         sspte = (union split_spte)spte;
350
351         WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
352
353         /*
354          * If we map the spte from present to nonpresent, we should clear the
355          * present bit first so that a vCPU cannot fetch the stale high bits.
356          */
357         smp_wmb();
358
359         ssptep->spte_high = sspte.spte_high;
360         count_spte_clear(sptep, spte);
361 }
362
363 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
364 {
365         union split_spte *ssptep, sspte, orig;
366
367         ssptep = (union split_spte *)sptep;
368         sspte = (union split_spte)spte;
369
370         /* xchg acts as a barrier before the setting of the high bits */
371         orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
372         orig.spte_high = ssptep->spte_high;
373         ssptep->spte_high = sspte.spte_high;
374         count_spte_clear(sptep, spte);
375
376         return orig.spte;
377 }
378
379 /*
380  * The idea of getting the spte the lightweight way on x86_32 guests comes
381  * from gup_get_pte (mm/gup.c).
382  *
383  * An spte tlb flush may be pending, because kvm_set_pte_rmapp
384  * coalesces them and we are running outside of the MMU lock.  Therefore
385  * we need to protect against in-progress updates of the spte.
386  *
387  * Reading the spte while an update is in progress may return the old value
388  * for the high part of the spte.  The race is fine for a present->non-present
389  * change (because the high part of the spte is ignored for non-present sptes),
390  * but for a present->present change we must reread the spte.
391  *
392  * All such changes are done in two steps (present->non-present and
393  * non-present->present), hence it is enough to count the number of
394  * present->non-present updates: if it changed while reading the spte,
395  * we might have hit the race.  This is done using clear_spte_count.
396  */
397 static u64 __get_spte_lockless(u64 *sptep)
398 {
399         struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
400         union split_spte spte, *orig = (union split_spte *)sptep;
401         int count;
402
403 retry:
404         count = sp->clear_spte_count;
405         smp_rmb();
406
407         spte.spte_low = orig->spte_low;
408         smp_rmb();
409
410         spte.spte_high = orig->spte_high;
411         smp_rmb();
412
413         if (unlikely(spte.spte_low != orig->spte_low ||
414               count != sp->clear_spte_count))
415                 goto retry;
416
417         return spte.spte;
418 }
419 #endif
420
421 static bool spte_has_volatile_bits(u64 spte)
422 {
423         if (!is_shadow_present_pte(spte))
424                 return false;
425
426         /*
427          * Always atomically update an spte that can be updated
428          * outside of the mmu-lock: this ensures the dirty bit is not
429          * lost and gives us a stable is_writable_pte(), which in turn
430          * ensures a required tlb flush is not missed.
431          */
432         if (spte_can_locklessly_be_made_writable(spte) ||
433             is_access_track_spte(spte))
434                 return true;
435
436         if (spte_ad_enabled(spte)) {
437                 if ((spte & shadow_accessed_mask) == 0 ||
438                     (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
439                         return true;
440         }
441
442         return false;
443 }
444
445 /* Rules for using mmu_spte_set:
446  * Set the sptep from nonpresent to present.
447  * Note: the sptep being assigned *must* be either not present
448  * or in a state where the hardware will not attempt to update
449  * the spte.
450  */
451 static void mmu_spte_set(u64 *sptep, u64 new_spte)
452 {
453         WARN_ON(is_shadow_present_pte(*sptep));
454         __set_spte(sptep, new_spte);
455 }
456
457 /*
458  * Update the SPTE (excluding the PFN), but do not track changes in its
459  * accessed/dirty status.
460  */
461 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
462 {
463         u64 old_spte = *sptep;
464
465         WARN_ON(!is_shadow_present_pte(new_spte));
466
467         if (!is_shadow_present_pte(old_spte)) {
468                 mmu_spte_set(sptep, new_spte);
469                 return old_spte;
470         }
471
472         if (!spte_has_volatile_bits(old_spte))
473                 __update_clear_spte_fast(sptep, new_spte);
474         else
475                 old_spte = __update_clear_spte_slow(sptep, new_spte);
476
477         WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
478
479         return old_spte;
480 }
481
482 /* Rules for using mmu_spte_update:
483  * Update the state bits; the mapped pfn is not changed.
484  *
485  * Whenever we overwrite a writable spte with a read-only one we
486  * should flush remote TLBs.  Otherwise rmap_write_protect
487  * will find a read-only spte even though the writable spte
488  * might still be cached in a CPU's TLB; the return value indicates
489  * this case.
490  *
491  * Returns true if the TLB needs to be flushed.
492  */
493 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
494 {
495         bool flush = false;
496         u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
497
498         if (!is_shadow_present_pte(old_spte))
499                 return false;
500
501         /*
502          * An spte updated outside of the mmu-lock is safe, since
503          * we always update it atomically; see the comments in
504          * spte_has_volatile_bits().
505          */
506         if (spte_can_locklessly_be_made_writable(old_spte) &&
507               !is_writable_pte(new_spte))
508                 flush = true;
509
510         /*
511          * Flush TLB when accessed/dirty states are changed in the page tables,
512          * to guarantee consistency between TLB and page tables.
513          */
514
515         if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
516                 flush = true;
517                 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
518         }
519
520         if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
521                 flush = true;
522                 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
523         }
524
525         return flush;
526 }
527
528 /*
529  * Rules for using mmu_spte_clear_track_bits:
530  * It sets the sptep from present to nonpresent and tracks the
531  * state bits; it is used to clear a last-level sptep.
532  * Returns non-zero if the PTE was previously valid.
533  */
534 static int mmu_spte_clear_track_bits(u64 *sptep)
535 {
536         kvm_pfn_t pfn;
537         u64 old_spte = *sptep;
538
539         if (!spte_has_volatile_bits(old_spte))
540                 __update_clear_spte_fast(sptep, 0ull);
541         else
542                 old_spte = __update_clear_spte_slow(sptep, 0ull);
543
544         if (!is_shadow_present_pte(old_spte))
545                 return 0;
546
547         pfn = spte_to_pfn(old_spte);
548
549         /*
550          * KVM does not hold a refcount on the page used by the
551          * kvm mmu; before reclaiming the page, it must be
552          * unmapped from the mmu first.
553          */
554         WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
555
556         if (is_accessed_spte(old_spte))
557                 kvm_set_pfn_accessed(pfn);
558
559         if (is_dirty_spte(old_spte))
560                 kvm_set_pfn_dirty(pfn);
561
562         return 1;
563 }
564
565 /*
566  * Rules for using mmu_spte_clear_no_track:
567  * Directly clear the spte without tracking the state bits of the sptep;
568  * it is used on upper level sptes.
569  */
570 static void mmu_spte_clear_no_track(u64 *sptep)
571 {
572         __update_clear_spte_fast(sptep, 0ull);
573 }
574
575 static u64 mmu_spte_get_lockless(u64 *sptep)
576 {
577         return __get_spte_lockless(sptep);
578 }
579
580 /* Restore an acc-track PTE back to a regular PTE */
581 static u64 restore_acc_track_spte(u64 spte)
582 {
583         u64 new_spte = spte;
584         u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
585                          & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
586
587         WARN_ON_ONCE(spte_ad_enabled(spte));
588         WARN_ON_ONCE(!is_access_track_spte(spte));
589
590         new_spte &= ~shadow_acc_track_mask;
591         new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
592                       SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
593         new_spte |= saved_bits;
594
595         return new_spte;
596 }
597
598 /* Returns the Accessed status of the PTE and resets it at the same time. */
599 static bool mmu_spte_age(u64 *sptep)
600 {
601         u64 spte = mmu_spte_get_lockless(sptep);
602
603         if (!is_accessed_spte(spte))
604                 return false;
605
606         if (spte_ad_enabled(spte)) {
607                 clear_bit((ffs(shadow_accessed_mask) - 1),
608                           (unsigned long *)sptep);
609         } else {
610                 /*
611                  * Capture the dirty status of the page, so that it doesn't get
612                  * lost when the SPTE is marked for access tracking.
613                  */
614                 if (is_writable_pte(spte))
615                         kvm_set_pfn_dirty(spte_to_pfn(spte));
616
617                 spte = mark_spte_for_access_track(spte);
618                 mmu_spte_update_no_track(sptep, spte);
619         }
620
621         return true;
622 }
623
624 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
625 {
626         /*
627          * Prevent page table teardown by making any freeing CPU wait
628          * during the kvm_flush_remote_tlbs() IPI to all active vcpus.
629          */
630         local_irq_disable();
631
632         /*
633          * Make sure a following spte read is not reordered ahead of the write
634          * to vcpu->mode.
635          */
636         smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
637 }
638
639 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
640 {
641         /*
642          * Make sure the write to vcpu->mode is not reordered in front of
643          * reads of sptes.  If it is, kvm_mmu_commit_zap_page() can see us
644          * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
645          */
646         smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
647         local_irq_enable();
648 }
649
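/*
 * Top up the per-vCPU MMU memory caches so that subsequent page-fault
 * handling does not need to allocate memory while the MMU lock is held.
 */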
650 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
651 {
652         int r;
653
654         /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
655         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
656                                        1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
657         if (r)
658                 return r;
659         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
660                                        PT64_ROOT_MAX_LEVEL);
661         if (r)
662                 return r;
663         if (maybe_indirect) {
664                 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
665                                                PT64_ROOT_MAX_LEVEL);
666                 if (r)
667                         return r;
668         }
669         return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
670                                           PT64_ROOT_MAX_LEVEL);
671 }
672
673 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
674 {
675         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
676         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
677         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
678         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
679 }
680
681 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
682 {
683         return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
684 }
685
686 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
687 {
688         kmem_cache_free(pte_list_desc_cache, pte_list_desc);
689 }
690
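/*
 * Return the gfn translated by the SPTE at @index: direct shadow pages
 * compute it from sp->gfn and the index, indirect pages look it up in sp->gfns.
 */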
691 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
692 {
693         if (!sp->role.direct)
694                 return sp->gfns[index];
695
696         return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
697 }
698
699 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
700 {
701         if (!sp->role.direct) {
702                 sp->gfns[index] = gfn;
703                 return;
704         }
705
706         if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
707                 pr_err_ratelimited("gfn mismatch under direct page %llx "
708                                    "(expected %llx, got %llx)\n",
709                                    sp->gfn,
710                                    kvm_mmu_page_get_gfn(sp, index), gfn);
711 }
712
713 /*
714  * Return the pointer to the large page information for a given gfn,
715  * handling slots that are not large page aligned.
716  */
717 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
718                                               struct kvm_memory_slot *slot,
719                                               int level)
720 {
721         unsigned long idx;
722
723         idx = gfn_to_index(gfn, slot->base_gfn, level);
724         return &slot->arch.lpage_info[level - 2][idx];
725 }
726
727 static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
728                                             gfn_t gfn, int count)
729 {
730         struct kvm_lpage_info *linfo;
731         int i;
732
733         for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
734                 linfo = lpage_info_slot(gfn, slot, i);
735                 linfo->disallow_lpage += count;
736                 WARN_ON(linfo->disallow_lpage < 0);
737         }
738 }
739
740 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
741 {
742         update_gfn_disallow_lpage_count(slot, gfn, 1);
743 }
744
745 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
746 {
747         update_gfn_disallow_lpage_count(slot, gfn, -1);
748 }
749
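/*
 * Account a new indirect shadow page: write-protect the shadowed gfn for
 * non-leaf pages, or disallow huge pages covering it for PG_LEVEL_4K pages.
 */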
750 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
751 {
752         struct kvm_memslots *slots;
753         struct kvm_memory_slot *slot;
754         gfn_t gfn;
755
756         kvm->arch.indirect_shadow_pages++;
757         gfn = sp->gfn;
758         slots = kvm_memslots_for_spte_role(kvm, sp->role);
759         slot = __gfn_to_memslot(slots, gfn);
760
761         /* Non-leaf shadow pages are kept read-only (write-protected). */
762         if (sp->role.level > PG_LEVEL_4K)
763                 return kvm_slot_page_track_add_page(kvm, slot, gfn,
764                                                     KVM_PAGE_TRACK_WRITE);
765
766         kvm_mmu_gfn_disallow_lpage(slot, gfn);
767 }
768
769 void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
770 {
771         if (sp->lpage_disallowed)
772                 return;
773
774         ++kvm->stat.nx_lpage_splits;
775         list_add_tail(&sp->lpage_disallowed_link,
776                       &kvm->arch.lpage_disallowed_mmu_pages);
777         sp->lpage_disallowed = true;
778 }
779
780 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
781 {
782         struct kvm_memslots *slots;
783         struct kvm_memory_slot *slot;
784         gfn_t gfn;
785
786         kvm->arch.indirect_shadow_pages--;
787         gfn = sp->gfn;
788         slots = kvm_memslots_for_spte_role(kvm, sp->role);
789         slot = __gfn_to_memslot(slots, gfn);
790         if (sp->role.level > PG_LEVEL_4K)
791                 return kvm_slot_page_track_remove_page(kvm, slot, gfn,
792                                                        KVM_PAGE_TRACK_WRITE);
793
794         kvm_mmu_gfn_allow_lpage(slot, gfn);
795 }
796
797 void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
798 {
799         --kvm->stat.nx_lpage_splits;
800         sp->lpage_disallowed = false;
801         list_del(&sp->lpage_disallowed_link);
802 }
803
804 static struct kvm_memory_slot *
805 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
806                             bool no_dirty_log)
807 {
808         struct kvm_memory_slot *slot;
809
810         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
811         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
812                 return NULL;
813         if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
814                 return NULL;
815
816         return slot;
817 }
818
819 /*
820  * About rmap_head encoding:
821  *
822  * If bit zero of rmap_head->val is clear, then it points to the only spte
823  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
824  * pte_list_desc containing more mappings.
825  */
826
827 /*
828  * Returns the number of pointers in the rmap chain, not counting the new one.
829  */
830 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
831                         struct kvm_rmap_head *rmap_head)
832 {
833         struct pte_list_desc *desc;
834         int i, count = 0;
835
836         if (!rmap_head->val) {
837                 rmap_printk("%p %llx 0->1\n", spte, *spte);
838                 rmap_head->val = (unsigned long)spte;
839         } else if (!(rmap_head->val & 1)) {
840                 rmap_printk("%p %llx 1->many\n", spte, *spte);
841                 desc = mmu_alloc_pte_list_desc(vcpu);
842                 desc->sptes[0] = (u64 *)rmap_head->val;
843                 desc->sptes[1] = spte;
844                 rmap_head->val = (unsigned long)desc | 1;
845                 ++count;
846         } else {
847                 rmap_printk("%p %llx many->many\n", spte, *spte);
848                 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
849                 while (desc->sptes[PTE_LIST_EXT-1]) {
850                         count += PTE_LIST_EXT;
851
852                         if (!desc->more) {
853                                 desc->more = mmu_alloc_pte_list_desc(vcpu);
854                                 desc = desc->more;
855                                 break;
856                         }
857                         desc = desc->more;
858                 }
859                 for (i = 0; desc->sptes[i]; ++i)
860                         ++count;
861                 desc->sptes[i] = spte;
862         }
863         return count;
864 }
865
866 static void
867 pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
868                            struct pte_list_desc *desc, int i,
869                            struct pte_list_desc *prev_desc)
870 {
871         int j;
872
873         for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
874                 ;
875         desc->sptes[i] = desc->sptes[j];
876         desc->sptes[j] = NULL;
877         if (j != 0)
878                 return;
879         if (!prev_desc && !desc->more)
880                 rmap_head->val = 0;
881         else
882                 if (prev_desc)
883                         prev_desc->more = desc->more;
884                 else
885                         rmap_head->val = (unsigned long)desc->more | 1;
886         mmu_free_pte_list_desc(desc);
887 }
888
889 static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
890 {
891         struct pte_list_desc *desc;
892         struct pte_list_desc *prev_desc;
893         int i;
894
895         if (!rmap_head->val) {
896                 pr_err("%s: %p 0->BUG\n", __func__, spte);
897                 BUG();
898         } else if (!(rmap_head->val & 1)) {
899                 rmap_printk("%p 1->0\n", spte);
900                 if ((u64 *)rmap_head->val != spte) {
901                         pr_err("%s:  %p 1->BUG\n", __func__, spte);
902                         BUG();
903                 }
904                 rmap_head->val = 0;
905         } else {
906                 rmap_printk("%p many->many\n", spte);
907                 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
908                 prev_desc = NULL;
909                 while (desc) {
910                         for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
911                                 if (desc->sptes[i] == spte) {
912                                         pte_list_desc_remove_entry(rmap_head,
913                                                         desc, i, prev_desc);
914                                         return;
915                                 }
916                         }
917                         prev_desc = desc;
918                         desc = desc->more;
919                 }
920                 pr_err("%s: %p many->many\n", __func__, spte);
921                 BUG();
922         }
923 }
924
925 static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
926 {
927         mmu_spte_clear_track_bits(sptep);
928         __pte_list_remove(sptep, rmap_head);
929 }
930
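/* Return the rmap head for @gfn at @level within @slot. */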
931 static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
932                                            struct kvm_memory_slot *slot)
933 {
934         unsigned long idx;
935
936         idx = gfn_to_index(gfn, slot->base_gfn, level);
937         return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
938 }
939
940 static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
941                                          struct kvm_mmu_page *sp)
942 {
943         struct kvm_memslots *slots;
944         struct kvm_memory_slot *slot;
945
946         slots = kvm_memslots_for_spte_role(kvm, sp->role);
947         slot = __gfn_to_memslot(slots, gfn);
948         return __gfn_to_rmap(gfn, sp->role.level, slot);
949 }
950
951 static bool rmap_can_add(struct kvm_vcpu *vcpu)
952 {
953         struct kvm_mmu_memory_cache *mc;
954
955         mc = &vcpu->arch.mmu_pte_list_desc_cache;
956         return kvm_mmu_memory_cache_nr_free_objects(mc);
957 }
958
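/* Record @gfn in the shadow page containing @spte and add @spte to the gfn's rmap chain. */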
959 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
960 {
961         struct kvm_mmu_page *sp;
962         struct kvm_rmap_head *rmap_head;
963
964         sp = sptep_to_sp(spte);
965         kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
966         rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
967         return pte_list_add(vcpu, spte, rmap_head);
968 }
969
970 static void rmap_remove(struct kvm *kvm, u64 *spte)
971 {
972         struct kvm_mmu_page *sp;
973         gfn_t gfn;
974         struct kvm_rmap_head *rmap_head;
975
976         sp = sptep_to_sp(spte);
977         gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
978         rmap_head = gfn_to_rmap(kvm, gfn, sp);
979         __pte_list_remove(spte, rmap_head);
980 }
981
982 /*
983  * Used by the following functions to iterate through the sptes linked by a
984  * rmap.  All fields are private and should not be used outside of these iterators.
985  */
986 struct rmap_iterator {
987         /* private fields */
988         struct pte_list_desc *desc;     /* holds the sptep if not NULL */
989         int pos;                        /* index of the sptep */
990 };
991
992 /*
993  * Iteration must be started by this function.  This should also be used after
994  * removing/dropping sptes from the rmap link because in such cases the
995  * information in the iterator may not be valid.
996  *
997  * Returns sptep if found, NULL otherwise.
998  */
999 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1000                            struct rmap_iterator *iter)
1001 {
1002         u64 *sptep;
1003
1004         if (!rmap_head->val)
1005                 return NULL;
1006
1007         if (!(rmap_head->val & 1)) {
1008                 iter->desc = NULL;
1009                 sptep = (u64 *)rmap_head->val;
1010                 goto out;
1011         }
1012
1013         iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1014         iter->pos = 0;
1015         sptep = iter->desc->sptes[iter->pos];
1016 out:
1017         BUG_ON(!is_shadow_present_pte(*sptep));
1018         return sptep;
1019 }
1020
1021 /*
1022  * Must be used with a valid iterator: e.g. after rmap_get_first().
1023  *
1024  * Returns sptep if found, NULL otherwise.
1025  */
1026 static u64 *rmap_get_next(struct rmap_iterator *iter)
1027 {
1028         u64 *sptep;
1029
1030         if (iter->desc) {
1031                 if (iter->pos < PTE_LIST_EXT - 1) {
1032                         ++iter->pos;
1033                         sptep = iter->desc->sptes[iter->pos];
1034                         if (sptep)
1035                                 goto out;
1036                 }
1037
1038                 iter->desc = iter->desc->more;
1039
1040                 if (iter->desc) {
1041                         iter->pos = 0;
1042                         /* desc->sptes[0] cannot be NULL */
1043                         sptep = iter->desc->sptes[iter->pos];
1044                         goto out;
1045                 }
1046         }
1047
1048         return NULL;
1049 out:
1050         BUG_ON(!is_shadow_present_pte(*sptep));
1051         return sptep;
1052 }
1053
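/* Iterate over all sptes in the rmap chain rooted at @_rmap_head_. */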
1054 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)                 \
1055         for (_spte_ = rmap_get_first(_rmap_head_, _iter_);              \
1056              _spte_; _spte_ = rmap_get_next(_iter_))
1057
1058 static void drop_spte(struct kvm *kvm, u64 *sptep)
1059 {
1060         if (mmu_spte_clear_track_bits(sptep))
1061                 rmap_remove(kvm, sptep);
1062 }
1063
1064
1065 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1066 {
1067         if (is_large_pte(*sptep)) {
1068                 WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1069                 drop_spte(kvm, sptep);
1070                 --kvm->stat.lpages;
1071                 return true;
1072         }
1073
1074         return false;
1075 }
1076
1077 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1078 {
1079         if (__drop_large_spte(vcpu->kvm, sptep)) {
1080                 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1081
1082                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1083                         KVM_PAGES_PER_HPAGE(sp->role.level));
1084         }
1085 }
1086
1087 /*
1088  * Write-protect on the specified @sptep, @pt_protect indicates whether
1089  * spte write-protection is caused by protecting shadow page table.
1090  *
1091  * Note: write protection is different for dirty logging and for spte
1092  * protection:
1093  * - for dirty logging, the spte can be made writable at any time if
1094  *   its dirty bitmap is properly set.
1095  * - for spte protection, the spte can become writable only after
1096  *   unsync-ing the shadow page.
1097  *
1098  * Return true if the tlb needs to be flushed.
1099  */
1100 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1101 {
1102         u64 spte = *sptep;
1103
1104         if (!is_writable_pte(spte) &&
1105               !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1106                 return false;
1107
1108         rmap_printk("spte %p %llx\n", sptep, *sptep);
1109
1110         if (pt_protect)
1111                 spte &= ~shadow_mmu_writable_mask;
1112         spte = spte & ~PT_WRITABLE_MASK;
1113
1114         return mmu_spte_update(sptep, spte);
1115 }
1116
1117 static bool __rmap_write_protect(struct kvm *kvm,
1118                                  struct kvm_rmap_head *rmap_head,
1119                                  bool pt_protect)
1120 {
1121         u64 *sptep;
1122         struct rmap_iterator iter;
1123         bool flush = false;
1124
1125         for_each_rmap_spte(rmap_head, &iter, sptep)
1126                 flush |= spte_write_protect(sptep, pt_protect);
1127
1128         return flush;
1129 }
1130
1131 static bool spte_clear_dirty(u64 *sptep)
1132 {
1133         u64 spte = *sptep;
1134
1135         rmap_printk("spte %p %llx\n", sptep, *sptep);
1136
1137         MMU_WARN_ON(!spte_ad_enabled(spte));
1138         spte &= ~shadow_dirty_mask;
1139         return mmu_spte_update(sptep, spte);
1140 }
1141
1142 static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1143 {
1144         bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1145                                                (unsigned long *)sptep);
1146         if (was_writable && !spte_ad_enabled(*sptep))
1147                 kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1148
1149         return was_writable;
1150 }
1151
1152 /*
1153  * Gets the GFN ready for another round of dirty logging by clearing the
1154  *      - D bit on ad-enabled SPTEs, and
1155  *      - W bit on ad-disabled SPTEs.
1156  * Returns true iff any D or W bits were cleared.
1157  */
1158 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1159                                struct kvm_memory_slot *slot)
1160 {
1161         u64 *sptep;
1162         struct rmap_iterator iter;
1163         bool flush = false;
1164
1165         for_each_rmap_spte(rmap_head, &iter, sptep)
1166                 if (spte_ad_need_write_protect(*sptep))
1167                         flush |= spte_wrprot_for_clear_dirty(sptep);
1168                 else
1169                         flush |= spte_clear_dirty(sptep);
1170
1171         return flush;
1172 }
1173
1174 /**
1175  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1176  * @kvm: kvm instance
1177  * @slot: slot to protect
1178  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1179  * @mask: indicates which pages we should protect
1180  *
1181  * Used when we do not need to care about huge page mappings: e.g. during dirty
1182  * logging we do not have any such mappings.
1183  */
1184 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1185                                      struct kvm_memory_slot *slot,
1186                                      gfn_t gfn_offset, unsigned long mask)
1187 {
1188         struct kvm_rmap_head *rmap_head;
1189
1190         if (is_tdp_mmu_enabled(kvm))
1191                 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1192                                 slot->base_gfn + gfn_offset, mask, true);
1193         while (mask) {
1194                 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1195                                           PG_LEVEL_4K, slot);
1196                 __rmap_write_protect(kvm, rmap_head, false);
1197
1198                 /* clear the first set bit */
1199                 mask &= mask - 1;
1200         }
1201 }
1202
1203 /**
1204  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1205  * protect the page if the D-bit isn't supported.
1206  * @kvm: kvm instance
1207  * @slot: slot to clear D-bit
1208  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1209  * @mask: indicates for which pages we should clear the D-bit
1210  *
1211  * Used for PML to re-log the dirty GPAs after userspace has queried dirty_bitmap.
1212  */
1213 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1214                                          struct kvm_memory_slot *slot,
1215                                          gfn_t gfn_offset, unsigned long mask)
1216 {
1217         struct kvm_rmap_head *rmap_head;
1218
1219         if (is_tdp_mmu_enabled(kvm))
1220                 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1221                                 slot->base_gfn + gfn_offset, mask, false);
1222         while (mask) {
1223                 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1224                                           PG_LEVEL_4K, slot);
1225                 __rmap_clear_dirty(kvm, rmap_head, slot);
1226
1227                 /* clear the first set bit */
1228                 mask &= mask - 1;
1229         }
1230 }
1231
1232 /**
1233  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1234  * PT level pages.
1235  *
1236  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1237  * enable dirty logging for them.
1238  *
1239  * Used when we do not need to care about huge page mappings: e.g. during dirty
1240  * logging we do not have any such mappings.
1241  */
1242 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1243                                 struct kvm_memory_slot *slot,
1244                                 gfn_t gfn_offset, unsigned long mask)
1245 {
1246         if (kvm_x86_ops.cpu_dirty_log_size)
1247                 kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1248         else
1249                 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1250 }
1251
1252 int kvm_cpu_dirty_log_size(void)
1253 {
1254         return kvm_x86_ops.cpu_dirty_log_size;
1255 }
1256
1257 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1258                                     struct kvm_memory_slot *slot, u64 gfn)
1259 {
1260         struct kvm_rmap_head *rmap_head;
1261         int i;
1262         bool write_protected = false;
1263
1264         for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1265                 rmap_head = __gfn_to_rmap(gfn, i, slot);
1266                 write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1267         }
1268
1269         if (is_tdp_mmu_enabled(kvm))
1270                 write_protected |=
1271                         kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
1272
1273         return write_protected;
1274 }
1275
1276 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1277 {
1278         struct kvm_memory_slot *slot;
1279
1280         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1281         return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
1282 }
1283
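/*
 * Zap every spte in the rmap chain; returns true if at least one spte was
 * removed, in which case the TLB needs to be flushed.
 */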
1284 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1285                           struct kvm_memory_slot *slot)
1286 {
1287         u64 *sptep;
1288         struct rmap_iterator iter;
1289         bool flush = false;
1290
1291         while ((sptep = rmap_get_first(rmap_head, &iter))) {
1292                 rmap_printk("spte %p %llx.\n", sptep, *sptep);
1293
1294                 pte_list_remove(rmap_head, sptep);
1295                 flush = true;
1296         }
1297
1298         return flush;
1299 }
1300
1301 static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1302                            struct kvm_memory_slot *slot, gfn_t gfn, int level,
1303                            unsigned long data)
1304 {
1305         return kvm_zap_rmapp(kvm, rmap_head, slot);
1306 }
1307
1308 static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1309                              struct kvm_memory_slot *slot, gfn_t gfn, int level,
1310                              unsigned long data)
1311 {
1312         u64 *sptep;
1313         struct rmap_iterator iter;
1314         int need_flush = 0;
1315         u64 new_spte;
1316         pte_t *ptep = (pte_t *)data;
1317         kvm_pfn_t new_pfn;
1318
1319         WARN_ON(pte_huge(*ptep));
1320         new_pfn = pte_pfn(*ptep);
1321
1322 restart:
1323         for_each_rmap_spte(rmap_head, &iter, sptep) {
1324                 rmap_printk("spte %p %llx gfn %llx (%d)\n",
1325                             sptep, *sptep, gfn, level);
1326
1327                 need_flush = 1;
1328
1329                 if (pte_write(*ptep)) {
1330                         pte_list_remove(rmap_head, sptep);
1331                         goto restart;
1332                 } else {
1333                         new_spte = kvm_mmu_changed_pte_notifier_make_spte(
1334                                         *sptep, new_pfn);
1335
1336                         mmu_spte_clear_track_bits(sptep);
1337                         mmu_spte_set(sptep, new_spte);
1338                 }
1339         }
1340
1341         if (need_flush && kvm_available_flush_tlb_with_range()) {
1342                 kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1343                 return 0;
1344         }
1345
1346         return need_flush;
1347 }
1348
1349 struct slot_rmap_walk_iterator {
1350         /* input fields. */
1351         struct kvm_memory_slot *slot;
1352         gfn_t start_gfn;
1353         gfn_t end_gfn;
1354         int start_level;
1355         int end_level;
1356
1357         /* output fields. */
1358         gfn_t gfn;
1359         struct kvm_rmap_head *rmap;
1360         int level;
1361
1362         /* private field. */
1363         struct kvm_rmap_head *end_rmap;
1364 };
1365
1366 static void
1367 rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1368 {
1369         iterator->level = level;
1370         iterator->gfn = iterator->start_gfn;
1371         iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
1372         iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
1373                                            iterator->slot);
1374 }
1375
1376 static void
1377 slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1378                     struct kvm_memory_slot *slot, int start_level,
1379                     int end_level, gfn_t start_gfn, gfn_t end_gfn)
1380 {
1381         iterator->slot = slot;
1382         iterator->start_level = start_level;
1383         iterator->end_level = end_level;
1384         iterator->start_gfn = start_gfn;
1385         iterator->end_gfn = end_gfn;
1386
1387         rmap_walk_init_level(iterator, iterator->start_level);
1388 }
1389
1390 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1391 {
1392         return !!iterator->rmap;
1393 }
1394
1395 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1396 {
1397         if (++iterator->rmap <= iterator->end_rmap) {
1398                 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1399                 return;
1400         }
1401
1402         if (++iterator->level > iterator->end_level) {
1403                 iterator->rmap = NULL;
1404                 return;
1405         }
1406
1407         rmap_walk_init_level(iterator, iterator->level);
1408 }
1409
1410 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,    \
1411            _start_gfn, _end_gfn, _iter_)                                \
1412         for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,         \
1413                                  _end_level_, _start_gfn, _end_gfn);    \
1414              slot_rmap_walk_okay(_iter_);                               \
1415              slot_rmap_walk_next(_iter_))
1416
1417 typedef int (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1418                               struct kvm_memory_slot *slot, gfn_t gfn,
1419                               int level, unsigned long data);
1420
1421 static __always_inline int kvm_handle_hva_range(struct kvm *kvm,
1422                                                 unsigned long start,
1423                                                 unsigned long end,
1424                                                 unsigned long data,
1425                                                 rmap_handler_t handler)
1426 {
1427         struct kvm_memslots *slots;
1428         struct kvm_memory_slot *memslot;
1429         struct slot_rmap_walk_iterator iterator;
1430         int ret = 0;
1431         int i;
1432
1433         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1434                 slots = __kvm_memslots(kvm, i);
1435                 kvm_for_each_memslot(memslot, slots) {
1436                         unsigned long hva_start, hva_end;
1437                         gfn_t gfn_start, gfn_end;
1438
1439                         hva_start = max(start, memslot->userspace_addr);
1440                         hva_end = min(end, memslot->userspace_addr +
1441                                       (memslot->npages << PAGE_SHIFT));
1442                         if (hva_start >= hva_end)
1443                                 continue;
1444                         /*
1445                          * {gfn(page) | page intersects with [hva_start, hva_end)} =
1446                          * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1447                          */
1448                         gfn_start = hva_to_gfn_memslot(hva_start, memslot);
1449                         gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1450
1451                         for_each_slot_rmap_range(memslot, PG_LEVEL_4K,
1452                                                  KVM_MAX_HUGEPAGE_LEVEL,
1453                                                  gfn_start, gfn_end - 1,
1454                                                  &iterator)
1455                                 ret |= handler(kvm, iterator.rmap, memslot,
1456                                                iterator.gfn, iterator.level, data);
1457                 }
1458         }
1459
1460         return ret;
1461 }
1462
1463 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
1464                           unsigned long data, rmap_handler_t handler)
1465 {
1466         return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
1467 }
1468
1469 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
1470                         unsigned flags)
1471 {
1472         int r;
1473
1474         r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
1475
1476         if (is_tdp_mmu_enabled(kvm))
1477                 r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);
1478
1479         return r;
1480 }
1481
1482 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1483 {
1484         int r;
1485
1486         r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
1487
1488         if (is_tdp_mmu_enabled(kvm))
1489                 r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);
1490
1491         return r;
1492 }
1493
1494 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1495                          struct kvm_memory_slot *slot, gfn_t gfn, int level,
1496                          unsigned long data)
1497 {
1498         u64 *sptep;
1499         struct rmap_iterator iter;
1500         int young = 0;
1501
1502         for_each_rmap_spte(rmap_head, &iter, sptep)
1503                 young |= mmu_spte_age(sptep);
1504
1505         trace_kvm_age_page(gfn, level, slot, young);
1506         return young;
1507 }
1508
1509 static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1510                               struct kvm_memory_slot *slot, gfn_t gfn,
1511                               int level, unsigned long data)
1512 {
1513         u64 *sptep;
1514         struct rmap_iterator iter;
1515
1516         for_each_rmap_spte(rmap_head, &iter, sptep)
1517                 if (is_accessed_spte(*sptep))
1518                         return 1;
1519         return 0;
1520 }
1521
1522 #define RMAP_RECYCLE_THRESHOLD 1000
1523
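/*
 * Zap all sptes mapping @gfn through @spte's shadow page and flush the
 * affected TLB range; used to cap rmap chains (see RMAP_RECYCLE_THRESHOLD).
 */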
1524 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1525 {
1526         struct kvm_rmap_head *rmap_head;
1527         struct kvm_mmu_page *sp;
1528
1529         sp = sptep_to_sp(spte);
1530
1531         rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1532
1533         kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
1534         kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1535                         KVM_PAGES_PER_HPAGE(sp->role.level));
1536 }
1537
1538 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1539 {
1540         int young = false;
1541
1542         young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
1543         if (is_tdp_mmu_enabled(kvm))
1544                 young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);
1545
1546         return young;
1547 }
1548
1549 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1550 {
1551         int young = false;
1552
1553         young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
1554         if (is_tdp_mmu_enabled(kvm))
1555                 young |= kvm_tdp_mmu_test_age_hva(kvm, hva);
1556
1557         return young;
1558 }
1559
1560 #ifdef MMU_DEBUG
1561 static int is_empty_shadow_page(u64 *spt)
1562 {
1563         u64 *pos;
1564         u64 *end;
1565
1566         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1567                 if (is_shadow_present_pte(*pos)) {
1568                         printk(KERN_ERR "%s: %p %llx\n", __func__,
1569                                pos, *pos);
1570                         return 0;
1571                 }
1572         return 1;
1573 }
1574 #endif
1575
1576 /*
1577  * This value is the sum of all of the kvm instances'
1578  * kvm->arch.n_used_mmu_pages values.  We need a global,
1579  * aggregate version in order to make the slab shrinker
1580  * faster.
1581  */
1582 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
1583 {
1584         kvm->arch.n_used_mmu_pages += nr;
1585         percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1586 }
1587
1588 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1589 {
1590         MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1591         hlist_del(&sp->hash_link);
1592         list_del(&sp->link);
1593         free_page((unsigned long)sp->spt);
1594         if (!sp->role.direct)
1595                 free_page((unsigned long)sp->gfns);
1596         kmem_cache_free(mmu_page_header_cache, sp);
1597 }
1598
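/* Hash a gfn to an index in the shadow page hash table. */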
1599 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1600 {
1601         return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1602 }
1603
1604 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1605                                     struct kvm_mmu_page *sp, u64 *parent_pte)
1606 {
1607         if (!parent_pte)
1608                 return;
1609
1610         pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1611 }
1612
1613 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1614                                        u64 *parent_pte)
1615 {
1616         __pte_list_remove(parent_pte, &sp->parent_ptes);
1617 }
1618
1619 static void drop_parent_pte(struct kvm_mmu_page *sp,
1620                             u64 *parent_pte)
1621 {
1622         mmu_page_remove_parent_pte(sp, parent_pte);
1623         mmu_spte_clear_no_track(parent_pte);
1624 }
1625
1626 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1627 {
1628         struct kvm_mmu_page *sp;
1629
1630         sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
1631         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1632         if (!direct)
1633                 sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1634         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1635
1636         /*
1637          * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
1638          * depends on valid pages being added to the head of the list.  See
1639          * comments in kvm_zap_obsolete_pages().
1640          */
1641         sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1642         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1643         kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1644         return sp;
1645 }
1646
1647 static void mark_unsync(u64 *spte);
1648 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1649 {
1650         u64 *sptep;
1651         struct rmap_iterator iter;
1652
1653         for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1654                 mark_unsync(sptep);
1655         }
1656 }
1657
1658 static void mark_unsync(u64 *spte)
1659 {
1660         struct kvm_mmu_page *sp;
1661         unsigned int index;
1662
1663         sp = sptep_to_sp(spte);
1664         index = spte - sp->spt;
1665         if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1666                 return;
1667         if (sp->unsync_children++)
1668                 return;
1669         kvm_mmu_mark_parents_unsync(sp);
1670 }
1671
1672 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1673                                struct kvm_mmu_page *sp)
1674 {
1675         return 0;
1676 }
1677
1678 #define KVM_PAGE_ARRAY_NR 16
1679
1680 struct kvm_mmu_pages {
1681         struct mmu_page_and_offset {
1682                 struct kvm_mmu_page *sp;
1683                 unsigned int idx;
1684         } page[KVM_PAGE_ARRAY_NR];
1685         unsigned int nr;
1686 };
1687
1688 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1689                          int idx)
1690 {
1691         int i;
1692
1693         if (sp->unsync)
1694                 for (i = 0; i < pvec->nr; i++)
1695                         if (pvec->page[i].sp == sp)
1696                                 return 0;
1697
1698         pvec->page[pvec->nr].sp = sp;
1699         pvec->page[pvec->nr].idx = idx;
1700         pvec->nr++;
1701         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1702 }
1703
1704 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1705 {
1706         --sp->unsync_children;
1707         WARN_ON((int)sp->unsync_children < 0);
1708         __clear_bit(idx, sp->unsync_child_bitmap);
1709 }
1710
1711 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1712                            struct kvm_mmu_pages *pvec)
1713 {
1714         int i, ret, nr_unsync_leaf = 0;
1715
1716         for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1717                 struct kvm_mmu_page *child;
1718                 u64 ent = sp->spt[i];
1719
1720                 if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1721                         clear_unsync_child_bit(sp, i);
1722                         continue;
1723                 }
1724
1725                 child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1726
1727                 if (child->unsync_children) {
1728                         if (mmu_pages_add(pvec, child, i))
1729                                 return -ENOSPC;
1730
1731                         ret = __mmu_unsync_walk(child, pvec);
1732                         if (!ret) {
1733                                 clear_unsync_child_bit(sp, i);
1734                                 continue;
1735                         } else if (ret > 0) {
1736                                 nr_unsync_leaf += ret;
1737                         } else
1738                                 return ret;
1739                 } else if (child->unsync) {
1740                         nr_unsync_leaf++;
1741                         if (mmu_pages_add(pvec, child, i))
1742                                 return -ENOSPC;
1743                 } else
1744                         clear_unsync_child_bit(sp, i);
1745         }
1746
1747         return nr_unsync_leaf;
1748 }
1749
1750 #define INVALID_INDEX (-1)
1751
1752 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1753                            struct kvm_mmu_pages *pvec)
1754 {
1755         pvec->nr = 0;
1756         if (!sp->unsync_children)
1757                 return 0;
1758
1759         mmu_pages_add(pvec, sp, INVALID_INDEX);
1760         return __mmu_unsync_walk(sp, pvec);
1761 }
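
/*
 * Contract of the walk above: mmu_unsync_walk() returns 0 when @sp has no
 * unsync children; otherwise @sp itself is recorded as a sentinel entry
 * (idx == INVALID_INDEX) and the return value is the number of unsync leaf
 * pages collected in @pvec, or -ENOSPC once @pvec fills up.  Callers such as
 * mmu_sync_children() simply process the partial batch and walk again.
 */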
1762
1763 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1764 {
1765         WARN_ON(!sp->unsync);
1766         trace_kvm_mmu_sync_page(sp);
1767         sp->unsync = 0;
1768         --kvm->stat.mmu_unsync;
1769 }
1770
1771 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1772                                      struct list_head *invalid_list);
1773 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1774                                     struct list_head *invalid_list);
1775
1776 #define for_each_valid_sp(_kvm, _sp, _list)                             \
1777         hlist_for_each_entry(_sp, _list, hash_link)                     \
1778                 if (is_obsolete_sp((_kvm), (_sp))) {                    \
1779                 } else
1780
1781 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                 \
1782         for_each_valid_sp(_kvm, _sp,                                    \
1783           &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])     \
1784                 if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
1785
1786 static inline bool is_ept_sp(struct kvm_mmu_page *sp)
1787 {
1788         return sp->role.cr0_wp && sp->role.smap_andnot_wp;
1789 }
1790
1791 /* @sp->gfn should be write-protected at the call site */
1792 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1793                             struct list_head *invalid_list)
1794 {
1795         if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
1796             vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
1797                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1798                 return false;
1799         }
1800
1801         return true;
1802 }
1803
1804 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1805                                         struct list_head *invalid_list,
1806                                         bool remote_flush)
1807 {
1808         if (!remote_flush && list_empty(invalid_list))
1809                 return false;
1810
1811         if (!list_empty(invalid_list))
1812                 kvm_mmu_commit_zap_page(kvm, invalid_list);
1813         else
1814                 kvm_flush_remote_tlbs(kvm);
1815         return true;
1816 }
1817
1818 static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
1819                                  struct list_head *invalid_list,
1820                                  bool remote_flush, bool local_flush)
1821 {
1822         if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
1823                 return;
1824
1825         if (local_flush)
1826                 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1827 }
1828
1829 #ifdef CONFIG_KVM_MMU_AUDIT
1830 #include "mmu_audit.c"
1831 #else
1832 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
1833 static void mmu_audit_disable(void) { }
1834 #endif
1835
1836 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1837 {
1838         return sp->role.invalid ||
1839                unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1840 }
1841
1842 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1843                          struct list_head *invalid_list)
1844 {
1845         kvm_unlink_unsync_page(vcpu->kvm, sp);
1846         return __kvm_sync_page(vcpu, sp, invalid_list);
1847 }
1848
1849 /* @gfn should be write-protected at the call site */
1850 static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
1851                            struct list_head *invalid_list)
1852 {
1853         struct kvm_mmu_page *s;
1854         bool ret = false;
1855
1856         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
1857                 if (!s->unsync)
1858                         continue;
1859
1860                 WARN_ON(s->role.level != PG_LEVEL_4K);
1861                 ret |= kvm_sync_page(vcpu, s, invalid_list);
1862         }
1863
1864         return ret;
1865 }
1866
1867 struct mmu_page_path {
1868         struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1869         unsigned int idx[PT64_ROOT_MAX_LEVEL];
1870 };
1871
1872 #define for_each_sp(pvec, sp, parents, i)                       \
1873                 for (i = mmu_pages_first(&pvec, &parents);      \
1874                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1875                         i = mmu_pages_next(&pvec, &parents, i))
1876
1877 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1878                           struct mmu_page_path *parents,
1879                           int i)
1880 {
1881         int n;
1882
1883         for (n = i+1; n < pvec->nr; n++) {
1884                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1885                 unsigned idx = pvec->page[n].idx;
1886                 int level = sp->role.level;
1887
1888                 parents->idx[level-1] = idx;
1889                 if (level == PG_LEVEL_4K)
1890                         break;
1891
1892                 parents->parent[level-2] = sp;
1893         }
1894
1895         return n;
1896 }
1897
1898 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
1899                            struct mmu_page_path *parents)
1900 {
1901         struct kvm_mmu_page *sp;
1902         int level;
1903
1904         if (pvec->nr == 0)
1905                 return 0;
1906
1907         WARN_ON(pvec->page[0].idx != INVALID_INDEX);
1908
1909         sp = pvec->page[0].sp;
1910         level = sp->role.level;
1911         WARN_ON(level == PG_LEVEL_4K);
1912
1913         parents->parent[level-2] = sp;
1914
1915         /* Also set up a sentinel.  Further entries in pvec are all
1916          * children of sp, so this element is never overwritten.
1917          */
1918         parents->parent[level-1] = NULL;
1919         return mmu_pages_next(pvec, parents, 0);
1920 }
1921
1922 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1923 {
1924         struct kvm_mmu_page *sp;
1925         unsigned int level = 0;
1926
1927         do {
1928                 unsigned int idx = parents->idx[level];
1929                 sp = parents->parent[level];
1930                 if (!sp)
1931                         return;
1932
1933                 WARN_ON(idx == INVALID_INDEX);
1934                 clear_unsync_child_bit(sp, idx);
1935                 level++;
1936         } while (!sp->unsync_children);
1937 }
1938
1939 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1940                               struct kvm_mmu_page *parent)
1941 {
1942         int i;
1943         struct kvm_mmu_page *sp;
1944         struct mmu_page_path parents;
1945         struct kvm_mmu_pages pages;
1946         LIST_HEAD(invalid_list);
1947         bool flush = false;
1948
1949         while (mmu_unsync_walk(parent, &pages)) {
1950                 bool protected = false;
1951
1952                 for_each_sp(pages, sp, parents, i)
1953                         protected |= rmap_write_protect(vcpu, sp->gfn);
1954
1955                 if (protected) {
1956                         kvm_flush_remote_tlbs(vcpu->kvm);
1957                         flush = false;
1958                 }
1959
1960                 for_each_sp(pages, sp, parents, i) {
1961                         flush |= kvm_sync_page(vcpu, sp, &invalid_list);
1962                         mmu_pages_clear_parents(&parents);
1963                 }
1964                 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
1965                         kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
1966                         cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
1967                         flush = false;
1968                 }
1969         }
1970
1971         kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
1972 }
1973
1974 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
1975 {
1976         atomic_set(&sp->write_flooding_count, 0);
1977 }
1978
1979 static void clear_sp_write_flooding_count(u64 *spte)
1980 {
1981         __clear_sp_write_flooding_count(sptep_to_sp(spte));
1982 }
1983
1984 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1985                                              gfn_t gfn,
1986                                              gva_t gaddr,
1987                                              unsigned level,
1988                                              int direct,
1989                                              unsigned int access)
1990 {
1991         bool direct_mmu = vcpu->arch.mmu->direct_map;
1992         union kvm_mmu_page_role role;
1993         struct hlist_head *sp_list;
1994         unsigned quadrant;
1995         struct kvm_mmu_page *sp;
1996         bool need_sync = false;
1997         bool flush = false;
1998         int collisions = 0;
1999         LIST_HEAD(invalid_list);
2000
2001         role = vcpu->arch.mmu->mmu_role.base;
2002         role.level = level;
2003         role.direct = direct;
2004         if (role.direct)
2005                 role.gpte_is_8_bytes = true;
2006         role.access = access;
2007         if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2008                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2009                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2010                 role.quadrant = quadrant;
2011         }
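
        /*
         * Worked example for the quadrant computation above (illustrative):
         * a 32-bit non-PAE guest table holds 1024 entries but a shadow page
         * holds only 512, so each guest table is shadowed by up to 2^level
         * pages distinguished by role.quadrant.  For level == 1, gaddr bit
         * 21 picks which half of the 4 MiB guest page table this shadow
         * page covers; for level == 2, gaddr bits 31:30 pick one of the
         * four 1 GiB quadrants of the guest page directory.
         */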
2012
2013         sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2014         for_each_valid_sp(vcpu->kvm, sp, sp_list) {
2015                 if (sp->gfn != gfn) {
2016                         collisions++;
2017                         continue;
2018                 }
2019
2020                 if (!need_sync && sp->unsync)
2021                         need_sync = true;
2022
2023                 if (sp->role.word != role.word)
2024                         continue;
2025
2026                 if (direct_mmu)
2027                         goto trace_get_page;
2028
2029                 if (sp->unsync) {
2030                         /* The page is good, but __kvm_sync_page might still end
2031                          * up zapping it.  If so, break in order to rebuild it.
2032                          */
2033                         if (!__kvm_sync_page(vcpu, sp, &invalid_list))
2034                                 break;
2035
2036                         WARN_ON(!list_empty(&invalid_list));
2037                         kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2038                 }
2039
2040                 if (sp->unsync_children)
2041                         kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2042
2043                 __clear_sp_write_flooding_count(sp);
2044
2045 trace_get_page:
2046                 trace_kvm_mmu_get_page(sp, false);
2047                 goto out;
2048         }
2049
2050         ++vcpu->kvm->stat.mmu_cache_miss;
2051
2052         sp = kvm_mmu_alloc_page(vcpu, direct);
2053
2054         sp->gfn = gfn;
2055         sp->role = role;
2056         hlist_add_head(&sp->hash_link, sp_list);
2057         if (!direct) {
2058                 /*
2059                  * we should do write protection before syncing pages,
2060                  * otherwise the content of the synced shadow page may
2061                  * be inconsistent with the guest page table.
2062                  */
2063                 account_shadowed(vcpu->kvm, sp);
2064                 if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
2065                         kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2066
2067                 if (level > PG_LEVEL_4K && need_sync)
2068                         flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
2069         }
2070         trace_kvm_mmu_get_page(sp, true);
2071
2072         kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2073 out:
2074         if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2075                 vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2076         return sp;
2077 }
2078
2079 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2080                                         struct kvm_vcpu *vcpu, hpa_t root,
2081                                         u64 addr)
2082 {
2083         iterator->addr = addr;
2084         iterator->shadow_addr = root;
2085         iterator->level = vcpu->arch.mmu->shadow_root_level;
2086
2087         if (iterator->level == PT64_ROOT_4LEVEL &&
2088             vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2089             !vcpu->arch.mmu->direct_map)
2090                 --iterator->level;
2091
2092         if (iterator->level == PT32E_ROOT_LEVEL) {
2093                 /*
2094                  * prev_root is currently only used for 64-bit hosts. So only
2095                  * the active root_hpa is valid here.
2096                  */
2097                 BUG_ON(root != vcpu->arch.mmu->root_hpa);
2098
2099                 iterator->shadow_addr
2100                         = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2101                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2102                 --iterator->level;
2103                 if (!iterator->shadow_addr)
2104                         iterator->level = 0;
2105         }
2106 }
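
/*
 * For the PAE case handled above, each of the four pae_root entries covers
 * 1 GiB of guest physical address space, so (addr >> 30) & 3 selects the
 * PDPTE that maps @addr; e.g. addr == 0x80000000 (a purely hypothetical
 * value) would land in pae_root[2].
 */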
2107
2108 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2109                              struct kvm_vcpu *vcpu, u64 addr)
2110 {
2111         shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2112                                     addr);
2113 }
2114
2115 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2116 {
2117         if (iterator->level < PG_LEVEL_4K)
2118                 return false;
2119
2120         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2121         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2122         return true;
2123 }
2124
2125 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2126                                u64 spte)
2127 {
2128         if (is_last_spte(spte, iterator->level)) {
2129                 iterator->level = 0;
2130                 return;
2131         }
2132
2133         iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2134         --iterator->level;
2135 }
2136
2137 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2138 {
2139         __shadow_walk_next(iterator, *iterator->sptep);
2140 }
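
/*
 * Typical use of the iterator (sketch only): callers drive it through the
 * for_each_shadow_entry*() macros rather than calling the helpers above
 * directly, e.g. __direct_map() below does roughly
 *
 *        for_each_shadow_entry(vcpu, gpa, it) {
 *                if (it.level == level)
 *                        break;
 *                ... install a non-leaf SPTE and descend ...
 *        }
 */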
2141
2142 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2143                              struct kvm_mmu_page *sp)
2144 {
2145         u64 spte;
2146
2147         BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2148
2149         spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2150
2151         mmu_spte_set(sptep, spte);
2152
2153         mmu_page_add_parent_pte(vcpu, sp, sptep);
2154
2155         if (sp->unsync_children || sp->unsync)
2156                 mark_unsync(sptep);
2157 }
2158
2159 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2160                                    unsigned direct_access)
2161 {
2162         if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2163                 struct kvm_mmu_page *child;
2164
2165                 /*
2166                  * For the direct sp, if the guest pte's dirty bit
2167                  * changed from clean to dirty, it will corrupt the
2168                  * sp's access: writes would be allowed in the read-only sp,
2169                  * so we should update the spte at this point to get
2170                  * a new sp with the correct access.
2171                  */
2172                 child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2173                 if (child->role.access == direct_access)
2174                         return;
2175
2176                 drop_parent_pte(child, sptep);
2177                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2178         }
2179 }
2180
2181 /* Returns the number of zapped non-leaf child shadow pages. */
2182 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2183                             u64 *spte, struct list_head *invalid_list)
2184 {
2185         u64 pte;
2186         struct kvm_mmu_page *child;
2187
2188         pte = *spte;
2189         if (is_shadow_present_pte(pte)) {
2190                 if (is_last_spte(pte, sp->role.level)) {
2191                         drop_spte(kvm, spte);
2192                         if (is_large_pte(pte))
2193                                 --kvm->stat.lpages;
2194                 } else {
2195                         child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2196                         drop_parent_pte(child, spte);
2197
2198                         /*
2199                          * Recursively zap nested TDP SPs, as parentless SPs are
2200                          * unlikely to be used again in the near future.  This
2201                          * avoids retaining a large number of stale nested SPs.
2202                          */
2203                         if (tdp_enabled && invalid_list &&
2204                             child->role.guest_mode && !child->parent_ptes.val)
2205                                 return kvm_mmu_prepare_zap_page(kvm, child,
2206                                                                 invalid_list);
2207                 }
2208         } else if (is_mmio_spte(pte)) {
2209                 mmu_spte_clear_no_track(spte);
2210         }
2211         return 0;
2212 }
2213
2214 static int kvm_mmu_page_unlink_children(struct kvm *kvm,
2215                                         struct kvm_mmu_page *sp,
2216                                         struct list_head *invalid_list)
2217 {
2218         int zapped = 0;
2219         unsigned i;
2220
2221         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2222                 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
2223
2224         return zapped;
2225 }
2226
2227 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2228 {
2229         u64 *sptep;
2230         struct rmap_iterator iter;
2231
2232         while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2233                 drop_parent_pte(sp, sptep);
2234 }
2235
2236 static int mmu_zap_unsync_children(struct kvm *kvm,
2237                                    struct kvm_mmu_page *parent,
2238                                    struct list_head *invalid_list)
2239 {
2240         int i, zapped = 0;
2241         struct mmu_page_path parents;
2242         struct kvm_mmu_pages pages;
2243
2244         if (parent->role.level == PG_LEVEL_4K)
2245                 return 0;
2246
2247         while (mmu_unsync_walk(parent, &pages)) {
2248                 struct kvm_mmu_page *sp;
2249
2250                 for_each_sp(pages, sp, parents, i) {
2251                         kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2252                         mmu_pages_clear_parents(&parents);
2253                         zapped++;
2254                 }
2255         }
2256
2257         return zapped;
2258 }
2259
2260 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2261                                        struct kvm_mmu_page *sp,
2262                                        struct list_head *invalid_list,
2263                                        int *nr_zapped)
2264 {
2265         bool list_unstable;
2266
2267         trace_kvm_mmu_prepare_zap_page(sp);
2268         ++kvm->stat.mmu_shadow_zapped;
2269         *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2270         *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2271         kvm_mmu_unlink_parents(kvm, sp);
2272
2273         /* Zapping children means active_mmu_pages has become unstable. */
2274         list_unstable = *nr_zapped;
2275
2276         if (!sp->role.invalid && !sp->role.direct)
2277                 unaccount_shadowed(kvm, sp);
2278
2279         if (sp->unsync)
2280                 kvm_unlink_unsync_page(kvm, sp);
2281         if (!sp->root_count) {
2282                 /* Count self */
2283                 (*nr_zapped)++;
2284
2285                 /*
2286                  * Already invalid pages (previously active roots) are not on
2287                  * the active page list.  See list_del() in the "else" case of
2288                  * !sp->root_count.
2289                  */
2290                 if (sp->role.invalid)
2291                         list_add(&sp->link, invalid_list);
2292                 else
2293                         list_move(&sp->link, invalid_list);
2294                 kvm_mod_used_mmu_pages(kvm, -1);
2295         } else {
2296                 /*
2297                  * Remove the active root from the active page list, the root
2298                  * will be explicitly freed when the root_count hits zero.
2299                  */
2300                 list_del(&sp->link);
2301
2302                 /*
2303                  * Obsolete pages cannot be used on any vCPUs, see the comment
2304                  * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2305                  * treats invalid shadow pages as being obsolete.
2306                  */
2307                 if (!is_obsolete_sp(kvm, sp))
2308                         kvm_reload_remote_mmus(kvm);
2309         }
2310
2311         if (sp->lpage_disallowed)
2312                 unaccount_huge_nx_page(kvm, sp);
2313
2314         sp->role.invalid = 1;
2315         return list_unstable;
2316 }
2317
2318 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2319                                      struct list_head *invalid_list)
2320 {
2321         int nr_zapped;
2322
2323         __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2324         return nr_zapped;
2325 }
2326
2327 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2328                                     struct list_head *invalid_list)
2329 {
2330         struct kvm_mmu_page *sp, *nsp;
2331
2332         if (list_empty(invalid_list))
2333                 return;
2334
2335         /*
2336          * We need to make sure everyone sees our modifications to
2337          * the page tables and that we see changes to vcpu->mode here. The
2338          * barrier in kvm_flush_remote_tlbs() achieves this. This pairs
2339          * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2340          *
2341          * In addition, kvm_flush_remote_tlbs() waits for all vcpus to exit
2342          * guest mode and/or finish lockless shadow page table walks.
2343          */
2344         kvm_flush_remote_tlbs(kvm);
2345
2346         list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2347                 WARN_ON(!sp->role.invalid || sp->root_count);
2348                 kvm_mmu_free_page(sp);
2349         }
2350 }
2351
2352 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
2353                                                   unsigned long nr_to_zap)
2354 {
2355         unsigned long total_zapped = 0;
2356         struct kvm_mmu_page *sp, *tmp;
2357         LIST_HEAD(invalid_list);
2358         bool unstable;
2359         int nr_zapped;
2360
2361         if (list_empty(&kvm->arch.active_mmu_pages))
2362                 return 0;
2363
2364 restart:
2365         list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2366                 /*
2367                  * Don't zap active root pages, the page itself can't be freed
2368                  * and zapping it will just force vCPUs to realloc and reload.
2369                  */
2370                 if (sp->root_count)
2371                         continue;
2372
2373                 unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
2374                                                       &nr_zapped);
2375                 total_zapped += nr_zapped;
2376                 if (total_zapped >= nr_to_zap)
2377                         break;
2378
2379                 if (unstable)
2380                         goto restart;
2381         }
2382
2383         kvm_mmu_commit_zap_page(kvm, &invalid_list);
2384
2385         kvm->stat.mmu_recycled += total_zapped;
2386         return total_zapped;
2387 }
2388
2389 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2390 {
2391         if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2392                 return kvm->arch.n_max_mmu_pages -
2393                         kvm->arch.n_used_mmu_pages;
2394
2395         return 0;
2396 }
2397
2398 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2399 {
2400         unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2401
2402         if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2403                 return 0;
2404
2405         kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2406
2407         /*
2408          * Note, this check is intentionally soft, it only guarantees that one
2409          * page is available, while the caller may end up allocating as many as
2410          * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
2411          * exceeding the (arbitrary by default) limit will not harm the host,
2412          * being too aggressive may unnecessarily kill the guest, and getting an
2413          * exact count is far more trouble than it's worth, especially in the
2414          * page fault paths.
2415          */
2416         if (!kvm_mmu_available_pages(vcpu->kvm))
2417                 return -ENOSPC;
2418         return 0;
2419 }
2420
2421 /*
2422  * Changing the number of mmu pages allocated to the vm.
2423  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2424  */
2425 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2426 {
2427         write_lock(&kvm->mmu_lock);
2428
2429         if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2430                 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2431                                                   goal_nr_mmu_pages);
2432
2433                 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2434         }
2435
2436         kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2437
2438         write_unlock(&kvm->mmu_lock);
2439 }
2440
2441 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2442 {
2443         struct kvm_mmu_page *sp;
2444         LIST_HEAD(invalid_list);
2445         int r;
2446
2447         pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2448         r = 0;
2449         write_lock(&kvm->mmu_lock);
2450         for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2451                 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2452                          sp->role.word);
2453                 r = 1;
2454                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2455         }
2456         kvm_mmu_commit_zap_page(kvm, &invalid_list);
2457         write_unlock(&kvm->mmu_lock);
2458
2459         return r;
2460 }
2461
2462 static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2463 {
2464         gpa_t gpa;
2465         int r;
2466
2467         if (vcpu->arch.mmu->direct_map)
2468                 return 0;
2469
2470         gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
2471
2472         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2473
2474         return r;
2475 }
2476
2477 static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2478 {
2479         trace_kvm_mmu_unsync_page(sp);
2480         ++vcpu->kvm->stat.mmu_unsync;
2481         sp->unsync = 1;
2482
2483         kvm_mmu_mark_parents_unsync(sp);
2484 }
2485
2486 bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
2487                             bool can_unsync)
2488 {
2489         struct kvm_mmu_page *sp;
2490
2491         if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
2492                 return true;
2493
2494         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2495                 if (!can_unsync)
2496                         return true;
2497
2498                 if (sp->unsync)
2499                         continue;
2500
2501                 WARN_ON(sp->role.level != PG_LEVEL_4K);
2502                 kvm_unsync_page(vcpu, sp);
2503         }
2504
2505         /*
2506          * We need to ensure that the marking of unsync pages is visible
2507          * before the SPTE is updated to allow writes because
2508          * kvm_mmu_sync_roots() checks the unsync flags without holding
2509          * the MMU lock and so can race with this. If the SPTE was updated
2510          * before the page had been marked as unsync-ed, something like the
2511          * following could happen:
2512          *
2513          * CPU 1                    CPU 2
2514          * ---------------------------------------------------------------------
2515          * 1.2 Host updates SPTE
2516          *     to be writable
2517          *                      2.1 Guest writes a GPTE for GVA X.
2518          *                          (GPTE being in the guest page table shadowed
2519          *                           by the SP from CPU 1.)
2520          *                          This reads SPTE during the page table walk.
2521          *                          Since SPTE.W is read as 1, there is no
2522          *                          fault.
2523          *
2524          *                      2.2 Guest issues TLB flush.
2525          *                          That causes a VM Exit.
2526          *
2527          *                      2.3 kvm_mmu_sync_pages() reads sp->unsync.
2528          *                          Since it is false, it just returns.
2529          *
2530          *                      2.4 Guest accesses GVA X.
2531          *                          Since the mapping in the SP was not updated,
2532          *                          the old mapping for GVA X is incorrectly
2533          *                          used.
2534          * 1.1 Host marks SP
2535          *     as unsync
2536          *     (sp->unsync = true)
2537          *
2538          * The write barrier below ensures that 1.1 happens before 1.2 and thus
2539          * the situation in 2.4 does not arise. The implicit barrier in 2.2
2540          * pairs with this write barrier.
2541          */
2542         smp_wmb();
2543
2544         return false;
2545 }
2546
2547 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2548                     unsigned int pte_access, int level,
2549                     gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2550                     bool can_unsync, bool host_writable)
2551 {
2552         u64 spte;
2553         struct kvm_mmu_page *sp;
2554         int ret;
2555
2556         sp = sptep_to_sp(sptep);
2557
2558         ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
2559                         can_unsync, host_writable, sp_ad_disabled(sp), &spte);
2560
2561         if (spte & PT_WRITABLE_MASK)
2562                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
2563
2564         if (*sptep == spte)
2565                 ret |= SET_SPTE_SPURIOUS;
2566         else if (mmu_spte_update(sptep, spte))
2567                 ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
2568         return ret;
2569 }
2570
2571 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2572                         unsigned int pte_access, bool write_fault, int level,
2573                         gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2574                         bool host_writable)
2575 {
2576         int was_rmapped = 0;
2577         int rmap_count;
2578         int set_spte_ret;
2579         int ret = RET_PF_FIXED;
2580         bool flush = false;
2581
2582         pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2583                  *sptep, write_fault, gfn);
2584
2585         if (unlikely(is_noslot_pfn(pfn))) {
2586                 mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2587                 return RET_PF_EMULATE;
2588         }
2589
2590         if (is_shadow_present_pte(*sptep)) {
2591                 /*
2592                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2593                  * the parent of the now unreachable PTE.
2594                  */
2595                 if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2596                         struct kvm_mmu_page *child;
2597                         u64 pte = *sptep;
2598
2599                         child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2600                         drop_parent_pte(child, sptep);
2601                         flush = true;
2602                 } else if (pfn != spte_to_pfn(*sptep)) {
2603                         pgprintk("hfn old %llx new %llx\n",
2604                                  spte_to_pfn(*sptep), pfn);
2605                         drop_spte(vcpu->kvm, sptep);
2606                         flush = true;
2607                 } else
2608                         was_rmapped = 1;
2609         }
2610
2611         set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
2612                                 speculative, true, host_writable);
2613         if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
2614                 if (write_fault)
2615                         ret = RET_PF_EMULATE;
2616                 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2617         }
2618
2619         if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
2620                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
2621                                 KVM_PAGES_PER_HPAGE(level));
2622
2623         /*
2624          * The fault is fully spurious if and only if the new SPTE and old SPTE
2625          * are identical, and emulation is not required.
2626          */
2627         if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
2628                 WARN_ON_ONCE(!was_rmapped);
2629                 return RET_PF_SPURIOUS;
2630         }
2631
2632         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2633         trace_kvm_mmu_set_spte(level, gfn, sptep);
2634         if (!was_rmapped && is_large_pte(*sptep))
2635                 ++vcpu->kvm->stat.lpages;
2636
2637         if (is_shadow_present_pte(*sptep)) {
2638                 if (!was_rmapped) {
2639                         rmap_count = rmap_add(vcpu, sptep, gfn);
2640                         if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2641                                 rmap_recycle(vcpu, sptep, gfn);
2642                 }
2643         }
2644
2645         return ret;
2646 }
2647
2648 static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2649                                      bool no_dirty_log)
2650 {
2651         struct kvm_memory_slot *slot;
2652
2653         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2654         if (!slot)
2655                 return KVM_PFN_ERR_FAULT;
2656
2657         return gfn_to_pfn_memslot_atomic(slot, gfn);
2658 }
2659
2660 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2661                                     struct kvm_mmu_page *sp,
2662                                     u64 *start, u64 *end)
2663 {
2664         struct page *pages[PTE_PREFETCH_NUM];
2665         struct kvm_memory_slot *slot;
2666         unsigned int access = sp->role.access;
2667         int i, ret;
2668         gfn_t gfn;
2669
2670         gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2671         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2672         if (!slot)
2673                 return -1;
2674
2675         ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2676         if (ret <= 0)
2677                 return -1;
2678
2679         for (i = 0; i < ret; i++, gfn++, start++) {
2680                 mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
2681                              page_to_pfn(pages[i]), true, true);
2682                 put_page(pages[i]);
2683         }
2684
2685         return 0;
2686 }
2687
2688 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2689                                   struct kvm_mmu_page *sp, u64 *sptep)
2690 {
2691         u64 *spte, *start = NULL;
2692         int i;
2693
2694         WARN_ON(!sp->role.direct);
2695
2696         i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2697         spte = sp->spt + i;
2698
2699         for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2700                 if (is_shadow_present_pte(*spte) || spte == sptep) {
2701                         if (!start)
2702                                 continue;
2703                         if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2704                                 break;
2705                         start = NULL;
2706                 } else if (!start)
2707                         start = spte;
2708         }
2709 }
2710
2711 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2712 {
2713         struct kvm_mmu_page *sp;
2714
2715         sp = sptep_to_sp(sptep);
2716
2717         /*
2718          * Without accessed bits, there's no way to distinguish between
2719          * actually accessed translations and prefetched ones, so disable pte
2720          * prefetch if accessed bits aren't available.
2721          */
2722         if (sp_ad_disabled(sp))
2723                 return;
2724
2725         if (sp->role.level > PG_LEVEL_4K)
2726                 return;
2727
2728         /*
2729          * If addresses are being invalidated, skip prefetching to avoid
2730          * accidentally prefetching those addresses.
2731          */
2732         if (unlikely(vcpu->kvm->mmu_notifier_count))
2733                 return;
2734
2735         __direct_pte_prefetch(vcpu, sp, sptep);
2736 }
2737
2738 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
2739                                   struct kvm_memory_slot *slot)
2740 {
2741         unsigned long hva;
2742         pte_t *pte;
2743         int level;
2744
2745         if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
2746                 return PG_LEVEL_4K;
2747
2748         /*
2749          * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
2750          * is not solely for performance, it's also necessary to avoid the
2751          * "writable" check in __gfn_to_hva_many(), which will always fail on
2752          * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
2753          * page fault steps have already verified the guest isn't writing a
2754          * read-only memslot.
2755          */
2756         hva = __gfn_to_hva_memslot(slot, gfn);
2757
2758         pte = lookup_address_in_mm(kvm->mm, hva, &level);
2759         if (unlikely(!pte))
2760                 return PG_LEVEL_4K;
2761
2762         return level;
2763 }
2764
2765 int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
2766                               gfn_t gfn, kvm_pfn_t pfn, int max_level)
2767 {
2768         struct kvm_lpage_info *linfo;
2769
2770         max_level = min(max_level, max_huge_page_level);
2771         for ( ; max_level > PG_LEVEL_4K; max_level--) {
2772                 linfo = lpage_info_slot(gfn, slot, max_level);
2773                 if (!linfo->disallow_lpage)
2774                         break;
2775         }
2776
2777         if (max_level == PG_LEVEL_4K)
2778                 return PG_LEVEL_4K;
2779
2780         return host_pfn_mapping_level(kvm, gfn, pfn, slot);
2781 }
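
/*
 * In other words, the result is the largest level allowed by both the
 * memslot's lpage_info (disallow_lpage accounting) and the level at which
 * the host itself maps the backing pfn.
 */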
2782
2783 int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
2784                             int max_level, kvm_pfn_t *pfnp,
2785                             bool huge_page_disallowed, int *req_level)
2786 {
2787         struct kvm_memory_slot *slot;
2788         kvm_pfn_t pfn = *pfnp;
2789         kvm_pfn_t mask;
2790         int level;
2791
2792         *req_level = PG_LEVEL_4K;
2793
2794         if (unlikely(max_level == PG_LEVEL_4K))
2795                 return PG_LEVEL_4K;
2796
2797         if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
2798                 return PG_LEVEL_4K;
2799
2800         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
2801         if (!slot)
2802                 return PG_LEVEL_4K;
2803
2804         level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
2805         if (level == PG_LEVEL_4K)
2806                 return level;
2807
2808         *req_level = level = min(level, max_level);
2809
2810         /*
2811          * Enforce the iTLB multihit workaround after capturing the requested
2812          * level, which will be used to do precise, accurate accounting.
2813          */
2814         if (huge_page_disallowed)
2815                 return PG_LEVEL_4K;
2816
2817         /*
2818          * mmu_notifier_retry() was successful and mmu_lock is held, so
2819          * the pmd can't be split from under us.
2820          */
2821         mask = KVM_PAGES_PER_HPAGE(level) - 1;
2822         VM_BUG_ON((gfn & mask) != (pfn & mask));
2823         *pfnp = pfn & ~mask;
2824
2825         return level;
2826 }
2827
2828 void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
2829                                 kvm_pfn_t *pfnp, int *goal_levelp)
2830 {
2831         int level = *goal_levelp;
2832
2833         if (cur_level == level && level > PG_LEVEL_4K &&
2834             is_shadow_present_pte(spte) &&
2835             !is_large_pte(spte)) {
2836                 /*
2837                  * A small SPTE exists for this pfn, but FNAME(fetch)
2838                  * and __direct_map would like to create a large PTE
2839                  * patching the next 9 bits of the address back into pfn
2840                  * for them.
2841                  * the address.
2842                  */
2843                 u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
2844                                 KVM_PAGES_PER_HPAGE(level - 1);
2845                 *pfnp |= gfn & page_mask;
2846                 (*goal_levelp)--;
2847         }
2848 }
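
/*
 * Worked example (illustrative): when dropping from a 2 MiB goal level to
 * 4 KiB, page_mask == KVM_PAGES_PER_HPAGE(2) - KVM_PAGES_PER_HPAGE(1), i.e.
 * 511, so the low 9 bits of gfn are OR'ed back into the pfn that
 * kvm_mmu_hugepage_adjust() previously aligned down, yielding the exact
 * 4 KiB frame for the faulting gfn.
 */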
2849
2850 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
2851                         int map_writable, int max_level, kvm_pfn_t pfn,
2852                         bool prefault, bool is_tdp)
2853 {
2854         bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
2855         bool write = error_code & PFERR_WRITE_MASK;
2856         bool exec = error_code & PFERR_FETCH_MASK;
2857         bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
2858         struct kvm_shadow_walk_iterator it;
2859         struct kvm_mmu_page *sp;
2860         int level, req_level, ret;
2861         gfn_t gfn = gpa >> PAGE_SHIFT;
2862         gfn_t base_gfn = gfn;
2863
2864         if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
2865                 return RET_PF_RETRY;
2866
2867         level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
2868                                         huge_page_disallowed, &req_level);
2869
2870         trace_kvm_mmu_spte_requested(gpa, level, pfn);
2871         for_each_shadow_entry(vcpu, gpa, it) {
2872                 /*
2873                  * We cannot overwrite existing page tables with an NX
2874                  * large page, as the leaf could be executable.
2875                  */
2876                 if (nx_huge_page_workaround_enabled)
2877                         disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
2878                                                    &pfn, &level);
2879
2880                 base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
2881                 if (it.level == level)
2882                         break;
2883
2884                 drop_large_spte(vcpu, it.sptep);
2885                 if (!is_shadow_present_pte(*it.sptep)) {
2886                         sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
2887                                               it.level - 1, true, ACC_ALL);
2888
2889                         link_shadow_page(vcpu, it.sptep, sp);
2890                         if (is_tdp && huge_page_disallowed &&
2891                             req_level >= it.level)
2892                                 account_huge_nx_page(vcpu->kvm, sp);
2893                 }
2894         }
2895
2896         ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
2897                            write, level, base_gfn, pfn, prefault,
2898                            map_writable);
2899         if (ret == RET_PF_SPURIOUS)
2900                 return ret;
2901
2902         direct_pte_prefetch(vcpu, it.sptep);
2903         ++vcpu->stat.pf_fixed;
2904         return ret;
2905 }
2906
2907 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2908 {
2909         send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
2910 }
2911
2912 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2913 {
2914         /*
2915          * Do not cache the mmio info caused by writing the readonly gfn
2916          * into the spte, otherwise a read access on the readonly gfn would
2917          * also cause an mmio page fault and be treated as mmio access.
2918          */
2919         if (pfn == KVM_PFN_ERR_RO_FAULT)
2920                 return RET_PF_EMULATE;
2921
2922         if (pfn == KVM_PFN_ERR_HWPOISON) {
2923                 kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
2924                 return RET_PF_RETRY;
2925         }
2926
2927         return -EFAULT;
2928 }
2929
2930 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
2931                                 kvm_pfn_t pfn, unsigned int access,
2932                                 int *ret_val)
2933 {
2934         /* The pfn is invalid, report the error! */
2935         if (unlikely(is_error_pfn(pfn))) {
2936                 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
2937                 return true;
2938         }
2939
2940         if (unlikely(is_noslot_pfn(pfn))) {
2941                 vcpu_cache_mmio_info(vcpu, gva, gfn,
2942                                      access & shadow_mmio_access_mask);
2943                 /*
2944                  * If MMIO caching is disabled, emulate immediately without
2945                  * touching the shadow page tables as attempting to install an
2946                  * MMIO SPTE will just be an expensive nop.
2947                  */
2948                 if (unlikely(!shadow_mmio_value)) {
2949                         *ret_val = RET_PF_EMULATE;
2950                         return true;
2951                 }
2952         }
2953
2954         return false;
2955 }
2956
2957 static bool page_fault_can_be_fast(u32 error_code)
2958 {
2959         /*
2960          * Do not fix the mmio spte with an invalid generation number, which
2961          * needs to be updated by the slow page fault path.
2962          */
2963         if (unlikely(error_code & PFERR_RSVD_MASK))
2964                 return false;
2965
2966         /* See if the page fault is due to an NX violation */
2967         if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
2968                       == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
2969                 return false;
2970
2971         /*
2972          * #PF can be fast if:
2973          * 1. The shadow page table entry is not present, which could mean that
2974          *    the fault is caused by access tracking (if enabled).
2975          * 2. The shadow page table entry is present and the fault
2976          *    is caused by write-protect; that means we just need to change the W
2977          *    bit of the spte, which can be done outside of mmu-lock.
2978          *
2979          * However, if access tracking is disabled we know that a non-present
2980          * page must be a genuine page fault where we have to create a new SPTE.
2981          * So, if access tracking is disabled, we return true only for write
2982          * accesses to a present page.
2983          */
2984
2985         return shadow_acc_track_mask != 0 ||
2986                ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
2987                 == (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
2988 }
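
/*
 * For instance, with access tracking disabled, an error_code of
 * (PFERR_WRITE_MASK | PFERR_PRESENT_MASK), i.e. a write to a present but
 * write-protected translation, is considered fast, whereas
 * (PFERR_FETCH_MASK | PFERR_PRESENT_MASK), an NX violation, never is.
 */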
2989
2990 /*
2991  * Returns true if the SPTE was fixed successfully. Otherwise,
2992  * someone else modified the SPTE from its original value.
2993  */
2994 static bool
2995 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2996                         u64 *sptep, u64 old_spte, u64 new_spte)
2997 {
2998         gfn_t gfn;
2999
3000         WARN_ON(!sp->role.direct);
3001
3002         /*
3003          * Theoretically we could also set dirty bit (and flush TLB) here in
3004          * order to eliminate unnecessary PML logging. See comments in
3005          * set_spte. But fast_page_fault is very unlikely to happen with PML
3006          * enabled, so we do not do this. This might result in the same GPA
3007          * being logged in the PML buffer again when the write really happens,
3008          * and in mark_page_dirty() eventually being called twice for it. But
3009          * that does no harm. It also avoids the TLB flush needed after setting
3010          * the dirty bit, so non-PML cases won't be impacted.
3011          *
3012          * Compare with set_spte where instead shadow_dirty_mask is set.
3013          */
3014         if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3015                 return false;
3016
3017         if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3018                 /*
3019                  * The gfn of a direct spte is stable since it is
3020                  * calculated from sp->gfn.
3021                  */
3022                 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
3023                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3024         }
3025
3026         return true;
3027 }
3028
3029 static bool is_access_allowed(u32 fault_err_code, u64 spte)
3030 {
3031         if (fault_err_code & PFERR_FETCH_MASK)
3032                 return is_executable_pte(spte);
3033
3034         if (fault_err_code & PFERR_WRITE_MASK)
3035                 return is_writable_pte(spte);
3036
3037         /* Fault was on Read access */
3038         return spte & PT_PRESENT_MASK;
3039 }
3040
3041 /*
3042  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3043  */
3044 static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3045                            u32 error_code)
3046 {
3047         struct kvm_shadow_walk_iterator iterator;
3048         struct kvm_mmu_page *sp;
3049         int ret = RET_PF_INVALID;
3050         u64 spte = 0ull;
3051         uint retry_count = 0;
3052
3053         if (!page_fault_can_be_fast(error_code))
3054                 return ret;
3055
3056         walk_shadow_page_lockless_begin(vcpu);
3057
3058         do {
3059                 u64 new_spte;
3060
3061                 for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
3062                         if (!is_shadow_present_pte(spte))
3063                                 break;
3064
3065                 if (!is_shadow_present_pte(spte))
3066                         break;
3067
3068                 sp = sptep_to_sp(iterator.sptep);
3069                 if (!is_last_spte(spte, sp->role.level))
3070                         break;
3071
3072                 /*
3073                  * Check whether the memory access that caused the fault would
3074                  * still cause it if it were to be performed right now. If not,
3075                  * then this is a spurious fault caused by the TLB being lazily
3076                  * flushed, or by some other CPU having already fixed the PTE after the
3077                  * current CPU took the fault.
3078                  *
3079                  * Need not check the access of upper level table entries since
3080                  * they are always ACC_ALL.
3081                  */
3082                 if (is_access_allowed(error_code, spte)) {
3083                         ret = RET_PF_SPURIOUS;
3084                         break;
3085                 }
3086
3087                 new_spte = spte;
3088
3089                 if (is_access_track_spte(spte))
3090                         new_spte = restore_acc_track_spte(new_spte);
3091
3092                 /*
3093                  * Currently, to simplify the code, write-protection can
3094                  * be removed in the fast path only if the SPTE was
3095                  * write-protected for dirty-logging or access tracking.
3096                  */
3097                 if ((error_code & PFERR_WRITE_MASK) &&
3098                     spte_can_locklessly_be_made_writable(spte)) {
3099                         new_spte |= PT_WRITABLE_MASK;
3100
3101                         /*
3102                          * Do not fix write permission on a large SPTE.  Since
3103                          * we only mark the first page dirty in the dirty bitmap in
3104                          * fast_pf_fix_direct_spte(), the other pages would be missed
3105                          * if its slot has dirty logging enabled.
3106                          *
3107                          * Instead, we let the slow page fault path create a
3108                          * normal spte to fix the access.
3109                          *
3110                          * See the comments in kvm_arch_commit_memory_region().
3111                          */
3112                         if (sp->role.level > PG_LEVEL_4K)
3113                                 break;
3114                 }
3115
3116                 /* Verify that the fault can be handled in the fast path */
3117                 if (new_spte == spte ||
3118                     !is_access_allowed(error_code, new_spte))
3119                         break;
3120
3121                 /*
3122                  * Currently, fast page fault only works for direct mapping
3123                  * since the gfn is not stable for indirect shadow pages. See
3124                  * Documentation/virt/kvm/locking.rst for more details.
3125                  */
3126                 if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
3127                                             new_spte)) {
3128                         ret = RET_PF_FIXED;
3129                         break;
3130                 }
3131
3132                 if (++retry_count > 4) {
3133                         printk_once(KERN_WARNING
3134                                 "kvm: Fast #PF retrying more than 4 times.\n");
3135                         break;
3136                 }
3137
3138         } while (true);
3139
3140         trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
3141                               spte, ret);
3142         walk_shadow_page_lockless_end(vcpu);
3143
3144         return ret;
3145 }
3146
3147 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3148                                struct list_head *invalid_list)
3149 {
3150         struct kvm_mmu_page *sp;
3151
3152         if (!VALID_PAGE(*root_hpa))
3153                 return;
3154
3155         sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
3156
3157         if (kvm_mmu_put_root(kvm, sp)) {
3158                 if (is_tdp_mmu_page(sp))
3159                         kvm_tdp_mmu_free_root(kvm, sp);
3160                 else if (sp->role.invalid)
3161                         kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3162         }
3163
3164         *root_hpa = INVALID_PAGE;
3165 }
3166
3167 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3168 void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3169                         ulong roots_to_free)
3170 {
3171         struct kvm *kvm = vcpu->kvm;
3172         int i;
3173         LIST_HEAD(invalid_list);
3174         bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3175
3176         BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3177
3178         /* Before acquiring the MMU lock, see if we need to do any real work. */
3179         if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
3180                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3181                         if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3182                             VALID_PAGE(mmu->prev_roots[i].hpa))
3183                                 break;
3184
3185                 if (i == KVM_MMU_NUM_PREV_ROOTS)
3186                         return;
3187         }
3188
3189         write_lock(&kvm->mmu_lock);
3190
3191         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3192                 if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3193                         mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3194                                            &invalid_list);
3195
3196         if (free_active_root) {
3197                 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3198                     (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
3199                         mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
3200                 } else if (mmu->pae_root) {
3201                         for (i = 0; i < 4; ++i) {
3202                                 if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3203                                         continue;
3204
3205                                 mmu_free_root_page(kvm, &mmu->pae_root[i],
3206                                                    &invalid_list);
3207                                 mmu->pae_root[i] = INVALID_PAE_ROOT;
3208                         }
3209                 }
3210                 mmu->root_hpa = INVALID_PAGE;
3211                 mmu->root_pgd = 0;
3212         }
3213
3214         kvm_mmu_commit_zap_page(kvm, &invalid_list);
3215         write_unlock(&kvm->mmu_lock);
3216 }
3217 EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3218
3219 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3220 {
3221         int ret = 0;
3222
3223         if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3224                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3225                 ret = 1;
3226         }
3227
3228         return ret;
3229 }
3230
3231 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
3232                             u8 level, bool direct)
3233 {
3234         struct kvm_mmu_page *sp;
3235
3236         sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
3237         ++sp->root_count;
3238
3239         return __pa(sp->spt);
3240 }
3241
3242 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3243 {
3244         struct kvm_mmu *mmu = vcpu->arch.mmu;
3245         u8 shadow_root_level = mmu->shadow_root_level;
3246         hpa_t root;
3247         unsigned i;
3248
3249         if (is_tdp_mmu_enabled(vcpu->kvm)) {
3250                 root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3251                 mmu->root_hpa = root;
3252         } else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3253                 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3254                 mmu->root_hpa = root;
3255         } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3256                 if (WARN_ON_ONCE(!mmu->pae_root))
3257                         return -EIO;
3258
3259                 for (i = 0; i < 4; ++i) {
3260                         WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3261
3262                         root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
3263                                               i << 30, PT32_ROOT_LEVEL, true);
3264                         mmu->pae_root[i] = root | PT_PRESENT_MASK |
3265                                            shadow_me_mask;
3266                 }
3267                 mmu->root_hpa = __pa(mmu->pae_root);
3268         } else {
3269                 WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3270                 return -EIO;
3271         }
3272
3273         /* root_pgd is ignored for direct MMUs. */
3274         mmu->root_pgd = 0;
3275
3276         return 0;
3277 }
3278
3279 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3280 {
3281         struct kvm_mmu *mmu = vcpu->arch.mmu;
3282         u64 pdptrs[4], pm_mask;
3283         gfn_t root_gfn, root_pgd;
3284         hpa_t root;
3285         int i;
3286
3287         root_pgd = mmu->get_guest_pgd(vcpu);
3288         root_gfn = root_pgd >> PAGE_SHIFT;
3289
3290         if (mmu_check_root(vcpu, root_gfn))
3291                 return 1;
3292
3293         if (mmu->root_level == PT32E_ROOT_LEVEL) {
3294                 for (i = 0; i < 4; ++i) {
3295                         pdptrs[i] = mmu->get_pdptr(vcpu, i);
3296                         if (!(pdptrs[i] & PT_PRESENT_MASK))
3297                                 continue;
3298
3299                         if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
3300                                 return 1;
3301                 }
3302         }
3303
3304         /*
3305          * Do we shadow a long mode page table? If so we need to
3306          * write-protect the guest's page table root.
3307          */
3308         if (mmu->root_level >= PT64_ROOT_4LEVEL) {
3309                 root = mmu_alloc_root(vcpu, root_gfn, 0,
3310                                       mmu->shadow_root_level, false);
3311                 mmu->root_hpa = root;
3312                 goto set_root_pgd;
3313         }
3314
3315         if (WARN_ON_ONCE(!mmu->pae_root))
3316                 return -EIO;
3317
3318         /*
3319          * We shadow a 32 bit page table. This may be a legacy 2-level
3320          * or a PAE 3-level page table. In either case we need to be aware that
3321          * the shadow page table may be a PAE or a long mode page table.
3322          */
3323         pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3324         if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
3325                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3326
3327                 if (WARN_ON_ONCE(!mmu->lm_root))
3328                         return -EIO;
3329
3330                 mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask;
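                      /*
                       * i.e. lm_root acts as a one-entry PML4 whose single
                       * present entry points at the PAE PDPT (pae_root),
                       * supplying the extra paging level the 64-bit shadow
                       * walk expects.
                       */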
3331         }
3332
3333         for (i = 0; i < 4; ++i) {
3334                 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3335
3336                 if (mmu->root_level == PT32E_ROOT_LEVEL) {
3337                         if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3338                                 mmu->pae_root[i] = INVALID_PAE_ROOT;
3339                                 continue;
3340                         }
3341                         root_gfn = pdptrs[i] >> PAGE_SHIFT;
3342                 }
3343
3344                 root = mmu_alloc_root(vcpu, root_gfn, i << 30,
3345                                       PT32_ROOT_LEVEL, false);
3346                 mmu->pae_root[i] = root | pm_mask;
3347         }
3348
3349         if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
3350                 mmu->root_hpa = __pa(mmu->lm_root);
3351         else
3352                 mmu->root_hpa = __pa(mmu->pae_root);
3353
3354 set_root_pgd:
3355         mmu->root_pgd = root_pgd;
3356
3357         return 0;
3358 }
3359
3360 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3361 {
3362         struct kvm_mmu *mmu = vcpu->arch.mmu;
3363         u64 *lm_root, *pae_root;
3364
3365         /*
3366          * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3367          * tables are allocated and initialized at root creation as there is no
3368          * equivalent level in the guest's NPT to shadow.  Allocate the tables
3369          * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3370          */
3371         if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
3372             mmu->shadow_root_level < PT64_ROOT_4LEVEL)
3373                 return 0;
3374
3375         /*
3376          * This mess only works with 4-level paging and needs to be updated to
3377          * work with 5-level paging.
3378          */
3379         if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
3380                 return -EIO;
3381
3382         if (mmu->pae_root && mmu->lm_root)
3383                 return 0;
3384
3385         /*
3386          * The special roots should always be allocated in concert.  Yell and
3387          * bail if KVM ends up in a state where only one of the roots is valid.
3388          */
3389         if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->lm_root))
3390                 return -EIO;
3391
3392         /*
3393          * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
3394          * doesn't need to be decrypted.
3395          */
3396         pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3397         if (!pae_root)
3398                 return -ENOMEM;
3399
3400         lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3401         if (!lm_root) {
3402                 free_page((unsigned long)pae_root);
3403                 return -ENOMEM;
3404         }
3405
3406         mmu->pae_root = pae_root;
3407         mmu->lm_root = lm_root;
3408
3409         return 0;
3410 }
3411
3412 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3413 {
3414         int i;
3415         struct kvm_mmu_page *sp;
3416
3417         if (vcpu->arch.mmu->direct_map)
3418                 return;
3419
3420         if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3421                 return;
3422
3423         vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3424
3425         if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3426                 hpa_t root = vcpu->arch.mmu->root_hpa;
3427                 sp = to_shadow_page(root);
3428
3429                 /*
3430                  * Even if another CPU was marking the SP as unsync-ed
3431                  * simultaneously, any guest page table changes are not
3432                  * guaranteed to be visible anyway until this VCPU issues a TLB
3433                  * flush strictly after those changes are made. We only need to
3434                  * ensure that the other CPU sets these flags before any actual
3435                  * changes to the page tables are made. The comments in
3436                  * mmu_need_write_protect() describe what could go wrong if this
3437                  * requirement isn't satisfied.
3438                  */
3439                 if (!smp_load_acquire(&sp->unsync) &&
3440                     !smp_load_acquire(&sp->unsync_children))
3441                         return;
3442
3443                 write_lock(&vcpu->kvm->mmu_lock);
3444                 kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3445
3446                 mmu_sync_children(vcpu, sp);
3447
3448                 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3449                 write_unlock(&vcpu->kvm->mmu_lock);
3450                 return;
3451         }
3452
3453         write_lock(&vcpu->kvm->mmu_lock);
3454         kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3455
3456         for (i = 0; i < 4; ++i) {
3457                 hpa_t root = vcpu->arch.mmu->pae_root[i];
3458
3459                 if (IS_VALID_PAE_ROOT(root)) {
3460                         root &= PT64_BASE_ADDR_MASK;
3461                         sp = to_shadow_page(root);
3462                         mmu_sync_children(vcpu, sp);
3463                 }
3464         }
3465
3466         kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3467         write_unlock(&vcpu->kvm->mmu_lock);
3468 }
3469
3470 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3471                                   u32 access, struct x86_exception *exception)
3472 {
3473         if (exception)
3474                 exception->error_code = 0;
3475         return vaddr;
3476 }
3477
3478 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3479                                          u32 access,
3480                                          struct x86_exception *exception)
3481 {
3482         if (exception)
3483                 exception->error_code = 0;
3484         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3485 }
3486
3487 static bool
3488 __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
3489 {
3490         int bit7 = (pte >> 7) & 1;
3491
3492         return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
3493 }
3494
3495 static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
3496 {
3497         return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
3498 }
3499
3500 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3501 {
3502         /*
3503          * A nested guest cannot use the MMIO cache if it is using nested
3504          * page tables, because cr2 is a nGPA while the cache stores GPAs.
3505          */
3506         if (mmu_is_nested(vcpu))
3507                 return false;
3508
3509         if (direct)
3510                 return vcpu_match_mmio_gpa(vcpu, addr);
3511
3512         return vcpu_match_mmio_gva(vcpu, addr);
3513 }
3514
3515 /*
3516  * Return the level of the lowest level SPTE added to sptes.
3517  * That SPTE may be non-present.
3518  */
3519 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3520 {
3521         struct kvm_shadow_walk_iterator iterator;
3522         int leaf = -1;
3523         u64 spte;
3524
3525         walk_shadow_page_lockless_begin(vcpu);
3526
3527         for (shadow_walk_init(&iterator, vcpu, addr),
3528              *root_level = iterator.level;
3529              shadow_walk_okay(&iterator);
3530              __shadow_walk_next(&iterator, spte)) {
3531                 leaf = iterator.level;
3532                 spte = mmu_spte_get_lockless(iterator.sptep);
3533
3534                 sptes[leaf] = spte;
3535
3536                 if (!is_shadow_present_pte(spte))
3537                         break;
3538         }
3539
3540         walk_shadow_page_lockless_end(vcpu);
3541
3542         return leaf;
3543 }
3544
3545 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
3546 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3547 {
3548         u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
3549         struct rsvd_bits_validate *rsvd_check;
3550         int root, leaf, level;
3551         bool reserved = false;
3552
3553         if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
3554                 *sptep = 0ull;
3555                 return reserved;
3556         }
3557
3558         if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
3559                 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
3560         else
3561                 leaf = get_walk(vcpu, addr, sptes, &root);
3562
3563         if (unlikely(leaf < 0)) {
3564                 *sptep = 0ull;
3565                 return reserved;
3566         }
3567
3568         *sptep = sptes[leaf];
3569
3570         /*
3571          * Skip reserved bits checks on the terminal leaf if it's not a valid
3572          * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
3573          * design, always have reserved bits set.  The purpose of the checks is
3574          * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
3575          */
3576         if (!is_shadow_present_pte(sptes[leaf]))
3577                 leaf++;
3578
3579         rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
3580
3581         for (level = root; level >= leaf; level--)
3582                 /*
3583                  * Use a bitwise-OR instead of a logical-OR to aggregate the
3584                  * reserved bit and EPT's invalid memtype/XWR checks to avoid
3585                  * adding a Jcc in the loop.
3586                  */
3587                 reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
3588                             __is_rsvd_bits_set(rsvd_check, sptes[level], level);
3589
3590         if (reserved) {
3591                 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3592                        __func__, addr);
3593                 for (level = root; level >= leaf; level--)
3594                         pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
3595                                sptes[level], level,
3596                                rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]);
3597         }
3598
3599         return reserved;
3600 }
3601
3602 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3603 {
3604         u64 spte;
3605         bool reserved;
3606
3607         if (mmio_info_in_cache(vcpu, addr, direct))
3608                 return RET_PF_EMULATE;
3609
3610         reserved = get_mmio_spte(vcpu, addr, &spte);
3611         if (WARN_ON(reserved))
3612                 return -EINVAL;
3613
3614         if (is_mmio_spte(spte)) {
3615                 gfn_t gfn = get_mmio_spte_gfn(spte);
3616                 unsigned int access = get_mmio_spte_access(spte);
3617
3618                 if (!check_mmio_spte(vcpu, spte))
3619                         return RET_PF_INVALID;
3620
3621                 if (direct)
3622                         addr = 0;
3623
3624                 trace_handle_mmio_page_fault(addr, gfn, access);
3625                 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3626                 return RET_PF_EMULATE;
3627         }
3628
3629         /*
3630          * If the page table has been zapped by another CPU, let the CPU fault
3631          * again on the address.
3632          */
3633         return RET_PF_RETRY;
3634 }
3635
3636 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3637                                          u32 error_code, gfn_t gfn)
3638 {
3639         if (unlikely(error_code & PFERR_RSVD_MASK))
3640                 return false;
3641
3642         if (!(error_code & PFERR_PRESENT_MASK) ||
3643               !(error_code & PFERR_WRITE_MASK))
3644                 return false;
3645
3646         /*
3647          * The guest is writing a page that is write-tracked, which cannot
3648          * be fixed by the page fault handler.
3649          */
3650         if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
3651                 return true;
3652
3653         return false;
3654 }
3655
3656 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3657 {
3658         struct kvm_shadow_walk_iterator iterator;
3659         u64 spte;
3660
3661         walk_shadow_page_lockless_begin(vcpu);
3662         for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3663                 clear_sp_write_flooding_count(iterator.sptep);
3664                 if (!is_shadow_present_pte(spte))
3665                         break;
3666         }
3667         walk_shadow_page_lockless_end(vcpu);
3668 }
3669
3670 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3671                                     gfn_t gfn)
3672 {
3673         struct kvm_arch_async_pf arch;
3674
3675         arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
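             /*
              * The token packs a per-vCPU, monotonically increasing id in the
              * upper bits and the vcpu_id in the low 12 bits, so the eventual
              * "page ready" notification can be matched to this fault.
              */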
3676         arch.gfn = gfn;
3677         arch.direct_map = vcpu->arch.mmu->direct_map;
3678         arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3679
3680         return kvm_setup_async_pf(vcpu, cr2_or_gpa,
3681                                   kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3682 }
3683
3684 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3685                          gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
3686                          bool write, bool *writable)
3687 {
3688         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3689         bool async;
3690
3691         /*
3692          * Retry the page fault if the gfn hit a memslot that is being deleted
3693          * or moved.  This ensures any existing SPTEs for the old memslot will
3694          * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3695          */
3696         if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
3697                 return true;
3698
3699         /* Don't expose private memslots to L2. */
3700         if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
3701                 *pfn = KVM_PFN_NOSLOT;
3702                 *writable = false;
3703                 return false;
3704         }
3705
3706         async = false;
3707         *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async,
3708                                     write, writable, hva);
3709         if (!async)
3710                 return false; /* *pfn has correct page already */
3711
3712         if (!prefault && kvm_can_do_async_pf(vcpu)) {
3713                 trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
3714                 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
3715                         trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
3716                         kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3717                         return true;
3718                 } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
3719                         return true;
3720         }
3721
3722         *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
3723                                     write, writable, hva);
3724         return false;
3725 }
3726
3727 static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
3728                              bool prefault, int max_level, bool is_tdp)
3729 {
3730         bool write = error_code & PFERR_WRITE_MASK;
3731         bool map_writable;
3732
3733         gfn_t gfn = gpa >> PAGE_SHIFT;
3734         unsigned long mmu_seq;
3735         kvm_pfn_t pfn;
3736         hva_t hva;
3737         int r;
3738
3739         if (page_fault_handle_page_track(vcpu, error_code, gfn))
3740                 return RET_PF_EMULATE;
3741
3742         if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
3743                 r = fast_page_fault(vcpu, gpa, error_code);
3744                 if (r != RET_PF_INVALID)
3745                         return r;
3746         }
3747
3748         r = mmu_topup_memory_caches(vcpu, false);
3749         if (r)
3750                 return r;
3751
3752         mmu_seq = vcpu->kvm->mmu_notifier_seq;
3753         smp_rmb();
3754
3755         if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva,
3756                          write, &map_writable))
3757                 return RET_PF_RETRY;
3758
3759         if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
3760                 return r;
3761
3762         r = RET_PF_RETRY;
3763
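             /*
              * The TDP MMU handles page faults while holding mmu_lock for read
              * and updates SPTEs atomically, so the lock is taken for write
              * only when the fault is handled by the legacy shadow MMU.
              */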
3764         if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
3765                 read_lock(&vcpu->kvm->mmu_lock);
3766         else
3767                 write_lock(&vcpu->kvm->mmu_lock);
3768
3769         if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
3770                 goto out_unlock;
3771         r = make_mmu_pages_available(vcpu);
3772         if (r)
3773                 goto out_unlock;
3774
3775         if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
3776                 r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
3777                                     pfn, prefault);
3778         else
3779                 r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
3780                                  prefault, is_tdp);
3781
3782 out_unlock:
3783         if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
3784                 read_unlock(&vcpu->kvm->mmu_lock);
3785         else
3786                 write_unlock(&vcpu->kvm->mmu_lock);
3787         kvm_release_pfn_clean(pfn);
3788         return r;
3789 }
3790
3791 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
3792                                 u32 error_code, bool prefault)
3793 {
3794         pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
3795
3796         /* This path builds a PAE pagetable, so we can map 2MB pages at most. */
3797         return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
3798                                  PG_LEVEL_2M, false);
3799 }
3800
3801 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3802                                 u64 fault_address, char *insn, int insn_len)
3803 {
3804         int r = 1;
3805         u32 flags = vcpu->arch.apf.host_apf_flags;
3806
3807 #ifndef CONFIG_X86_64
3808         /* A 64-bit CR2 should be impossible on 32-bit KVM. */
3809         if (WARN_ON_ONCE(fault_address >> 32))
3810                 return -EFAULT;
3811 #endif
3812
3813         vcpu->arch.l1tf_flush_l1d = true;
3814         if (!flags) {
3815                 trace_kvm_page_fault(fault_address, error_code);
3816
3817                 if (kvm_event_needs_reinjection(vcpu))
3818                         kvm_mmu_unprotect_page_virt(vcpu, fault_address);
3819                 r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
3820                                 insn_len);
3821         } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
3822                 vcpu->arch.apf.host_apf_flags = 0;
3823                 local_irq_disable();
3824                 kvm_async_pf_task_wait_schedule(fault_address);
3825                 local_irq_enable();
3826         } else {
3827                 WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
3828         }
3829
3830         return r;
3831 }
3832 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
3833
3834 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
3835                        bool prefault)
3836 {
3837         int max_level;
3838
3839         for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
3840              max_level > PG_LEVEL_4K;
3841              max_level--) {
3842                 int page_num = KVM_PAGES_PER_HPAGE(max_level);
3843                 gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
3844
3845                 if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
3846                         break;
3847         }
3848
3849         return direct_page_fault(vcpu, gpa, error_code, prefault,
3850                                  max_level, true);
3851 }
3852
3853 static void nonpaging_init_context(struct kvm_vcpu *vcpu,
3854                                    struct kvm_mmu *context)
3855 {
3856         context->page_fault = nonpaging_page_fault;
3857         context->gva_to_gpa = nonpaging_gva_to_gpa;
3858         context->sync_page = nonpaging_sync_page;
3859         context->invlpg = NULL;
3860         context->root_level = 0;
3861         context->shadow_root_level = PT32E_ROOT_LEVEL;
3862         context->direct_map = true;
3863         context->nx = false;
3864 }
3865
3866 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
3867                                   union kvm_mmu_page_role role)
3868 {
3869         return (role.direct || pgd == root->pgd) &&
3870                VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
3871                role.word == to_shadow_page(root->hpa)->role.word;
3872 }
3873
3874 /*
3875  * Find out if a previously cached root matching the new pgd/role is available.
3876  * The current root is also inserted into the cache.
3877  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
3878  * returned.
3879  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
3880  * false is returned. This root should now be freed by the caller.
3881  */
3882 static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3883                                   union kvm_mmu_page_role new_role)
3884 {
3885         uint i;
3886         struct kvm_mmu_root_info root;
3887         struct kvm_mmu *mmu = vcpu->arch.mmu;
3888
3889         root.pgd = mmu->root_pgd;
3890         root.hpa = mmu->root_hpa;
3891
3892         if (is_root_usable(&root, new_pgd, new_role))
3893                 return true;
3894
3895         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3896                 swap(root, mmu->prev_roots[i]);
3897
3898                 if (is_root_usable(&root, new_pgd, new_role))
3899                         break;
3900         }
3901
3902         mmu->root_hpa = root.hpa;
3903         mmu->root_pgd = root.pgd;
3904
3905         return i < KVM_MMU_NUM_PREV_ROOTS;
3906 }
3907
3908 static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3909                             union kvm_mmu_page_role new_role)
3910 {
3911         struct kvm_mmu *mmu = vcpu->arch.mmu;
3912
3913         /*
3914          * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
3915          * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
3916          * later if necessary.
3917          */
3918         if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3919             mmu->root_level >= PT64_ROOT_4LEVEL)
3920                 return cached_root_available(vcpu, new_pgd, new_role);
3921
3922         return false;
3923 }
3924
3925 static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3926                               union kvm_mmu_page_role new_role,
3927                               bool skip_tlb_flush, bool skip_mmu_sync)
3928 {
3929         if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
3930                 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
3931                 return;
3932         }
3933
3934         /*
3935          * It's possible that the cached previous root page is obsolete because
3936          * of a change in the MMU generation number. However, changing the
3937          * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
3938          * free the root set here and allocate a new one.
3939          */
3940         kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
3941
3942         if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
3943                 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
3944         if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
3945                 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
3946
3947         /*
3948          * The last MMIO access's GVA and GPA are cached in the VCPU. When
3949          * switching to a new CR3, that GVA->GPA mapping may no longer be
3950          * valid. So clear any cached MMIO info even when we don't need to sync
3951          * the shadow page tables.
3952          */
3953         vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3954
3955         /*
3956          * If this is a direct root page, it doesn't have a write flooding
3957          * count. Otherwise, clear the write flooding count.
3958          */
3959         if (!new_role.direct)
3960                 __clear_sp_write_flooding_count(
3961                                 to_shadow_page(vcpu->arch.mmu->root_hpa));
3962 }
3963
3964 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
3965                      bool skip_mmu_sync)
3966 {
3967         __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
3968                           skip_tlb_flush, skip_mmu_sync);
3969 }
3970 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
3971
3972 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
3973 {
3974         return kvm_read_cr3(vcpu);
3975 }
3976
3977 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
3978                            unsigned int access, int *nr_present)
3979 {
3980         if (unlikely(is_mmio_spte(*sptep))) {
3981                 if (gfn != get_mmio_spte_gfn(*sptep)) {
3982                         mmu_spte_clear_no_track(sptep);
3983                         return true;
3984                 }
3985
3986                 (*nr_present)++;
3987                 mark_mmio_spte(vcpu, sptep, gfn, access);
3988                 return true;
3989         }
3990
3991         return false;
3992 }
3993
3994 static inline bool is_last_gpte(struct kvm_mmu *mmu,
3995                                 unsigned level, unsigned gpte)
3996 {
3997         /*
3998          * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
3999          * If it is clear, there are no large pages at this level, so clear
4000          * PT_PAGE_SIZE_MASK in gpte if that is the case.
4001          */
4002         gpte &= level - mmu->last_nonleaf_level;
4003
4004         /*
4005          * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
4006          * iff level <= PG_LEVEL_4K, which for our purpose means
4007          * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
4008          */
4009         gpte |= level - PG_LEVEL_4K - 1;
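             /*
              * A worked example, assuming PG_LEVEL_4K == 1 and PT_PAGE_SIZE_MASK
              * is bit 7: for a 4-level guest (last_nonleaf_level == 4), level 1
              * entries are always leaves (the OR sets bit 7 unconditionally),
              * level 4 entries never are (the AND clears the gpte), and levels
              * 2 and 3 simply report the guest PTE's PS bit.
              */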
4010
4011         return gpte & PT_PAGE_SIZE_MASK;
4012 }
4013
4014 #define PTTYPE_EPT 18 /* arbitrary */
4015 #define PTTYPE PTTYPE_EPT
4016 #include "paging_tmpl.h"
4017 #undef PTTYPE
4018
4019 #define PTTYPE 64
4020 #include "paging_tmpl.h"
4021 #undef PTTYPE
4022
4023 #define PTTYPE 32
4024 #include "paging_tmpl.h"
4025 #undef PTTYPE
4026
4027 static void
4028 __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4029                         struct rsvd_bits_validate *rsvd_check,
4030                         u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4031                         bool pse, bool amd)
4032 {
4033         u64 gbpages_bit_rsvd = 0;
4034         u64 nonleaf_bit8_rsvd = 0;
4035         u64 high_bits_rsvd;
4036
4037         rsvd_check->bad_mt_xwr = 0;
4038
4039         if (!gbpages)
4040                 gbpages_bit_rsvd = rsvd_bits(7, 7);
4041
4042         if (level == PT32E_ROOT_LEVEL)
4043                 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
4044         else
4045                 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4046
4047         /* Note, NX doesn't exist in PDPTEs, this is handled below. */
4048         if (!nx)
4049                 high_bits_rsvd |= rsvd_bits(63, 63);
4050
4051         /*
4052          * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4053          * leaf entries) on AMD CPUs only.
4054          */
4055         if (amd)
4056                 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4057
4058         switch (level) {
4059         case PT32_ROOT_LEVEL:
4060                 /* no rsvd bits for 2 level 4K page table entries */
4061                 rsvd_check->rsvd_bits_mask[0][1] = 0;
4062                 rsvd_check->rsvd_bits_mask[0][0] = 0;
4063                 rsvd_check->rsvd_bits_mask[1][0] =
4064                         rsvd_check->rsvd_bits_mask[0][0];
4065
4066                 if (!pse) {
4067                         rsvd_check->rsvd_bits_mask[1][1] = 0;
4068                         break;
4069                 }
4070
4071                 if (is_cpuid_PSE36())
4072                         /* 36-bit PSE 4MB page */
4073                         rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4074                 else
4075                         /* 32-bit PSE 4MB page */
4076                         rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4077                 break;
4078         case PT32E_ROOT_LEVEL:
4079                 rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
4080                                                    high_bits_rsvd |
4081                                                    rsvd_bits(5, 8) |
4082                                                    rsvd_bits(1, 2);     /* PDPTE */
4083                 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;      /* PDE */
4084                 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;      /* PTE */
4085                 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4086                                                    rsvd_bits(13, 20);   /* large page */
4087                 rsvd_check->rsvd_bits_mask[1][0] =
4088                         rsvd_check->rsvd_bits_mask[0][0];
4089                 break;
4090         case PT64_ROOT_5LEVEL:
4091                 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
4092                                                    nonleaf_bit8_rsvd |
4093                                                    rsvd_bits(7, 7);
4094                 rsvd_check->rsvd_bits_mask[1][4] =
4095                         rsvd_check->rsvd_bits_mask[0][4];
4096                 fallthrough;
4097         case PT64_ROOT_4LEVEL:
4098                 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
4099                                                    nonleaf_bit8_rsvd |
4100                                                    rsvd_bits(7, 7);
4101                 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
4102                                                    gbpages_bit_rsvd;
4103                 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
4104                 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4105                 rsvd_check->rsvd_bits_mask[1][3] =
4106                         rsvd_check->rsvd_bits_mask[0][3];
4107                 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
4108                                                    gbpages_bit_rsvd |
4109                                                    rsvd_bits(13, 29);
4110                 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4111                                                    rsvd_bits(13, 20); /* large page */
4112                 rsvd_check->rsvd_bits_mask[1][0] =
4113                         rsvd_check->rsvd_bits_mask[0][0];
4114                 break;
4115         }
4116 }
4117
4118 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4119                                   struct kvm_mmu *context)
4120 {
4121         __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
4122                                 vcpu->arch.reserved_gpa_bits,
4123                                 context->root_level, context->nx,
4124                                 guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4125                                 is_pse(vcpu),
4126                                 guest_cpuid_is_amd_or_hygon(vcpu));
4127 }
4128
4129 static void
4130 __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
4131                             u64 pa_bits_rsvd, bool execonly)
4132 {
4133         u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4134         u64 bad_mt_xwr;
4135
4136         rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
4137         rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
4138         rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
4139         rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
4140         rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4141
4142         /* large page */
4143         rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4144         rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4145         rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
4146         rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
4147         rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4148
4149         bad_mt_xwr = 0xFFull << (2 * 8);        /* bits 3..5 must not be 2 */
4150         bad_mt_xwr |= 0xFFull << (3 * 8);       /* bits 3..5 must not be 3 */
4151         bad_mt_xwr |= 0xFFull << (7 * 8);       /* bits 3..5 must not be 7 */
4152         bad_mt_xwr |= REPEAT_BYTE(1ull << 2);   /* bits 0..2 must not be 010 */
4153         bad_mt_xwr |= REPEAT_BYTE(1ull << 6);   /* bits 0..2 must not be 110 */
4154         if (!execonly) {
4155                 /* bits 0..2 must not be 100 unless VMX capabilities allow it */
4156                 bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4157         }
4158         rsvd_check->bad_mt_xwr = bad_mt_xwr;
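             /*
              * The result is a 64-bit bitmap indexed by the low 6 bits of an
              * EPT SPTE (memtype in bits 5:3, XWR in bits 2:0);
              * __is_bad_mt_xwr() simply tests BIT_ULL(pte & 0x3f) against it.
              */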
4159 }
4160
4161 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4162                 struct kvm_mmu *context, bool execonly)
4163 {
4164         __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4165                                     vcpu->arch.reserved_gpa_bits, execonly);
4166 }
4167
4168 static inline u64 reserved_hpa_bits(void)
4169 {
4170         return rsvd_bits(shadow_phys_bits, 63);
4171 }
4172
4173 /*
4174  * The page table on the host is the shadow page table for the page
4175  * table in the guest or AMD nested guest; its MMU features completely
4176  * follow the features in the guest.
4177  */
4178 void
4179 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
4180 {
4181         bool uses_nx = context->nx ||
4182                 context->mmu_role.base.smep_andnot_wp;
4183         struct rsvd_bits_validate *shadow_zero_check;
4184         int i;
4185
4186         /*
4187          * Passing "true" to the last argument is okay; it adds a check
4188          * on bit 8 of the SPTEs which KVM doesn't use anyway.
4189          */
4190         shadow_zero_check = &context->shadow_zero_check;
4191         __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
4192                                 reserved_hpa_bits(),
4193                                 context->shadow_root_level, uses_nx,
4194                                 guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4195                                 is_pse(vcpu), true);
4196
4197         if (!shadow_me_mask)
4198                 return;
4199
4200         for (i = context->shadow_root_level; --i >= 0;) {
4201                 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4202                 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4203         }
4204
4205 }
4206 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
4207
4208 static inline bool boot_cpu_is_amd(void)
4209 {
4210         WARN_ON_ONCE(!tdp_enabled);
4211         return shadow_x_mask == 0;
4212 }
4213
4214 /*
4215  * The direct page table on the host uses as many MMU features as
4216  * possible; however, KVM currently does not do execution-protection.
4217  */
4218 static void
4219 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4220                                 struct kvm_mmu *context)
4221 {
4222         struct rsvd_bits_validate *shadow_zero_check;
4223         int i;
4224
4225         shadow_zero_check = &context->shadow_zero_check;
4226
4227         if (boot_cpu_is_amd())
4228                 __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
4229                                         reserved_hpa_bits(),
4230                                         context->shadow_root_level, false,
4231                                         boot_cpu_has(X86_FEATURE_GBPAGES),
4232                                         true, true);
4233         else
4234                 __reset_rsvds_bits_mask_ept(shadow_zero_check,
4235                                             reserved_hpa_bits(), false);
4236
4237         if (!shadow_me_mask)
4238                 return;
4239
4240         for (i = context->shadow_root_level; --i >= 0;) {
4241                 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4242                 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4243         }
4244 }
4245
4246 /*
4247  * Same as the comments in reset_shadow_zero_bits_mask(), except this
4248  * is the shadow page table for an Intel nested guest.
4249  */
4250 static void
4251 reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4252                                 struct kvm_mmu *context, bool execonly)
4253 {
4254         __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
4255                                     reserved_hpa_bits(), execonly);
4256 }
4257
4258 #define BYTE_MASK(access) \
4259         ((1 & (access) ? 2 : 0) | \
4260          (2 & (access) ? 4 : 0) | \
4261          (3 & (access) ? 8 : 0) | \
4262          (4 & (access) ? 16 : 0) | \
4263          (5 & (access) ? 32 : 0) | \
4264          (6 & (access) ? 64 : 0) | \
4265          (7 & (access) ? 128 : 0))
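     /*
      * BYTE_MASK(access) builds a byte with bit i (1 <= i <= 7) set iff the
      * UWX combination i includes any of the bits in @access.  E.g., assuming
      * ACC_EXEC_MASK == 1, ACC_WRITE_MASK == 2 and ACC_USER_MASK == 4, the
      * constants below evaluate to x == 0xaa, w == 0xcc and u == 0xf0.
      */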
4266
4267
4268 static void update_permission_bitmask(struct kvm_vcpu *vcpu,
4269                                       struct kvm_mmu *mmu, bool ept)
4270 {
4271         unsigned byte;
4272
4273         const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4274         const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4275         const u8 u = BYTE_MASK(ACC_USER_MASK);
4276
4277         bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
4278         bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
4279         bool cr0_wp = is_write_protection(vcpu);
4280
4281         for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4282                 unsigned pfec = byte << 1;
4283
4284                 /*
4285                  * Each "*f" variable has a 1 bit for each UWX value
4286                  * that causes a fault with the given PFEC.
4287                  */
4288
4289                 /* Faults from writes to non-writable pages */
4290                 u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4291                 /* Faults from user mode accesses to supervisor pages */
4292                 u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4293                 /* Faults from fetches of non-executable pages */
4294                 u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4295                 /* Faults from kernel mode fetches of user pages */
4296                 u8 smepf = 0;
4297                 /* Faults from kernel mode accesses of user pages */
4298                 u8 smapf = 0;
4299
4300                 if (!ept) {
4301                         /* Faults from kernel mode accesses to user pages */
4302                         u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4303
4304                         /* Not really needed: !nx will cause pte.nx to fault */
4305                         if (!mmu->nx)
4306                                 ff = 0;
4307
4308                         /* Allow supervisor writes if !cr0.wp */
4309                         if (!cr0_wp)
4310                                 wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4311
4312                         /* Disallow supervisor fetches of user code if cr4.smep */
4313                         if (cr4_smep)
4314                                 smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4315
4316                         /*
4317                          * SMAP: kernel-mode data accesses from user-mode
4318                          * mappings should fault. A fault is considered a
4319                          * SMAP violation if all of the following
4320                          * conditions are true:
4321                          *   - X86_CR4_SMAP is set in CR4
4322                          *   - A user page is accessed
4323                          *   - The access is not a fetch
4324                          *   - Page fault in kernel mode
4325                          *   - if CPL = 3 or X86_EFLAGS_AC is clear
4326                          *
4327                          * Here, we cover the first three conditions.
4328                          * The fourth is computed dynamically in permission_fault();
4329                          * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4330                          * *not* subject to SMAP restrictions.
4331                          */
4332                         if (cr4_smap)
4333                                 smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4334                 }
4335
4336                 mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
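                      /*
                       * E.g. for a plain user-mode write (pfec = PFERR_USER_MASK |
                       * PFERR_WRITE_MASK) with NX/SMEP/SMAP off and CR0.WP = 1,
                       * this is wf | uf = (u8)~w | (u8)~u, i.e. 0x33 | 0x0f = 0x3f
                       * with the mask values noted above BYTE_MASK(): only UWX
                       * combinations containing both U and W (bits 6 and 7)
                       * avoid a fault.
                       */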
4337         }
4338 }
4339
4340 /*
4341 * PKU is an additional mechanism by which the paging controls access to
4342 * user-mode addresses based on the value in the PKRU register.  Protection
4343 * key violations are reported through a bit in the page fault error code.
4344 * Unlike other bits of the error code, the PK bit is not known at the
4345 * call site of e.g. gva_to_gpa; it must be computed directly in
4346 * permission_fault based on two bits of PKRU, on some machine state (CR4,
4347 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4348 *
4349 * In particular the following conditions come from the error code, the
4350 * page tables and the machine state:
4351 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4352 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4353 * - PK is always zero if U=0 in the page tables
4354 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4355 *
4356 * The PKRU bitmask caches the result of these four conditions.  The error
4357 * code (minus the P bit) and the page table's U bit form an index into the
4358 * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4359 * with the two bits of the PKRU register corresponding to the protection key.
4360 * For the first three conditions above the bits will be 00, thus masking
4361 * away both AD and WD.  For all reads or if the last condition holds, WD
4362 * only will be masked away.
4363 */
4364 static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4365                                 bool ept)
4366 {
4367         unsigned bit;
4368         bool wp;
4369
4370         if (ept) {
4371                 mmu->pkru_mask = 0;
4372                 return;
4373         }
4374
4375         /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
4376         if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
4377                 mmu->pkru_mask = 0;
4378                 return;
4379         }
4380
4381         wp = is_write_protection(vcpu);
4382
4383         for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4384                 unsigned pfec, pkey_bits;
4385                 bool check_pkey, check_write, ff, uf, wf, pte_user;
4386
4387                 pfec = bit << 1;
4388                 ff = pfec & PFERR_FETCH_MASK;
4389                 uf = pfec & PFERR_USER_MASK;
4390                 wf = pfec & PFERR_WRITE_MASK;
4391
4392                 /* PFEC.RSVD is replaced by ACC_USER_MASK. */
4393                 pte_user = pfec & PFERR_RSVD_MASK;
4394
4395                 /*
4396                  * We only need to check accesses that are not instruction
4397                  * fetches and that target a user page.
4398                  */
4399                 check_pkey = (!ff && pte_user);
4400                 /*
4401                  * write access is controlled by PKRU if it is a
4402                  * user access or CR0.WP = 1.
4403                  */
4404                 check_write = check_pkey && wf && (uf || wp);
4405
4406                 /* PKRU.AD stops both read and write access. */
4407                 pkey_bits = !!check_pkey;
4408                 /* PKRU.WD stops write access. */
4409                 pkey_bits |= (!!check_write) << 1;
4410
4411                 mmu->pkru_mask |= (pkey_bits & 3) << pfec;
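                      /*
                       * i.e. two bits are stored per PFEC index: bit 0 masks
                       * PKRU.AD, bit 1 masks PKRU.WD.  permission_fault() later
                       * extracts them and ANDs them with the two PKRU bits of
                       * the page's protection key (see the comment above this
                       * function).
                       */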
4412         }
4413 }
4414
4415 static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
4416 {
4417         unsigned root_level = mmu->root_level;
4418
4419         mmu->last_nonleaf_level = root_level;
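             /*
              * With 2-level 32-bit paging, PDEs can map 4MB pages only when
              * CR4.PSE is set, in which case level 2 entries must also be
              * treated as potential leaves.
              */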
4420         if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
4421                 mmu->last_nonleaf_level++;
4422 }
4423
4424 static void paging64_init_context_common(struct kvm_vcpu *vcpu,
4425                                          struct kvm_mmu *context,
4426                                          int level)
4427 {
4428         context->nx = is_nx(vcpu);
4429         context->root_level = level;
4430
4431         reset_rsvds_bits_mask(vcpu, context);
4432         update_permission_bitmask(vcpu, context, false);
4433         update_pkru_bitmask(vcpu, context, false);
4434         update_last_nonleaf_level(vcpu, context);
4435
4436         MMU_WARN_ON(!is_pae(vcpu));
4437         context->page_fault = paging64_page_fault;
4438         context->gva_to_gpa = paging64_gva_to_gpa;
4439         context->sync_page = paging64_sync_page;
4440         context->invlpg = paging64_invlpg;
4441         context->shadow_root_level = level;
4442         context->direct_map = false;
4443 }
4444
4445 static void paging64_init_context(struct kvm_vcpu *vcpu,
4446                                   struct kvm_mmu *context)
4447 {
4448         int root_level = is_la57_mode(vcpu) ?
4449                          PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4450
4451         paging64_init_context_common(vcpu, context, root_level);
4452 }
4453
4454 static void paging32_init_context(struct kvm_vcpu *vcpu,
4455                                   struct kvm_mmu *context)
4456 {
4457         context->nx = false;
4458         context->root_level = PT32_ROOT_LEVEL;
4459
4460         reset_rsvds_bits_mask(vcpu, context);
4461         update_permission_bitmask(vcpu, context, false);
4462         update_pkru_bitmask(vcpu, context, false);
4463         update_last_nonleaf_level(vcpu, context);
4464
4465         context->page_fault = paging32_page_fault;
4466         context->gva_to_gpa = paging32_gva_to_gpa;
4467         context->sync_page = paging32_sync_page;
4468         context->invlpg = paging32_invlpg;
4469         context->shadow_root_level = PT32E_ROOT_LEVEL;
4470         context->direct_map = false;
4471 }
4472
4473 static void paging32E_init_context(struct kvm_vcpu *vcpu,
4474                                    struct kvm_mmu *context)
4475 {
4476         paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
4477 }
4478
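/*
 * Gather the extended role bits from the vCPU's current CR0/CR4 and CPUID
 * state; these are compared together with the base role to decide whether an
 * existing MMU context can be reused.
 */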
4479 static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
4480 {
4481         union kvm_mmu_extended_role ext = {0};
4482
4483         ext.cr0_pg = !!is_paging(vcpu);
4484         ext.cr4_pae = !!is_pae(vcpu);
4485         ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
4486         ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
4487         ext.cr4_pse = !!is_pse(vcpu);
4488         ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
4489         ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
4490
4491         ext.valid = 1;
4492
4493         return ext;
4494 }
4495
4496 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
4497                                                    bool base_only)
4498 {
4499         union kvm_mmu_role role = {0};
4500
4501         role.base.access = ACC_ALL;
4502         role.base.nxe = !!is_nx(vcpu);
4503         role.base.cr0_wp = is_write_protection(vcpu);
4504         role.base.smm = is_smm(vcpu);
4505         role.base.guest_mode = is_guest_mode(vcpu);
4506
4507         if (base_only)
4508                 return role;
4509
4510         role.ext = kvm_calc_mmu_role_ext(vcpu);
4511
4512         return role;
4513 }
4514
4515 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4516 {
4517         /* Use 5-level TDP if and only if it's useful/necessary. */
4518         if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4519                 return 4;
4520
4521         return max_tdp_level;
4522 }
4523
4524 static union kvm_mmu_role
4525 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
4526 {
4527         union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
4528
4529         role.base.ad_disabled = (shadow_accessed_mask == 0);
4530         role.base.level = kvm_mmu_get_tdp_level(vcpu);
4531         role.base.direct = true;
4532         role.base.gpte_is_8_bytes = true;
4533
4534         return role;
4535 }
4536
4537 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4538 {
4539         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4540         union kvm_mmu_role new_role =
4541                 kvm_calc_tdp_mmu_root_page_role(vcpu, false);
4542
4543         if (new_role.as_u64 == context->mmu_role.as_u64)
4544                 return;
4545
4546         context->mmu_role.as_u64 = new_role.as_u64;
4547         context->page_fault = kvm_tdp_page_fault;
4548         context->sync_page = nonpaging_sync_page;
4549         context->invlpg = NULL;
4550         context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4551         context->direct_map = true;
4552         context->get_guest_pgd = get_cr3;
4553         context->get_pdptr = kvm_pdptr_read;
4554         context->inject_page_fault = kvm_inject_page_fault;
4555
4556         if (!is_paging(vcpu)) {
4557                 context->nx = false;
4558                 context->gva_to_gpa = nonpaging_gva_to_gpa;
4559                 context->root_level = 0;
4560         } else if (is_long_mode(vcpu)) {
4561                 context->nx = is_nx(vcpu);
4562                 context->root_level = is_la57_mode(vcpu) ?
4563                                 PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4564                 reset_rsvds_bits_mask(vcpu, context);
4565                 context->gva_to_gpa = paging64_gva_to_gpa;
4566         } else if (is_pae(vcpu)) {
4567                 context->nx = is_nx(vcpu);
4568                 context->root_level = PT32E_ROOT_LEVEL;
4569                 reset_rsvds_bits_mask(vcpu, context);
4570                 context->gva_to_gpa = paging64_gva_to_gpa;
4571         } else {
4572                 context->nx = false;
4573                 context->root_level = PT32_ROOT_LEVEL;
4574                 reset_rsvds_bits_mask(vcpu, context);
4575                 context->gva_to_gpa = paging32_gva_to_gpa;
4576         }
4577
4578         update_permission_bitmask(vcpu, context, false);
4579         update_pkru_bitmask(vcpu, context, false);
4580         update_last_nonleaf_level(vcpu, context);
4581         reset_tdp_shadow_zero_bits_mask(vcpu, context);
4582 }
4583
4584 static union kvm_mmu_role
4585 kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
4586 {
4587         union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
4588
4589         role.base.smep_andnot_wp = role.ext.cr4_smep &&
4590                 !is_write_protection(vcpu);
4591         role.base.smap_andnot_wp = role.ext.cr4_smap &&
4592                 !is_write_protection(vcpu);
4593         role.base.gpte_is_8_bytes = !!is_pae(vcpu);
4594
4595         return role;
4596 }
4597
4598 static union kvm_mmu_role
4599 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
4600 {
4601         union kvm_mmu_role role =
4602                 kvm_calc_shadow_root_page_role_common(vcpu, base_only);
4603
4604         role.base.direct = !is_paging(vcpu);
4605
4606         if (!is_long_mode(vcpu))
4607                 role.base.level = PT32E_ROOT_LEVEL;
4608         else if (is_la57_mode(vcpu))
4609                 role.base.level = PT64_ROOT_5LEVEL;
4610         else
4611                 role.base.level = PT64_ROOT_4LEVEL;
4612
4613         return role;
4614 }
4615
4616 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4617                                     u32 cr0, u32 cr4, u32 efer,
4618                                     union kvm_mmu_role new_role)
4619 {
4620         if (!(cr0 & X86_CR0_PG))
4621                 nonpaging_init_context(vcpu, context);
4622         else if (efer & EFER_LMA)
4623                 paging64_init_context(vcpu, context);
4624         else if (cr4 & X86_CR4_PAE)
4625                 paging32E_init_context(vcpu, context);
4626         else
4627                 paging32_init_context(vcpu, context);
4628
4629         context->mmu_role.as_u64 = new_role.as_u64;
4630         reset_shadow_zero_bits_mask(vcpu, context);
4631 }
4632
4633 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
4634 {
4635         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4636         union kvm_mmu_role new_role =
4637                 kvm_calc_shadow_mmu_root_page_role(vcpu, false);
4638
4639         if (new_role.as_u64 != context->mmu_role.as_u64)
4640                 shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
4641 }
4642
4643 static union kvm_mmu_role
4644 kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
4645 {
4646         union kvm_mmu_role role =
4647                 kvm_calc_shadow_root_page_role_common(vcpu, false);
4648
4649         role.base.direct = false;
4650         role.base.level = kvm_mmu_get_tdp_level(vcpu);
4651
4652         return role;
4653 }
4654
4655 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
4656                              gpa_t nested_cr3)
4657 {
4658         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4659         union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
4660
4661         __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
4662
4663         if (new_role.as_u64 != context->mmu_role.as_u64) {
4664                 shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
4665
4666                 /*
4667                  * Override the level set by the common init helper; nested TDP
4668                  * always uses the host's TDP configuration.
4669                  */
4670                 context->shadow_root_level = new_role.base.level;
4671         }
4672 }
4673 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4674
4675 static union kvm_mmu_role
4676 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4677                                    bool execonly, u8 level)
4678 {
4679         union kvm_mmu_role role = {0};
4680
4681         /* SMM flag is inherited from root_mmu */
4682         role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4683
4684         role.base.level = level;
4685         role.base.gpte_is_8_bytes = true;
4686         role.base.direct = false;
4687         role.base.ad_disabled = !accessed_dirty;
4688         role.base.guest_mode = true;
4689         role.base.access = ACC_ALL;
4690
4691         /*
4692          * WP=1 and NOT_WP=1 is an impossible combination, so use WP and the
4693          * SMAP variation to denote shadow EPT entries.
4694          */
4695         role.base.cr0_wp = true;
4696         role.base.smap_andnot_wp = true;
4697
4698         role.ext = kvm_calc_mmu_role_ext(vcpu);
4699         role.ext.execonly = execonly;
4700
4701         return role;
4702 }
4703
4704 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4705                              bool accessed_dirty, gpa_t new_eptp)
4706 {
4707         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4708         u8 level = vmx_eptp_page_walk_level(new_eptp);
4709         union kvm_mmu_role new_role =
4710                 kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4711                                                    execonly, level);
4712
4713         __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
4714
4715         if (new_role.as_u64 == context->mmu_role.as_u64)
4716                 return;
4717
4718         context->shadow_root_level = level;
4719
4720         context->nx = true;
4721         context->ept_ad = accessed_dirty;
4722         context->page_fault = ept_page_fault;
4723         context->gva_to_gpa = ept_gva_to_gpa;
4724         context->sync_page = ept_sync_page;
4725         context->invlpg = ept_invlpg;
4726         context->root_level = level;
4727         context->direct_map = false;
4728         context->mmu_role.as_u64 = new_role.as_u64;
4729
4730         update_permission_bitmask(vcpu, context, true);
4731         update_pkru_bitmask(vcpu, context, true);
4732         update_last_nonleaf_level(vcpu, context);
4733         reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4734         reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4735 }
4736 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4737
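/*
 * Configure root_mmu for legacy shadow paging based on the vCPU's current
 * CR0.PG, CR4.PAE and EFER values.
 */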
4738 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4739 {
4740         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4741
4742         kvm_init_shadow_mmu(vcpu,
4743                             kvm_read_cr0_bits(vcpu, X86_CR0_PG),
4744                             kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
4745                             vcpu->arch.efer);
4746
4747         context->get_guest_pgd     = get_cr3;
4748         context->get_pdptr         = kvm_pdptr_read;
4749         context->inject_page_fault = kvm_inject_page_fault;
4750 }
4751
4752 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4753 {
4754         union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
4755         struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4756
4757         if (new_role.as_u64 == g_context->mmu_role.as_u64)
4758                 return;
4759
4760         g_context->mmu_role.as_u64 = new_role.as_u64;
4761         g_context->get_guest_pgd     = get_cr3;
4762         g_context->get_pdptr         = kvm_pdptr_read;
4763         g_context->inject_page_fault = kvm_inject_page_fault;
4764
4765         /*
4766          * L2 page tables are never shadowed, so there is no need to sync
4767          * SPTEs.
4768          */
4769         g_context->invlpg            = NULL;
4770
4771         /*
4772          * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4773          * L1's nested page tables (e.g. EPT12). The nested translation
4774          * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4775          * L2's page tables as the first level of translation and L1's
4776          * nested page tables as the second level of translation. Basically
4777          * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4778          */
4779         if (!is_paging(vcpu)) {
4780                 g_context->nx = false;
4781                 g_context->root_level = 0;
4782                 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4783         } else if (is_long_mode(vcpu)) {
4784                 g_context->nx = is_nx(vcpu);
4785                 g_context->root_level = is_la57_mode(vcpu) ?
4786                                         PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4787                 reset_rsvds_bits_mask(vcpu, g_context);
4788                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4789         } else if (is_pae(vcpu)) {
4790                 g_context->nx = is_nx(vcpu);
4791                 g_context->root_level = PT32E_ROOT_LEVEL;
4792                 reset_rsvds_bits_mask(vcpu, g_context);
4793                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4794         } else {
4795                 g_context->nx = false;
4796                 g_context->root_level = PT32_ROOT_LEVEL;
4797                 reset_rsvds_bits_mask(vcpu, g_context);
4798                 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4799         }
4800
4801         update_permission_bitmask(vcpu, g_context, false);
4802         update_pkru_bitmask(vcpu, g_context, false);
4803         update_last_nonleaf_level(vcpu, g_context);
4804 }
4805
4806 void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
4807 {
4808         if (reset_roots) {
4809                 uint i;
4810
4811                 vcpu->arch.mmu->root_hpa = INVALID_PAGE;
4812
4813                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4814                         vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
4815         }
4816
4817         if (mmu_is_nested(vcpu))
4818                 init_kvm_nested_mmu(vcpu);
4819         else if (tdp_enabled)
4820                 init_kvm_tdp_mmu(vcpu);
4821         else
4822                 init_kvm_softmmu(vcpu);
4823 }
4824 EXPORT_SYMBOL_GPL(kvm_init_mmu);
4825
4826 static union kvm_mmu_page_role
4827 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
4828 {
4829         union kvm_mmu_role role;
4830
4831         if (tdp_enabled)
4832                 role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
4833         else
4834                 role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
4835
4836         return role.base;
4837 }
4838
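/* Drop all roots and rebuild the MMU context for the vCPU's current mode. */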
4839 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
4840 {
4841         kvm_mmu_unload(vcpu);
4842         kvm_init_mmu(vcpu, true);
4843 }
4844 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
4845
4846 int kvm_mmu_load(struct kvm_vcpu *vcpu)
4847 {
4848         int r;
4849
4850         r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
4851         if (r)
4852                 goto out;
4853         r = mmu_alloc_special_roots(vcpu);
4854         if (r)
4855                 goto out;
4856         write_lock(&vcpu->kvm->mmu_lock);
4857         if (make_mmu_pages_available(vcpu))
4858                 r = -ENOSPC;
4859         else if (vcpu->arch.mmu->direct_map)
4860                 r = mmu_alloc_direct_roots(vcpu);
4861         else
4862                 r = mmu_alloc_shadow_roots(vcpu);
4863         write_unlock(&vcpu->kvm->mmu_lock);
4864         if (r)
4865                 goto out;
4866
4867         kvm_mmu_sync_roots(vcpu);
4868
4869         kvm_mmu_load_pgd(vcpu);
4870         static_call(kvm_x86_tlb_flush_current)(vcpu);
4871 out:
4872         return r;
4873 }
4874
4875 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
4876 {
4877         kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
4878         WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
4879         kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4880         WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
4881 }
4882
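/*
 * Check whether replacing the old spte value with the new one requires a
 * remote TLB flush: true if a previously present spte is being zapped, has
 * its pfn changed, or loses any of its permissions.
 */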
4883 static bool need_remote_flush(u64 old, u64 new)
4884 {
4885         if (!is_shadow_present_pte(old))
4886                 return false;
4887         if (!is_shadow_present_pte(new))
4888                 return true;
4889         if ((old ^ new) & PT64_BASE_ADDR_MASK)
4890                 return true;
4891         old ^= shadow_nx_mask;
4892         new ^= shadow_nx_mask;
4893         return (old & ~new & PT64_PERM_MASK) != 0;
4894 }
4895
4896 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
4897                                     int *bytes)
4898 {
4899         u64 gentry = 0;
4900         int r;
4901
4902         /*
4903          * Assume that the pte write is on a page table of the same type
4904          * as the current vcpu paging mode, since we update the sptes only
4905          * when they have the same mode.
4906          */
4907         if (is_pae(vcpu) && *bytes == 4) {
4908                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
4909                 *gpa &= ~(gpa_t)7;
4910                 *bytes = 8;
4911         }
4912
4913         if (*bytes == 4 || *bytes == 8) {
4914                 r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
4915                 if (r)
4916                         gentry = 0;
4917         }
4918
4919         return gentry;
4920 }
4921
4922 /*
4923  * If we're seeing too many writes to a page, it may no longer be a page table,
4924  * or we may be forking, in which case it is better to unmap the page.
4925  */
4926 static bool detect_write_flooding(struct kvm_mmu_page *sp)
4927 {
4928         /*
4929          * Skip write-flooding detection for an sp whose level is 1: it can
4930          * become unsync, in which case the guest page is not write-protected.
4931          */
4932         if (sp->role.level == PG_LEVEL_4K)
4933                 return false;
4934
4935         atomic_inc(&sp->write_flooding_count);
4936         return atomic_read(&sp->write_flooding_count) >= 3;
4937 }
4938
4939 /*
4940  * Misaligned accesses are too much trouble to fix up; also, they usually
4941  * indicate a page is not used as a page table.
4942  */
4943 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
4944                                     int bytes)
4945 {
4946         unsigned offset, pte_size, misaligned;
4947
4948         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4949                  gpa, bytes, sp->role.word);
4950
4951         offset = offset_in_page(gpa);
4952         pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
4953
4954         /*
4955          * Sometimes the OS only writes the last byte to update status bits;
4956          * for example, Linux uses the andb instruction in clear_bit().
4957          */
4958         if (!(offset & (pte_size - 1)) && bytes == 1)
4959                 return false;
4960
4961         misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
4962         misaligned |= bytes < 4;
4963
4964         return misaligned;
4965 }
4966
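/*
 * Return a pointer to the first spte in @sp that shadows the gpte written at
 * @gpa, and store the number of affected sptes in @nspte.  Returns NULL if
 * the write falls in a quadrant not covered by this sp.
 */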
4967 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
4968 {
4969         unsigned page_offset, quadrant;
4970         u64 *spte;
4971         int level;
4972
4973         page_offset = offset_in_page(gpa);
4974         level = sp->role.level;
4975         *nspte = 1;
4976         if (!sp->role.gpte_is_8_bytes) {
4977                 page_offset <<= 1;      /* 32->64 */
4978                 /*
4979                  * A 32-bit pde maps 4MB while the shadow pdes map
4980                  * only 2MB.  So we need to double the offset again
4981                  * and zap two pdes instead of one.
4982                  */
4983                 if (level == PT32_ROOT_LEVEL) {
4984                         page_offset &= ~7; /* kill rounding error */
4985                         page_offset <<= 1;
4986                         *nspte = 2;
4987                 }
4988                 quadrant = page_offset >> PAGE_SHIFT;
4989                 page_offset &= ~PAGE_MASK;
4990                 if (quadrant != sp->role.quadrant)
4991                         return NULL;
4992         }
4993
4994         spte = &sp->spt[page_offset / sizeof(*spte)];
4995         return spte;
4996 }
4997
4998 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
4999                               const u8 *new, int bytes,
5000                               struct kvm_page_track_notifier_node *node)
5001 {
5002         gfn_t gfn = gpa >> PAGE_SHIFT;
5003         struct kvm_mmu_page *sp;
5004         LIST_HEAD(invalid_list);
5005         u64 entry, gentry, *spte;
5006         int npte;
5007         bool remote_flush, local_flush;
5008
5009         /*
5010          * If we don't have indirect shadow pages, it means no page is
5011          * write-protected, so we can simply return.
5012          */
5013         if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5014                 return;
5015
5016         remote_flush = local_flush = false;
5017
5018         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5019
5020         /*
5021          * No need to check whether the memory allocation succeeded,
5022          * since pte prefetch is skipped if there are not enough
5023          * objects in the cache.
5024          */
5025         mmu_topup_memory_caches(vcpu, true);
5026
5027         write_lock(&vcpu->kvm->mmu_lock);
5028
5029         gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5030
5031         ++vcpu->kvm->stat.mmu_pte_write;
5032         kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5033
5034         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5035                 if (detect_write_misaligned(sp, gpa, bytes) ||
5036                       detect_write_flooding(sp)) {
5037                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5038                         ++vcpu->kvm->stat.mmu_flooded;
5039                         continue;
5040                 }
5041
5042                 spte = get_written_sptes(sp, gpa, &npte);
5043                 if (!spte)
5044                         continue;
5045
5046                 local_flush = true;
5047                 while (npte--) {
5048                         entry = *spte;
5049                         mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5050                         if (gentry && sp->role.level != PG_LEVEL_4K)
5051                                 ++vcpu->kvm->stat.mmu_pde_zapped;
5052                         if (need_remote_flush(entry, *spte))
5053                                 remote_flush = true;
5054                         ++spte;
5055                 }
5056         }
5057         kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5058         kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5059         write_unlock(&vcpu->kvm->mmu_lock);
5060 }
5061
5062 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5063                        void *insn, int insn_len)
5064 {
5065         int r, emulation_type = EMULTYPE_PF;
5066         bool direct = vcpu->arch.mmu->direct_map;
5067
5068         if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5069                 return RET_PF_RETRY;
5070
5071         r = RET_PF_INVALID;
5072         if (unlikely(error_code & PFERR_RSVD_MASK)) {
5073                 r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5074                 if (r == RET_PF_EMULATE)
5075                         goto emulate;
5076         }
5077
5078         if (r == RET_PF_INVALID) {
5079                 r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
5080                                           lower_32_bits(error_code), false);
5081                 if (WARN_ON_ONCE(r == RET_PF_INVALID))
5082                         return -EIO;
5083         }
5084
5085         if (r < 0)
5086                 return r;
5087         if (r != RET_PF_EMULATE)
5088                 return 1;
5089
5090         /*
5091          * Before emulating the instruction, check if the error code
5092          * was due to a RO violation while translating the guest page.
5093          * This can occur when using nested virtualization with nested
5094          * paging in both guests. If true, we simply unprotect the page
5095          * and resume the guest.
5096          */
5097         if (vcpu->arch.mmu->direct_map &&
5098             (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5099                 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5100                 return 1;
5101         }
5102
5103         /*
5104          * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5105          * optimistically try to just unprotect the page and let the processor
5106          * re-execute the instruction that caused the page fault.  Do not allow
5107          * retrying MMIO emulation, as it's not only pointless but could also
5108          * cause us to enter an infinite loop because the processor will keep
5109          * faulting on the non-existent MMIO address.  Retrying an instruction
5110          * from a nested guest is also pointless and dangerous as we are only
5111          * explicitly shadowing L1's page tables, i.e. unprotecting something
5112          * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5113          */
5114         if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
5115                 emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5116 emulate:
5117         return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5118                                        insn_len);
5119 }
5120 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5121
5122 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
5123                             gva_t gva, hpa_t root_hpa)
5124 {
5125         int i;
5126
5127         /* The "gva" is actually a GPA when mmu is vcpu->arch.guest_mmu.  */
5128         if (mmu != &vcpu->arch.guest_mmu) {
5129                 /* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5130                 if (is_noncanonical_address(gva, vcpu))
5131                         return;
5132
5133                 static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5134         }
5135
5136         if (!mmu->invlpg)
5137                 return;
5138
5139         if (root_hpa == INVALID_PAGE) {
5140                 mmu->invlpg(vcpu, gva, mmu->root_hpa);
5141
5142                 /*
5143                  * INVLPG is required to invalidate any global mappings for the VA,
5144                  * irrespective of PCID. Since it would take roughly the same amount of
5145                  * work to determine whether any of the prev_root mappings of the VA are
5146                  * marked global as it would to just sync the VA blindly, we might as
5147                  * well always sync it.
5148                  *
5149                  * Mappings not reachable via the current cr3 or the prev_roots will be
5150                  * synced when switching to that cr3, so nothing needs to be done here
5151                  * for them.
5152                  */
5153                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5154                         if (VALID_PAGE(mmu->prev_roots[i].hpa))
5155                                 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5156         } else {
5157                 mmu->invlpg(vcpu, gva, root_hpa);
5158         }
5159 }
5160
5161 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
5162 {
5163         kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
5164         ++vcpu->stat.invlpg;
5165 }
5166 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5167
5168
5169 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5170 {
5171         struct kvm_mmu *mmu = vcpu->arch.mmu;
5172         bool tlb_flush = false;
5173         uint i;
5174
5175         if (pcid == kvm_get_active_pcid(vcpu)) {
5176                 mmu->invlpg(vcpu, gva, mmu->root_hpa);
5177                 tlb_flush = true;
5178         }
5179
5180         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5181                 if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5182                     pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5183                         mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5184                         tlb_flush = true;
5185                 }
5186         }
5187
5188         if (tlb_flush)
5189                 static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5190
5191         ++vcpu->stat.invlpg;
5192
5193         /*
5194          * Mappings not reachable via the current cr3 or the prev_roots will be
5195          * synced when switching to that cr3, so nothing needs to be done here
5196          * for them.
5197          */
5198 }
5199
5200 void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
5201                        int tdp_huge_page_level)
5202 {
5203         tdp_enabled = enable_tdp;
5204         max_tdp_level = tdp_max_root_level;
5205
5206         /*
5207          * max_huge_page_level reflects KVM's MMU capabilities irrespective
5208          * of kernel support, e.g. KVM may be capable of using 1GB pages when
5209          * the kernel is not.  But, KVM never creates a page size greater than
5210          * what is used by the kernel for any given HVA, i.e. the kernel's
5211          * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5212          */
5213         if (tdp_enabled)
5214                 max_huge_page_level = tdp_huge_page_level;
5215         else if (boot_cpu_has(X86_FEATURE_GBPAGES))
5216                 max_huge_page_level = PG_LEVEL_1G;
5217         else
5218                 max_huge_page_level = PG_LEVEL_2M;
5219 }
5220 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
5221
5222 /* The return value indicates whether a tlb flush on all vcpus is needed. */
5223 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head,
5224                                     struct kvm_memory_slot *slot);
5225
5226 /* The caller must hold mmu_lock before calling this function. */
5227 static __always_inline bool
5228 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5229                         slot_level_handler fn, int start_level, int end_level,
5230                         gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
5231 {
5232         struct slot_rmap_walk_iterator iterator;
5233         bool flush = false;
5234
5235         for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5236                         end_gfn, &iterator) {
5237                 if (iterator.rmap)
5238                         flush |= fn(kvm, iterator.rmap, memslot);
5239
5240                 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5241                         if (flush && lock_flush_tlb) {
5242                                 kvm_flush_remote_tlbs_with_address(kvm,
5243                                                 start_gfn,
5244                                                 iterator.gfn - start_gfn + 1);
5245                                 flush = false;
5246                         }
5247                         cond_resched_rwlock_write(&kvm->mmu_lock);
5248                 }
5249         }
5250
5251         if (flush && lock_flush_tlb) {
5252                 kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
5253                                                    end_gfn - start_gfn + 1);
5254                 flush = false;
5255         }
5256
5257         return flush;
5258 }
5259
5260 static __always_inline bool
5261 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5262                   slot_level_handler fn, int start_level, int end_level,
5263                   bool lock_flush_tlb)
5264 {
5265         return slot_handle_level_range(kvm, memslot, fn, start_level,
5266                         end_level, memslot->base_gfn,
5267                         memslot->base_gfn + memslot->npages - 1,
5268                         lock_flush_tlb);
5269 }
5270
5271 static __always_inline bool
5272 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
5273                  slot_level_handler fn, bool lock_flush_tlb)
5274 {
5275         return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5276                                  PG_LEVEL_4K, lock_flush_tlb);
5277 }
5278
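/*
 * Free the PAE root table and the lm_root page, if they were allocated.  With
 * shadow paging the PAE root was mapped decrypted, so re-encrypt it first.
 */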
5279 static void free_mmu_pages(struct kvm_mmu *mmu)
5280 {
5281         if (!tdp_enabled && mmu->pae_root)
5282                 set_memory_encrypted((unsigned long)mmu->pae_root, 1);
5283         free_page((unsigned long)mmu->pae_root);
5284         free_page((unsigned long)mmu->lm_root);
5285 }
5286
5287 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5288 {
5289         struct page *page;
5290         int i;
5291
5292         mmu->root_hpa = INVALID_PAGE;
5293         mmu->root_pgd = 0;
5294         mmu->translate_gpa = translate_gpa;
5295         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5296                 mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5297
5298         /*
5299          * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5300          * while the PDP table is a per-vCPU construct that's allocated at MMU
5301          * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5302          * x86_64.  Therefore we need to allocate the PDP table in the first
5303          * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
5304          * generally doesn't use PAE paging and can skip allocating the PDP
5305          * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
5306          * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
5307          * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
5308          */
5309         if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5310                 return 0;
5311
5312         page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5313         if (!page)
5314                 return -ENOMEM;
5315
5316         mmu->pae_root = page_address(page);
5317
5318         /*
5319          * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
5320          * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
5321          * that KVM's writes and the CPU's reads get along.  Note, this is
5322          * only necessary when using shadow paging, as 64-bit NPT can get at
5323          * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
5324          * by 32-bit kernels (when KVM itself uses 32-bit NPT).
5325          */
5326         if (!tdp_enabled)
5327                 set_memory_decrypted((unsigned long)mmu->pae_root, 1);
5328         else
5329                 WARN_ON_ONCE(shadow_me_mask);
5330
5331         for (i = 0; i < 4; ++i)
5332                 mmu->pae_root[i] = INVALID_PAE_ROOT;
5333
5334         return 0;
5335 }
5336
5337 int kvm_mmu_create(struct kvm_vcpu *vcpu)
5338 {
5339         int ret;
5340
5341         vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
5342         vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
5343
5344         vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
5345         vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
5346
5347         vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
5348
5349         vcpu->arch.mmu = &vcpu->arch.root_mmu;
5350         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5351
5352         vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5353
5354         ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5355         if (ret)
5356                 return ret;
5357
5358         ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5359         if (ret)
5360                 goto fail_allocate_root;
5361
5362         return ret;
5363  fail_allocate_root:
5364         free_mmu_pages(&vcpu->arch.guest_mmu);
5365         return ret;
5366 }
5367
5368 #define BATCH_ZAP_PAGES 10
5369 static void kvm_zap_obsolete_pages(struct kvm *kvm)
5370 {
5371         struct kvm_mmu_page *sp, *node;
5372         int nr_zapped, batch = 0;
5373
5374 restart:
5375         list_for_each_entry_safe_reverse(sp, node,
5376               &kvm->arch.active_mmu_pages, link) {
5377                 /*
5378                  * No obsolete valid page exists before a newly created page
5379                  * since active_mmu_pages is a FIFO list.
5380                  */
5381                 if (!is_obsolete_sp(kvm, sp))
5382                         break;
5383
5384                 /*
5385                  * Invalid pages should never land back on the list of active
5386                  * pages.  Skip the bogus page, otherwise we'll get stuck in an
5387                  * infinite loop if the page gets put back on the list (again).
5388                  */
5389                 if (WARN_ON(sp->role.invalid))
5390                         continue;
5391
5392                 /*
5393                  * No need to flush the TLB since we're only zapping shadow
5394                  * pages with an obsolete generation number and all vCPUS have
5395                  * loaded a new root, i.e. the shadow pages being zapped cannot
5396                  * be in active use by the guest.
5397                  */
5398                 if (batch >= BATCH_ZAP_PAGES &&
5399                     cond_resched_rwlock_write(&kvm->mmu_lock)) {
5400                         batch = 0;
5401                         goto restart;
5402                 }
5403
5404                 if (__kvm_mmu_prepare_zap_page(kvm, sp,
5405                                 &kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5406                         batch += nr_zapped;
5407                         goto restart;
5408                 }
5409         }
5410
5411         /*
5412          * Trigger a remote TLB flush before freeing the page tables to ensure
5413          * KVM is not in the middle of a lockless shadow page table walk, which
5414          * may reference the pages.
5415          */
5416         kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5417 }
5418
5419 /*
5420  * Fast invalidate all shadow pages and use lock-break technique
5421  * to zap obsolete pages.
5422  *
5423  * It's required when memslot is being deleted or VM is being
5424  * destroyed, in these cases, we should ensure that KVM MMU does
5425  * not use any resource of the being-deleted slot or all slots
5426  * after calling the function.
5427  */
5428 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5429 {
5430         lockdep_assert_held(&kvm->slots_lock);
5431
5432         write_lock(&kvm->mmu_lock);
5433         trace_kvm_mmu_zap_all_fast(kvm);
5434
5435         /*
5436          * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5437          * held for the entire duration of zapping obsolete pages, it's
5438          * impossible for there to be multiple invalid generations associated
5439          * with *valid* shadow pages at any given time, i.e. there is exactly
5440          * one valid generation and (at most) one invalid generation.
5441          */
5442         kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5443
5444         /*
5445          * Notify all vcpus to reload their shadow page tables and flush their
5446          * TLBs.  All vcpus will then switch to the new shadow page table with
5447          * the new mmu_valid_gen.
5448          *
5449          * Note: this must be done under the protection of mmu_lock; otherwise,
5450          * a vcpu could purge a shadow page but miss the tlb flush.
5451          */
5452         kvm_reload_remote_mmus(kvm);
5453
5454         kvm_zap_obsolete_pages(kvm);
5455
5456         if (is_tdp_mmu_enabled(kvm))
5457                 kvm_tdp_mmu_zap_all(kvm);
5458
5459         write_unlock(&kvm->mmu_lock);
5460 }
5461
5462 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5463 {
5464         return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5465 }
5466
5467 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5468                         struct kvm_memory_slot *slot,
5469                         struct kvm_page_track_notifier_node *node)
5470 {
5471         kvm_mmu_zap_all_fast(kvm);
5472 }
5473
5474 void kvm_mmu_init_vm(struct kvm *kvm)
5475 {
5476         struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5477
5478         kvm_mmu_init_tdp_mmu(kvm);
5479
5480         node->track_write = kvm_mmu_pte_write;
5481         node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5482         kvm_page_track_register_notifier(kvm, node);
5483 }
5484
5485 void kvm_mmu_uninit_vm(struct kvm *kvm)
5486 {
5487         struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5488
5489         kvm_page_track_unregister_notifier(kvm, node);
5490
5491         kvm_mmu_uninit_tdp_mmu(kvm);
5492 }
5493
5494 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5495 {
5496         struct kvm_memslots *slots;
5497         struct kvm_memory_slot *memslot;
5498         int i;
5499         bool flush;
5500
5501         write_lock(&kvm->mmu_lock);
5502         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5503                 slots = __kvm_memslots(kvm, i);
5504                 kvm_for_each_memslot(memslot, slots) {
5505                         gfn_t start, end;
5506
5507                         start = max(gfn_start, memslot->base_gfn);
5508                         end = min(gfn_end, memslot->base_gfn + memslot->npages);
5509                         if (start >= end)
5510                                 continue;
5511
5512                         slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
5513                                                 PG_LEVEL_4K,
5514                                                 KVM_MAX_HUGEPAGE_LEVEL,
5515                                                 start, end - 1, true);
5516                 }
5517         }
5518
5519         if (is_tdp_mmu_enabled(kvm)) {
5520                 flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
5521                 if (flush)
5522                         kvm_flush_remote_tlbs(kvm);
5523         }
5524
5525         write_unlock(&kvm->mmu_lock);
5526 }
5527
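/*
 * Write-protect the sptes in the given rmap; the return value tells the
 * caller whether a TLB flush is needed.
 */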
5528 static bool slot_rmap_write_protect(struct kvm *kvm,
5529                                     struct kvm_rmap_head *rmap_head,
5530                                     struct kvm_memory_slot *slot)
5531 {
5532         return __rmap_write_protect(kvm, rmap_head, false);
5533 }
5534
5535 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
5536                                       struct kvm_memory_slot *memslot,
5537                                       int start_level)
5538 {
5539         bool flush;
5540
5541         write_lock(&kvm->mmu_lock);
5542         flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5543                                 start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
5544         if (is_tdp_mmu_enabled(kvm))
5545                 flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
5546         write_unlock(&kvm->mmu_lock);
5547
5548         /*
5549          * We can flush all the TLBs outside of mmu_lock without risking TLB
5550          * corruption, since we only change sptes from writable to read-only.
5551          * Thus we only need to care about the case of changing an spte from
5552          * present to present (changing an spte from present to non-present
5553          * flushes all the TLBs immediately).  In other words, the only case
5554          * that matters is mmu_spte_update(), which checks
5555          * Host-writable | MMU-writable instead of PT_WRITABLE_MASK, meaning
5556          * the decision to flush no longer depends on PT_WRITABLE_MASK at
5557          * all.
5558          */
5559         if (flush)
5560                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5561 }
5562
5563 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
5564                                          struct kvm_rmap_head *rmap_head,
5565                                          struct kvm_memory_slot *slot)
5566 {
5567         u64 *sptep;
5568         struct rmap_iterator iter;
5569         int need_tlb_flush = 0;
5570         kvm_pfn_t pfn;
5571         struct kvm_mmu_page *sp;
5572
5573 restart:
5574         for_each_rmap_spte(rmap_head, &iter, sptep) {
5575                 sp = sptep_to_sp(sptep);
5576                 pfn = spte_to_pfn(*sptep);
5577
5578                 /*
5579                  * We cannot create huge page mappings for indirect shadow
5580                  * pages, which are found on the last rmap level (level = 1)
5581                  * when not using tdp; such shadow pages are kept in sync with
5582                  * the guest page table, and the guest page table uses 4K
5583                  * mappings if the indirect sp has level = 1.
5584                  */
5585                 if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
5586                     sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
5587                                                                pfn, PG_LEVEL_NUM)) {
5588                         pte_list_remove(rmap_head, sptep);
5589
5590                         if (kvm_available_flush_tlb_with_range())
5591                                 kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
5592                                         KVM_PAGES_PER_HPAGE(sp->role.level));
5593                         else
5594                                 need_tlb_flush = 1;
5595
5596                         goto restart;
5597                 }
5598         }
5599
5600         return need_tlb_flush;
5601 }
5602
5603 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5604                                    const struct kvm_memory_slot *memslot)
5605 {
5606         /* FIXME: const-ify all uses of struct kvm_memory_slot.  */
5607         struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
5608
5609         write_lock(&kvm->mmu_lock);
5610         slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
5611
5612         if (is_tdp_mmu_enabled(kvm))
5613                 kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
5614         write_unlock(&kvm->mmu_lock);
5615 }
5616
5617 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
5618                                         struct kvm_memory_slot *memslot)
5619 {
5620         /*
5621          * All current use cases for flushing the TLBs for a specific memslot
5622          * are related to dirty logging, and do the TLB flush out of mmu_lock.
5623          * The interaction between the various operations on memslot must be
5624          * serialized by slots_lock to ensure the TLB flush from one operation
5625          * is observed by any other operation on the same memslot.
5626          */
5627         lockdep_assert_held(&kvm->slots_lock);
5628         kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5629                                            memslot->npages);
5630 }
5631
5632 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5633                                    struct kvm_memory_slot *memslot)
5634 {
5635         bool flush;
5636
5637         write_lock(&kvm->mmu_lock);
5638         flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
5639         if (is_tdp_mmu_enabled(kvm))
5640                 flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
5641         write_unlock(&kvm->mmu_lock);
5642
5643         /*
5644          * It's also safe to flush TLBs out of mmu lock here as currently this
5645          * function is only used for dirty logging, in which case flushing TLB
5646          * out of mmu lock also guarantees no dirty pages will be lost in
5647          * dirty_bitmap.
5648          */
5649         if (flush)
5650                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5651 }
5652
5653 void kvm_mmu_zap_all(struct kvm *kvm)
5654 {
5655         struct kvm_mmu_page *sp, *node;
5656         LIST_HEAD(invalid_list);
5657         int ign;
5658
5659         write_lock(&kvm->mmu_lock);
5660 restart:
5661         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
5662                 if (WARN_ON(sp->role.invalid))
5663                         continue;
5664                 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
5665                         goto restart;
5666                 if (cond_resched_rwlock_write(&kvm->mmu_lock))
5667                         goto restart;
5668         }
5669
5670         kvm_mmu_commit_zap_page(kvm, &invalid_list);
5671
5672         if (is_tdp_mmu_enabled(kvm))
5673                 kvm_tdp_mmu_zap_all(kvm);
5674
5675         write_unlock(&kvm->mmu_lock);
5676 }
5677
5678 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5679 {
5680         WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
5681
5682         gen &= MMIO_SPTE_GEN_MASK;
5683
5684         /*
5685          * Generation numbers are incremented in multiples of the number of
5686          * address spaces in order to provide unique generations across all
5687          * address spaces.  Strip what is effectively the address space
5688          * modifier prior to checking for a wrap of the MMIO generation so
5689          * that a wrap in any address space is detected.
5690          */
5691         gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
5692
5693         /*
5694          * The very rare case: if the MMIO generation number has wrapped,
5695          * zap all shadow pages.
5696          */
5697         if (unlikely(gen == 0)) {
5698                 kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5699                 kvm_mmu_zap_all_fast(kvm);
5700         }
5701 }
5702
5703 static unsigned long
5704 mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5705 {
5706         struct kvm *kvm;
5707         int nr_to_scan = sc->nr_to_scan;
5708         unsigned long freed = 0;
5709
5710         mutex_lock(&kvm_lock);
5711
5712         list_for_each_entry(kvm, &vm_list, vm_list) {
5713                 int idx;
5714                 LIST_HEAD(invalid_list);
5715
5716                 /*
5717                  * Never scan more than sc->nr_to_scan VM instances.
5718                  * In practice this condition is never hit, since we do not
5719                  * try to shrink more than one VM and it is very unlikely to
5720                  * see !n_used_mmu_pages so many times.
5721                  */
5722                 if (!nr_to_scan--)
5723                         break;
5724                 /*
5725                  * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5726                  * here. We may skip a VM instance erroneously, but we do not
5727                  * want to shrink a VM that has only just started to populate
5728                  * its MMU anyway.
5729                  */
5730                 if (!kvm->arch.n_used_mmu_pages &&
5731                     !kvm_has_zapped_obsolete_pages(kvm))
5732                         continue;
5733
5734                 idx = srcu_read_lock(&kvm->srcu);
5735                 write_lock(&kvm->mmu_lock);
5736
5737                 if (kvm_has_zapped_obsolete_pages(kvm)) {
5738                         kvm_mmu_commit_zap_page(kvm,
5739                               &kvm->arch.zapped_obsolete_pages);
5740                         goto unlock;
5741                 }
5742
5743                 freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
5744
5745 unlock:
5746                 write_unlock(&kvm->mmu_lock);
5747                 srcu_read_unlock(&kvm->srcu, idx);
5748
5749                 /*
5750                  * unfair on small ones
5751                  * per-vm shrinkers cry out
5752                  * sadness comes quickly
5753                  */
5754                 list_move_tail(&kvm->vm_list, &vm_list);
5755                 break;
5756         }
5757
5758         mutex_unlock(&kvm_lock);
5759         return freed;
5760 }
5761
5762 static unsigned long
5763 mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5764 {
5765         return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
5766 }
5767
5768 static struct shrinker mmu_shrinker = {
5769         .count_objects = mmu_shrink_count,
5770         .scan_objects = mmu_shrink_scan,
5771         .seeks = DEFAULT_SEEKS * 10,
5772 };
5773
5774 static void mmu_destroy_caches(void)
5775 {
5776         kmem_cache_destroy(pte_list_desc_cache);
5777         kmem_cache_destroy(mmu_page_header_cache);
5778 }
5779
5780 static bool get_nx_auto_mode(void)
5781 {
5782         /* Return true when the CPU has the bug and mitigations are on. */
5783         return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
5784 }
5785
5786 static void __set_nx_huge_pages(bool val)
5787 {
5788         nx_huge_pages = itlb_multihit_kvm_mitigation = val;
5789 }
5790
5791 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
5792 {
5793         bool old_val = nx_huge_pages;
5794         bool new_val;
5795
5796         /* In "auto" mode, deploy the workaround only if the CPU has the bug. */
5797         if (sysfs_streq(val, "off"))
5798                 new_val = 0;
5799         else if (sysfs_streq(val, "force"))
5800                 new_val = 1;
5801         else if (sysfs_streq(val, "auto"))
5802                 new_val = get_nx_auto_mode();
5803         else if (strtobool(val, &new_val) < 0)
5804                 return -EINVAL;
5805
5806         __set_nx_huge_pages(new_val);
5807
5808         if (new_val != old_val) {
5809                 struct kvm *kvm;
5810
5811                 mutex_lock(&kvm_lock);
5812
5813                 list_for_each_entry(kvm, &vm_list, vm_list) {
5814                         mutex_lock(&kvm->slots_lock);
5815                         kvm_mmu_zap_all_fast(kvm);
5816                         mutex_unlock(&kvm->slots_lock);
5817
5818                         wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5819                 }
5820                 mutex_unlock(&kvm_lock);
5821         }
5822
5823         return 0;
5824 }
5825
5826 int kvm_mmu_module_init(void)
5827 {
5828         int ret = -ENOMEM;
5829
5830         if (nx_huge_pages == -1)
5831                 __set_nx_huge_pages(get_nx_auto_mode());
5832
5833         /*
5834          * MMU roles use union aliasing, which is, generally speaking,
5835          * undefined behavior. However, we supposedly know how compilers behave
5836          * and the current status quo is unlikely to change. The build-time
5837          * checks below will let us know if that assumption becomes false.
5838          */
5839         BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
5840         BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
5841         BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
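/*
 * The union aliasing the comment above refers to is a bitfield struct
 * overlaid with a plain integer of the same width, so a role can be built
 * field by field but copied, compared and hashed as a single word; the
 * BUILD_BUG_ON()s catch any compiler or ABI change that would grow the
 * unions past that word.  A standalone toy with the same shape (toy_role and
 * its fields are invented for illustration, not the real kvm_mmu_page_role
 * layout):
 *
 *      #include <stdint.h>
 *      #include <stdio.h>
 *
 *      union toy_role {
 *              struct {
 *                      uint32_t level:4;
 *                      uint32_t direct:1;
 *                      uint32_t access:3;
 *              };
 *              uint32_t word;          // whole role viewed as one integer
 *      };
 *      _Static_assert(sizeof(union toy_role) == sizeof(uint32_t),
 *                     "role must stay word-sized");
 *
 *      int main(void)
 *      {
 *              union toy_role a = { .word = 0 }, b = { .word = 0 };
 *
 *              a.level = 4; a.direct = 1;
 *              b.level = 4; b.direct = 1;
 *              printf("equal: %d\n", a.word == b.word);  // compare as one word
 *              return 0;
 *      }
 */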
5842
5843         kvm_mmu_reset_all_pte_masks();
5844
5845         pte_list_desc_cache = kmem_cache_create("pte_list_desc",
5846                                             sizeof(struct pte_list_desc),
5847                                             0, SLAB_ACCOUNT, NULL);
5848         if (!pte_list_desc_cache)
5849                 goto out;
5850
5851         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
5852                                                   sizeof(struct kvm_mmu_page),
5853                                                   0, SLAB_ACCOUNT, NULL);
5854         if (!mmu_page_header_cache)
5855                 goto out;
5856
5857         if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
5858                 goto out;
5859
5860         ret = register_shrinker(&mmu_shrinker);
5861         if (ret)
5862                 goto out;
5863
5864         return 0;
5865
5866 out:
5867         mmu_destroy_caches();
5868         return ret;
5869 }
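/*
 * The init path above uses the usual "goto out" unwind shape: each
 * allocation or registration either succeeds or jumps to a single cleanup
 * label that tears down whatever already exists (mmu_destroy_caches()
 * tolerates caches that were never created because kmem_cache_destroy(NULL)
 * is a no-op).  A minimal userspace rendering of the same pattern, with
 * malloc() standing in for the cache and counter setup, purely as a sketch:
 *
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *
 *      static void *a, *b;
 *
 *      static int module_init_like(void)
 *      {
 *              int ret = -1;           // assume failure until fully set up
 *
 *              a = malloc(32);
 *              if (!a)
 *                      goto out;
 *              b = malloc(32);
 *              if (!b)
 *                      goto out;
 *              return 0;               // fully initialised
 *
 *      out:
 *              free(b);                // free(NULL) is a no-op, much like
 *              free(a);                // kmem_cache_destroy(NULL)
 *              return ret;
 *      }
 *
 *      int main(void)
 *      {
 *              printf("init: %d\n", module_init_like());
 *              return 0;
 *      }
 */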
5870
5871 /*
5872  * Calculate mmu pages needed for kvm.
5873  */
5874 unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
5875 {
5876         unsigned long nr_mmu_pages;
5877         unsigned long nr_pages = 0;
5878         struct kvm_memslots *slots;
5879         struct kvm_memory_slot *memslot;
5880         int i;
5881
5882         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5883                 slots = __kvm_memslots(kvm, i);
5884
5885                 kvm_for_each_memslot(memslot, slots)
5886                         nr_pages += memslot->npages;
5887         }
5888
5889         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
5890         nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
5891
5892         return nr_mmu_pages;
5893 }
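/*
 * The default limit is a per-mille fraction of guest memory with a floor.
 * Assuming the values I recall from kvm_host.h, KVM_PERMILLE_MMU_PAGES == 20
 * (i.e. 2%) and KVM_MIN_ALLOC_MMU_PAGES == 64 (worth double-checking against
 * the header), a 4 GiB guest ends up with about 20971 shadow pages.  A quick
 * arithmetic check in standalone C:
 *
 *      #include <stdio.h>
 *
 *      // assumed values; verify against arch/x86/include/asm/kvm_host.h
 *      #define PERMILLE_MMU_PAGES      20UL
 *      #define MIN_ALLOC_MMU_PAGES     64UL
 *
 *      int main(void)
 *      {
 *              unsigned long nr_pages = (4UL << 30) / 4096;  // 4 GiB of 4 KiB pages
 *              unsigned long nr_mmu = nr_pages * PERMILLE_MMU_PAGES / 1000;
 *
 *              if (nr_mmu < MIN_ALLOC_MMU_PAGES)
 *                      nr_mmu = MIN_ALLOC_MMU_PAGES;
 *              printf("%lu guest pages -> %lu mmu pages\n", nr_pages, nr_mmu);
 *              return 0;
 *      }
 */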
5894
5895 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
5896 {
5897         kvm_mmu_unload(vcpu);
5898         free_mmu_pages(&vcpu->arch.root_mmu);
5899         free_mmu_pages(&vcpu->arch.guest_mmu);
5900         mmu_free_memory_caches(vcpu);
5901 }
5902
5903 void kvm_mmu_module_exit(void)
5904 {
5905         mmu_destroy_caches();
5906         percpu_counter_destroy(&kvm_total_used_mmu_pages);
5907         unregister_shrinker(&mmu_shrinker);
5908         mmu_audit_disable();
5909 }
5910
5911 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
5912 {
5913         unsigned int old_val;
5914         int err;
5915
5916         old_val = nx_huge_pages_recovery_ratio;
5917         err = param_set_uint(val, kp);
5918         if (err)
5919                 return err;
5920
5921         if (READ_ONCE(nx_huge_pages) &&
5922             !old_val && nx_huge_pages_recovery_ratio) {
5923                 struct kvm *kvm;
5924
5925                 mutex_lock(&kvm_lock);
5926
5927                 list_for_each_entry(kvm, &vm_list, vm_list)
5928                         wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5929
5930                 mutex_unlock(&kvm_lock);
5931         }
5932
5933         return err;
5934 }
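/*
 * Both knobs are ordinary module parameters, so at runtime they are normally
 * poked through sysfs; raising the ratio from 0 to non-zero while
 * nx_huge_pages is enabled takes effect immediately because the handler
 * above wakes every VM's recovery thread.  A small, hypothetical, root-only
 * userspace writer, assuming the conventional /sys/module/kvm/parameters/
 * path for the kvm module:
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              const char *path =
 *                      "/sys/module/kvm/parameters/nx_huge_pages_recovery_ratio";
 *              int fd = open(path, O_WRONLY);
 *
 *              if (fd < 0) {
 *                      perror("open");
 *                      return 1;
 *              }
 *              if (write(fd, "60", 2) != 2)    // the non-RT built-in default
 *                      perror("write");
 *              close(fd);
 *              return 0;
 *      }
 */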
5935
5936 static void kvm_recover_nx_lpages(struct kvm *kvm)
5937 {
5938         int rcu_idx;
5939         struct kvm_mmu_page *sp;
5940         unsigned int ratio;
5941         LIST_HEAD(invalid_list);
5942         bool flush = false;
5943         ulong to_zap;
5944
5945         rcu_idx = srcu_read_lock(&kvm->srcu);
5946         write_lock(&kvm->mmu_lock);
5947
5948         ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
5949         to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
5950         for ( ; to_zap; --to_zap) {
5951                 if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
5952                         break;
5953
5954                 /*
5955                  * We use a separate list instead of just using active_mmu_pages
5956                  * because the number of lpage_disallowed pages is expected to
5957                  * be relatively small compared to the total.
5958                  */
5959                 sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
5960                                       struct kvm_mmu_page,
5961                                       lpage_disallowed_link);
5962                 WARN_ON_ONCE(!sp->lpage_disallowed);
5963                 if (is_tdp_mmu_page(sp)) {
5964                         flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
5965                 } else {
5966                         kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
5967                         WARN_ON_ONCE(sp->lpage_disallowed);
5968                 }
5969
5970                 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5971                         kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
5972                         cond_resched_rwlock_write(&kvm->mmu_lock);
5973                         flush = false;
5974                 }
5975         }
5976         kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
5977
5978         write_unlock(&kvm->mmu_lock);
5979         srcu_read_unlock(&kvm->srcu, rcu_idx);
5980 }
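/*
 * Each wakeup zaps only a slice of the lpage_disallowed list:
 * to_zap = DIV_ROUND_UP(nx_lpage_splits, ratio), i.e. roughly 1/ratio of the
 * currently split pages per period.  A worked example of that arithmetic
 * (the counts below are arbitrary, not taken from a real VM):
 *
 *      #include <stdio.h>
 *
 *      #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
 *
 *      int main(void)
 *      {
 *              unsigned long splits = 1000;    // kvm->stat.nx_lpage_splits (example)
 *              unsigned int ratio = 60;        // nx_huge_pages_recovery_ratio
 *              unsigned long to_zap = ratio ? DIV_ROUND_UP(splits, ratio) : 0;
 *
 *              printf("zap %lu of %lu split pages this period\n", to_zap, splits);
 *              return 0;
 *      }
 */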
5981
5982 static long get_nx_lpage_recovery_timeout(u64 start_time)
5983 {
5984         return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
5985                 ? start_time + 60 * HZ - get_jiffies_64()
5986                 : MAX_SCHEDULE_TIMEOUT;
5987 }
5988
5989 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
5990 {
5991         u64 start_time;
5992         long remaining_time;
5993
5994         while (true) {
5995                 start_time = get_jiffies_64();
5996                 remaining_time = get_nx_lpage_recovery_timeout(start_time);
5997
5998                 set_current_state(TASK_INTERRUPTIBLE);
5999                 while (!kthread_should_stop() && remaining_time > 0) {
6000                         schedule_timeout(remaining_time);
6001                         remaining_time = get_nx_lpage_recovery_timeout(start_time);
6002                         set_current_state(TASK_INTERRUPTIBLE);
6003                 }
6004
6005                 set_current_state(TASK_RUNNING);
6006
6007                 if (kthread_should_stop())
6008                         return 0;
6009
6010                 kvm_recover_nx_lpages(kvm);
6011         }
6012 }
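/*
 * The worker is a per-VM kthread that sleeps interruptibly for the remainder
 * of a 60 * HZ period (or indefinitely while the feature is off), can be
 * woken early when the parameters change, and re-checks kthread_should_stop()
 * after every wakeup before running a recovery pass.  As a rough userspace
 * analogy of that "sleep until the deadline or until kicked" loop (compile
 * with -pthread; the one-second period, the kick condvar and the stop flag
 * are stand-ins for 60 * HZ, wake_up_process() and kthread_should_stop()):
 *
 *      #include <pthread.h>
 *      #include <stdio.h>
 *      #include <time.h>
 *      #include <unistd.h>
 *
 *      static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *      static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
 *      static int stop;
 *
 *      static void *recovery_worker(void *unused)
 *      {
 *              pthread_mutex_lock(&lock);
 *              while (!stop) {
 *                      struct timespec deadline;
 *
 *                      clock_gettime(CLOCK_REALTIME, &deadline);
 *                      deadline.tv_sec += 1;   // stands in for one 60 * HZ period
 *                      // 0 means we were kicked: re-check stop, keep waiting;
 *                      // ETIMEDOUT means the period expired
 *                      while (!stop && pthread_cond_timedwait(&kick, &lock,
 *                                                             &deadline) == 0)
 *                              ;
 *                      if (!stop)
 *                              printf("recovery pass\n");  // ~kvm_recover_nx_lpages()
 *              }
 *              pthread_mutex_unlock(&lock);
 *              return NULL;
 *      }
 *
 *      int main(void)
 *      {
 *              pthread_t t;
 *
 *              pthread_create(&t, NULL, recovery_worker, NULL);
 *              sleep(3);                       // let a few periods elapse
 *              pthread_mutex_lock(&lock);
 *              stop = 1;                       // ~kthread_stop()
 *              pthread_cond_signal(&kick);
 *              pthread_mutex_unlock(&lock);
 *              pthread_join(t, NULL);
 *              return 0;
 *      }
 */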
6013
6014 int kvm_mmu_post_init_vm(struct kvm *kvm)
6015 {
6016         int err;
6017
6018         err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6019                                           "kvm-nx-lpage-recovery",
6020                                           &kvm->arch.nx_lpage_recovery_thread);
6021         if (!err)
6022                 kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6023
6024         return err;
6025 }
6026
6027 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6028 {
6029         if (kvm->arch.nx_lpage_recovery_thread)
6030                 kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6031 }