KVM: x86/mmu: Automatically update iter->old_spte if cmpxchg fails
[linux-2.6-microblaze.git] / arch/x86/kvm/mmu/tdp_mmu.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "mmu.h"
4 #include "mmu_internal.h"
5 #include "mmutrace.h"
6 #include "tdp_iter.h"
7 #include "tdp_mmu.h"
8 #include "spte.h"
9
10 #include <asm/cmpxchg.h>
11 #include <trace/events/kvm.h>
12
13 static bool __read_mostly tdp_mmu_enabled = true;
14 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
15
16 /* Initializes the TDP MMU for the VM, if enabled. */
17 bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18 {
19         if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20                 return false;
21
22         /* This should not be changed for the lifetime of the VM. */
23         kvm->arch.tdp_mmu_enabled = true;
24
25         INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
26         spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
27         INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28
29         return true;
30 }
31
32 static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
33                                                              bool shared)
34 {
35         if (shared)
36                 lockdep_assert_held_read(&kvm->mmu_lock);
37         else
38                 lockdep_assert_held_write(&kvm->mmu_lock);
39 }
40
41 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
42 {
43         if (!kvm->arch.tdp_mmu_enabled)
44                 return;
45
46         WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
47         WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
48
49         /*
50          * Ensure that all the outstanding RCU callbacks to free shadow pages
51          * can run before the VM is torn down.
52          */
53         rcu_barrier();
54 }
55
56 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
57                           gfn_t start, gfn_t end, bool can_yield, bool flush,
58                           bool shared);
59
60 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
61 {
62         free_page((unsigned long)sp->spt);
63         kmem_cache_free(mmu_page_header_cache, sp);
64 }
65
66 /*
67  * This is called through call_rcu in order to free TDP page table memory
68  * safely with respect to other kernel threads that may be operating on
69  * the memory.
70  * Because TDP MMU page table memory is only accessed inside an RCU read
71  * critical section, and is only freed after a grace period, lockless
72  * accessors are guaranteed not to use it after it has been freed.
73  */
74 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
75 {
76         struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
77                                                rcu_head);
78
79         tdp_mmu_free_sp(sp);
80 }
81
82 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
83                           bool shared)
84 {
85         kvm_lockdep_assert_mmu_lock_held(kvm, shared);
86
87         if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
88                 return;
89
90         WARN_ON(!root->tdp_mmu_page);
91
92         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
93         list_del_rcu(&root->link);
94         spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
95
96         zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
97
98         call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
99 }
100
101 /*
102  * Returns the next root after @prev_root (or the first root if @prev_root is
103  * NULL).  A reference to the returned root is acquired, and the reference to
104  * @prev_root is released (the caller obviously must hold a reference to
105  * @prev_root if it's non-NULL).
106  *
107  * If @only_valid is true, invalid roots are skipped.
108  *
109  * Returns NULL if the end of tdp_mmu_roots was reached.
110  */
111 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
112                                               struct kvm_mmu_page *prev_root,
113                                               bool shared, bool only_valid)
114 {
115         struct kvm_mmu_page *next_root;
116
117         rcu_read_lock();
118
119         if (prev_root)
120                 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
121                                                   &prev_root->link,
122                                                   typeof(*prev_root), link);
123         else
124                 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
125                                                    typeof(*next_root), link);
126
127         while (next_root) {
128                 if ((!only_valid || !next_root->role.invalid) &&
129                     kvm_tdp_mmu_get_root(next_root))
130                         break;
131
132                 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
133                                 &next_root->link, typeof(*next_root), link);
134         }
135
136         rcu_read_unlock();
137
138         if (prev_root)
139                 kvm_tdp_mmu_put_root(kvm, prev_root, shared);
140
141         return next_root;
142 }
143
144 /*
145  * Note: this iterator gets and puts references to the roots it iterates over.
146  * This makes it safe to release the MMU lock and yield within the loop, but
147  * if exiting the loop early, the caller must drop the reference to the most
148  * recent root. (Unless keeping a live reference is desirable.)
149  *
150  * If shared is set, this function is operating under the MMU lock in read
151  * mode. In the unlikely event that this thread must free a root, the lock
152  * will be temporarily dropped and reacquired in write mode.
153  */
154 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
155         for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);       \
156              _root;                                                             \
157              _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))      \
158                 if (kvm_mmu_page_as_id(_root) != _as_id) {                      \
159                 } else
160
161 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)    \
162         __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
163
164 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)          \
165         __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, false)
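
/*
 * A sketch of typical usage of the yield-safe root iterators, modeled on
 * __kvm_tdp_mmu_zap_gfn_range() below.  Because the iterator holds a
 * reference to each root, the loop body may release the MMU lock and yield
 * (e.g. via tdp_mmu_iter_cond_resched()) without the root being freed out
 * from under it:
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
 *		flush = zap_gfn_range(kvm, root, start, end, true, flush, shared);
 */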
166
167 #define for_each_tdp_mmu_root(_kvm, _root, _as_id)                              \
168         list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,         \
169                                 lockdep_is_held_type(&kvm->mmu_lock, 0) ||      \
170                                 lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock)) \
171                 if (kvm_mmu_page_as_id(_root) != _as_id) {              \
172                 } else
173
174 static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
175                                                    int level)
176 {
177         union kvm_mmu_page_role role;
178
179         role = vcpu->arch.mmu->mmu_role.base;
180         role.level = level;
181         role.direct = true;
182         role.has_4_byte_gpte = false;
183         role.access = ACC_ALL;
184         role.ad_disabled = !shadow_accessed_mask;
185
186         return role;
187 }
188
189 static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
190                                                int level)
191 {
192         struct kvm_mmu_page *sp;
193
194         sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
195         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
196         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
197
198         sp->role.word = page_role_for_level(vcpu, level).word;
199         sp->gfn = gfn;
200         sp->tdp_mmu_page = true;
201
202         trace_kvm_mmu_get_page(sp, true);
203
204         return sp;
205 }
206
207 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
208 {
209         union kvm_mmu_page_role role;
210         struct kvm *kvm = vcpu->kvm;
211         struct kvm_mmu_page *root;
212
213         lockdep_assert_held_write(&kvm->mmu_lock);
214
215         role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
216
217         /*
218          * Check for an existing root before allocating a new one.  Note, the
219          * role check prevents consuming an invalid root.
220          */
221         for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
222                 if (root->role.word == role.word &&
223                     kvm_tdp_mmu_get_root(root))
224                         goto out;
225         }
226
227         root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
228         refcount_set(&root->tdp_mmu_root_count, 1);
229
230         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
231         list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
232         spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
233
234 out:
235         return __pa(root->spt);
236 }
237
238 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
239                                 u64 old_spte, u64 new_spte, int level,
240                                 bool shared);
241
242 static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
243 {
244         if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
245                 return;
246
247         if (is_accessed_spte(old_spte) &&
248             (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
249              spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
250                 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
251 }
252
253 static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
254                                           u64 old_spte, u64 new_spte, int level)
255 {
256         bool pfn_changed;
257         struct kvm_memory_slot *slot;
258
259         if (level > PG_LEVEL_4K)
260                 return;
261
262         pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
263
264         if ((!is_writable_pte(old_spte) || pfn_changed) &&
265             is_writable_pte(new_spte)) {
266                 slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
267                 mark_page_dirty_in_slot(kvm, slot, gfn);
268         }
269 }
270
271 /**
272  * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
273  *
274  * @kvm: kvm instance
275  * @sp: the new page
276  * @account_nx: This page replaces a NX large page and should be marked for
277  *              eventual reclaim.
278  */
279 static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
280                               bool account_nx)
281 {
282         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
283         list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
284         if (account_nx)
285                 account_huge_nx_page(kvm, sp);
286         spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
287 }
288
289 /**
290  * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
291  *
292  * @kvm: kvm instance
293  * @sp: the page to be removed
294  * @shared: This operation may not be running under the exclusive use of
295  *          the MMU lock and the operation must synchronize with other
296  *          threads that might be adding or removing pages.
297  */
298 static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
299                                 bool shared)
300 {
301         if (shared)
302                 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
303         else
304                 lockdep_assert_held_write(&kvm->mmu_lock);
305
306         list_del(&sp->link);
307         if (sp->lpage_disallowed)
308                 unaccount_huge_nx_page(kvm, sp);
309
310         if (shared)
311                 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
312 }
313
314 /**
315  * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
316  *
317  * @kvm: kvm instance
318  * @pt: the page removed from the paging structure
319  * @shared: This operation may not be running under the exclusive use
320  *          of the MMU lock and the operation must synchronize with other
321  *          threads that might be modifying SPTEs.
322  *
323  * Given a page table that has been removed from the TDP paging structure,
324  * iterates through the page table to clear SPTEs and free child page tables.
325  *
326  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
327  * protection. Since this thread removed it from the paging structure,
328  * this thread will be responsible for ensuring the page is freed. Hence the
329  * early rcu_dereferences in the function.
330  */
331 static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
332                                         bool shared)
333 {
334         struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
335         int level = sp->role.level;
336         gfn_t base_gfn = sp->gfn;
337         int i;
338
339         trace_kvm_mmu_prepare_zap_page(sp);
340
341         tdp_mmu_unlink_page(kvm, sp, shared);
342
343         for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
344                 u64 *sptep = rcu_dereference(pt) + i;
345                 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
346                 u64 old_child_spte;
347
348                 if (shared) {
349                         /*
350                          * Set the SPTE to a nonpresent value that other
351                          * threads will not overwrite. If the SPTE was
352                          * already marked as removed then another thread
353                          * handling a page fault could overwrite it, so
354                          * keep retrying the exchange until the SPTE
355                          * transitions from some other value to the removed SPTE value.
356                          */
357                         for (;;) {
358                                 old_child_spte = xchg(sptep, REMOVED_SPTE);
359                                 if (!is_removed_spte(old_child_spte))
360                                         break;
361                                 cpu_relax();
362                         }
363                 } else {
364                         /*
365                          * If the SPTE is not MMU-present, there is no backing
366                          * page associated with the SPTE and so no side effects
367                          * that need to be recorded, and exclusive ownership of
368                          * mmu_lock ensures the SPTE can't be made present.
369                          * Note, zapping MMIO SPTEs is also unnecessary as they
370                          * are guarded by the memslots generation, not by being
371                          * unreachable.
372                          */
373                         old_child_spte = READ_ONCE(*sptep);
374                         if (!is_shadow_present_pte(old_child_spte))
375                                 continue;
376
377                         /*
378                          * Marking the SPTE as a removed SPTE is not
379                          * strictly necessary here as the MMU lock will
380                          * stop other threads from concurrently modifying
381                          * this SPTE. Using the removed SPTE value keeps
382                          * the two branches consistent and simplifies
383                          * the function.
384                          */
385                         WRITE_ONCE(*sptep, REMOVED_SPTE);
386                 }
387                 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
388                                     old_child_spte, REMOVED_SPTE, level,
389                                     shared);
390         }
391
392         kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
393                                            KVM_PAGES_PER_HPAGE(level + 1));
394
395         call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
396 }
397
398 /**
399  * __handle_changed_spte - handle bookkeeping associated with an SPTE change
400  * @kvm: kvm instance
401  * @as_id: the address space of the paging structure the SPTE was a part of
402  * @gfn: the base GFN that was mapped by the SPTE
403  * @old_spte: The value of the SPTE before the change
404  * @new_spte: The value of the SPTE after the change
405  * @level: the level of the PT the SPTE is part of in the paging structure
406  * @shared: This operation may not be running under the exclusive use of
407  *          the MMU lock and the operation must synchronize with other
408  *          threads that might be modifying SPTEs.
409  *
410  * Handle bookkeeping that might result from the modification of a SPTE.
411  * This function must be called for all TDP SPTE modifications.
412  */
413 static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
414                                   u64 old_spte, u64 new_spte, int level,
415                                   bool shared)
416 {
417         bool was_present = is_shadow_present_pte(old_spte);
418         bool is_present = is_shadow_present_pte(new_spte);
419         bool was_leaf = was_present && is_last_spte(old_spte, level);
420         bool is_leaf = is_present && is_last_spte(new_spte, level);
421         bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
422
423         WARN_ON(level > PT64_ROOT_MAX_LEVEL);
424         WARN_ON(level < PG_LEVEL_4K);
425         WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
426
427         /*
428          * If this warning were to trigger it would indicate that there was a
429          * missing MMU notifier or a race with some notifier handler.
430          * A present, leaf SPTE should never be directly replaced with another
431          * present leaf SPTE pointing to a different PFN. A notifier handler
432          * should be zapping the SPTE before the main MM's page table is
433          * changed, or the SPTE should be zeroed, and the TLBs flushed by the
434          * thread before replacement.
435          */
436         if (was_leaf && is_leaf && pfn_changed) {
437                 pr_err("Invalid SPTE change: cannot replace a present leaf\n"
438                        "SPTE with another present leaf SPTE mapping a\n"
439                        "different PFN!\n"
440                        "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
441                        as_id, gfn, old_spte, new_spte, level);
442
443                 /*
444                  * Crash the host to prevent error propagation and guest data
445                  * corruption.
446                  */
447                 BUG();
448         }
449
450         if (old_spte == new_spte)
451                 return;
452
453         trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
454
455         if (is_leaf)
456                 check_spte_writable_invariants(new_spte);
457
458         /*
459          * The only times a SPTE should be changed from a non-present to
460          * non-present state is when an MMIO entry is installed/modified/
461          * removed. In that case, there is nothing to do here.
462          */
463         if (!was_present && !is_present) {
464                 /*
465                  * If this change does not involve a MMIO SPTE or removed SPTE,
466                  * it is unexpected. Log the change, though it should not
467                  * impact the guest since both the former and current SPTEs
468                  * are nonpresent.
469                  */
470                 if (WARN_ON(!is_mmio_spte(old_spte) &&
471                             !is_mmio_spte(new_spte) &&
472                             !is_removed_spte(new_spte)))
473                         pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
474                                "should not be replaced with another,\n"
475                                "different nonpresent SPTE, unless one or both\n"
476                                "are MMIO SPTEs, or the new SPTE is\n"
477                                "a temporary removed SPTE.\n"
478                                "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
479                                as_id, gfn, old_spte, new_spte, level);
480                 return;
481         }
482
483         if (is_leaf != was_leaf)
484                 kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
485
486         if (was_leaf && is_dirty_spte(old_spte) &&
487             (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
488                 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
489
490         /*
491          * Recursively handle child PTs if the change removed a subtree from
492          * the paging structure.
493          */
494         if (was_present && !was_leaf && (pfn_changed || !is_present))
495                 handle_removed_tdp_mmu_page(kvm,
496                                 spte_to_child_pt(old_spte, level), shared);
497 }
498
499 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
500                                 u64 old_spte, u64 new_spte, int level,
501                                 bool shared)
502 {
503         __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
504                               shared);
505         handle_changed_spte_acc_track(old_spte, new_spte, level);
506         handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
507                                       new_spte, level);
508 }
509
510 /*
511  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
512  * and handle the associated bookkeeping.  Do not mark the page dirty
513  * in KVM's dirty bitmaps.
514  *
515  * If setting the SPTE fails because it has changed, iter->old_spte will be
516  * refreshed to the current value of the spte.
517  *
518  * @kvm: kvm instance
519  * @iter: a tdp_iter instance currently on the SPTE that should be set
520  * @new_spte: The value the SPTE should be set to
521  * Returns: true if the SPTE was set, false if it was not. If false is returned,
522  *          this function will have no side-effects other than setting
523  *          iter->old_spte to the last known value of the SPTE.
524  */
525 static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
526                                            struct tdp_iter *iter,
527                                            u64 new_spte)
528 {
529         u64 *sptep = rcu_dereference(iter->sptep);
530         u64 old_spte;
531
532         WARN_ON_ONCE(iter->yielded);
533
534         lockdep_assert_held_read(&kvm->mmu_lock);
535
536         /*
537          * Do not change removed SPTEs. Only the thread that froze the SPTE
538          * may modify it.
539          */
540         if (is_removed_spte(iter->old_spte))
541                 return false;
542
543         /*
544          * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
545          * does not hold the mmu_lock.
546          */
547         old_spte = cmpxchg64(sptep, iter->old_spte, new_spte);
548         if (old_spte != iter->old_spte) {
549                 /*
550                  * The page table entry was modified by a different logical
551                  * CPU. Refresh iter->old_spte with the current value so the
552                  * caller operates on fresh data, e.g. if it retries
553                  * tdp_mmu_set_spte_atomic().
554                  */
555                 iter->old_spte = old_spte;
556                 return false;
557         }
558
559         __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
560                               new_spte, iter->level, true);
561         handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
562
563         return true;
564 }
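
/*
 * A sketch of the retry pattern enabled by the automatic refresh of
 * iter->old_spte on a failed cmpxchg (see wrprot_gfn_range() and
 * clear_dirty_gfn_range() below): the caller can recompute new_spte from the
 * refreshed value and simply retry, without re-reading the SPTE itself:
 *
 *	retry:
 *		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
 *		if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *			goto retry;
 */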
565
566 static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
567                                            struct tdp_iter *iter)
568 {
569         /*
570          * Freeze the SPTE by setting it to a special,
571          * non-present value. This will stop other threads from
572          * immediately installing a present entry in its place
573          * before the TLBs are flushed.
574          */
575         if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
576                 return false;
577
578         kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
579                                            KVM_PAGES_PER_HPAGE(iter->level));
580
581         /*
582          * No other thread can overwrite the removed SPTE as they
583          * must either wait on the MMU lock or use
584          * tdp_mmu_set_spte_atomic which will not overwrite the
585          * special removed SPTE value. No bookkeeping is needed
586          * here since the SPTE is going from non-present
587          * to non-present.
588          */
589         WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
590
591         return true;
592 }
593
594
595 /*
596  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
597  * @kvm: kvm instance
598  * @iter: a tdp_iter instance currently on the SPTE that should be set
599  * @new_spte: The value the SPTE should be set to
600  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
601  *                    of the page. Should be set unless handling an MMU
602  *                    notifier for access tracking. Leaving record_acc_track
603  *                    unset in that case prevents page accesses from being
604  *                    double counted.
605  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
606  *                    appropriate for the change being made. Should be set
607  *                    unless performing certain dirty logging operations.
608  *                    Leaving record_dirty_log unset in that case prevents page
609  *                    writes from being double counted.
610  */
611 static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
612                                       u64 new_spte, bool record_acc_track,
613                                       bool record_dirty_log)
614 {
615         WARN_ON_ONCE(iter->yielded);
616
617         lockdep_assert_held_write(&kvm->mmu_lock);
618
619         /*
620          * No thread should be using this function to set SPTEs to the
621          * temporary removed SPTE value.
622          * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
623          * should be used. If operating under the MMU lock in write mode, the
624          * use of the removed SPTE should not be necessary.
625          */
626         WARN_ON(is_removed_spte(iter->old_spte));
627
628         WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
629
630         __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
631                               new_spte, iter->level, false);
632         if (record_acc_track)
633                 handle_changed_spte_acc_track(iter->old_spte, new_spte,
634                                               iter->level);
635         if (record_dirty_log)
636                 handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
637                                               iter->old_spte, new_spte,
638                                               iter->level);
639 }
640
641 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
642                                     u64 new_spte)
643 {
644         __tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
645 }
646
647 static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
648                                                  struct tdp_iter *iter,
649                                                  u64 new_spte)
650 {
651         __tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
652 }
653
654 static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
655                                                  struct tdp_iter *iter,
656                                                  u64 new_spte)
657 {
658         __tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
659 }
660
661 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
662         for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
663
664 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)  \
665         tdp_root_for_each_pte(_iter, _root, _start, _end)               \
666                 if (!is_shadow_present_pte(_iter.old_spte) ||           \
667                     !is_last_spte(_iter.old_spte, _iter.level))         \
668                         continue;                                       \
669                 else
670
671 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)         \
672         for_each_tdp_pte(_iter, __va(_mmu->root_hpa),           \
673                          _mmu->shadow_root_level, _start, _end)
674
675 /*
676  * Yield if the MMU lock is contended or this thread needs to return control
677  * to the scheduler.
678  *
679  * If this function should yield and flush is set, it will perform a remote
680  * TLB flush before yielding.
681  *
682  * If this function yields, iter->yielded is set and the caller must skip to
683  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
684  * over the paging structures to allow the iterator to continue its traversal
685  * from the paging structure root.
686  *
687  * Returns true if this function yielded.
688  */
689 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
690                                                           struct tdp_iter *iter,
691                                                           bool flush, bool shared)
692 {
693         WARN_ON(iter->yielded);
694
695         /* Ensure forward progress has been made before yielding. */
696         if (iter->next_last_level_gfn == iter->yielded_gfn)
697                 return false;
698
699         if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
700                 rcu_read_unlock();
701
702                 if (flush)
703                         kvm_flush_remote_tlbs(kvm);
704
705                 if (shared)
706                         cond_resched_rwlock_read(&kvm->mmu_lock);
707                 else
708                         cond_resched_rwlock_write(&kvm->mmu_lock);
709
710                 rcu_read_lock();
711
712                 WARN_ON(iter->gfn > iter->next_last_level_gfn);
713
714                 iter->yielded = true;
715         }
716
717         return iter->yielded;
718 }
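
/*
 * A sketch of the expected calling pattern (see zap_gfn_range() below): check
 * for a yield at the top of each loop iteration and, if this helper yielded,
 * skip the rest of the body so that tdp_iter_next() can restart the walk from
 * the paging structure root:
 *
 *	if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
 *		flush = false;
 *		continue;
 *	}
 */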
719
720 /*
721  * Tears down the mappings for the range of gfns, [start, end), and frees the
722  * non-root pages mapping GFNs strictly within that range. Returns true if
723  * SPTEs have been cleared and a TLB flush is needed before releasing the
724  * MMU lock.
725  *
726  * If can_yield is true, will release the MMU lock and reschedule if the
727  * scheduler needs the CPU or there is contention on the MMU lock. If this
728  * function cannot yield, it will not release the MMU lock or reschedule and
729  * the caller must ensure it does not supply too large a GFN range, or the
730  * operation can cause a soft lockup.
731  *
732  * If shared is true, this thread holds the MMU lock in read mode and must
733  * account for the possibility that other threads are modifying the paging
734  * structures concurrently. If shared is false, this thread should hold the
735  * MMU lock in write mode.
736  */
737 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
738                           gfn_t start, gfn_t end, bool can_yield, bool flush,
739                           bool shared)
740 {
741         gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
742         bool zap_all = (start == 0 && end >= max_gfn_host);
743         struct tdp_iter iter;
744
745         /*
746          * No need to try to step down in the iterator when zapping all SPTEs,
747          * zapping the top-level non-leaf SPTEs will recurse on their children.
748          */
749         int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
750
751         /*
752          * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
753          * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
754          * and so KVM will never install a SPTE for such addresses.
755          */
756         end = min(end, max_gfn_host);
757
758         kvm_lockdep_assert_mmu_lock_held(kvm, shared);
759
760         rcu_read_lock();
761
762         for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
763                                    min_level, start, end) {
764 retry:
765                 if (can_yield &&
766                     tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
767                         flush = false;
768                         continue;
769                 }
770
771                 if (!is_shadow_present_pte(iter.old_spte))
772                         continue;
773
774                 /*
775                  * If this is a non-last-level SPTE that covers a larger range
776                  * than should be zapped, continue, and zap the mappings at a
777                  * lower level, except when zapping all SPTEs.
778                  */
779                 if (!zap_all &&
780                     (iter.gfn < start ||
781                      iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
782                     !is_last_spte(iter.old_spte, iter.level))
783                         continue;
784
785                 if (!shared) {
786                         tdp_mmu_set_spte(kvm, &iter, 0);
787                         flush = true;
788                 } else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
789                         goto retry;
790                 }
791         }
792
793         rcu_read_unlock();
794         return flush;
795 }
796
797 /*
798  * Tears down the mappings for the range of gfns, [start, end), and frees the
799  * non-root pages mapping GFNs strictly within that range. Returns true if
800  * SPTEs have been cleared and a TLB flush is needed before releasing the
801  * MMU lock.
802  */
803 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
804                                  gfn_t end, bool can_yield, bool flush)
805 {
806         struct kvm_mmu_page *root;
807
808         for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
809                 flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
810                                       false);
811
812         return flush;
813 }
814
815 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
816 {
817         bool flush = false;
818         int i;
819
820         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
821                 flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush);
822
823         if (flush)
824                 kvm_flush_remote_tlbs(kvm);
825 }
826
827 static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
828                                                   struct kvm_mmu_page *prev_root)
829 {
830         struct kvm_mmu_page *next_root;
831
832         if (prev_root)
833                 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
834                                                   &prev_root->link,
835                                                   typeof(*prev_root), link);
836         else
837                 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
838                                                    typeof(*next_root), link);
839
840         while (next_root && !(next_root->role.invalid &&
841                               refcount_read(&next_root->tdp_mmu_root_count)))
842                 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
843                                                   &next_root->link,
844                                                   typeof(*next_root), link);
845
846         return next_root;
847 }
848
849 /*
850  * Since kvm_tdp_mmu_invalidate_all_roots() has acquired a reference to each
851  * invalidated root, they will not be freed until this function drops the
852  * reference. Before dropping that reference, tear down the paging
853  * structure so that whichever thread does drop the last reference
854  * only has to do a trivial amount of work. Since the roots are invalid,
855  * no new SPTEs should be created under them.
856  */
857 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
858 {
859         struct kvm_mmu_page *next_root;
860         struct kvm_mmu_page *root;
861         bool flush = false;
862
863         lockdep_assert_held_read(&kvm->mmu_lock);
864
865         rcu_read_lock();
866
867         root = next_invalidated_root(kvm, NULL);
868
869         while (root) {
870                 next_root = next_invalidated_root(kvm, root);
871
872                 rcu_read_unlock();
873
874                 flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
875
876                 /*
877                  * Put the reference acquired in
878                  * kvm_tdp_mmu_invalidate_all_roots().
879                  */
880                 kvm_tdp_mmu_put_root(kvm, root, true);
881
882                 root = next_root;
883
884                 rcu_read_lock();
885         }
886
887         rcu_read_unlock();
888
889         if (flush)
890                 kvm_flush_remote_tlbs(kvm);
891 }
892
893 /*
894  * Mark each TDP MMU root as invalid so that other threads
895  * will drop their references and allow the root count to
896  * go to 0.
897  *
898  * Also take a reference on all roots so that this thread
899  * can do the bulk of the work required to free the roots
900  * once they are invalidated. Without this reference, a
901  * vCPU thread might drop the last reference to a root and
902  * get stuck with tearing down the entire paging structure.
903  *
904  * Roots which have a zero refcount should be skipped as
905  * they're already being torn down.
906  * Already invalid roots should be referenced again so that
907  * they aren't freed before kvm_tdp_mmu_zap_invalidated_roots() is
908  * done with them.
909  *
910  * This has essentially the same effect for the TDP MMU
911  * as updating mmu_valid_gen does for the shadow MMU.
912  */
913 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
914 {
915         struct kvm_mmu_page *root;
916
917         lockdep_assert_held_write(&kvm->mmu_lock);
918         list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
919                 if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
920                         root->role.invalid = true;
921 }
922
923 /*
924  * Installs a last-level SPTE to handle a TDP page fault.
925  * (NPT/EPT violation/misconfiguration)
926  */
927 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
928                                           struct kvm_page_fault *fault,
929                                           struct tdp_iter *iter)
930 {
931         struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
932         u64 new_spte;
933         int ret = RET_PF_FIXED;
934         bool wrprot = false;
935
936         WARN_ON(sp->role.level != fault->goal_level);
937         if (unlikely(!fault->slot))
938                 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
939         else
940                 wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
941                                          fault->pfn, iter->old_spte, fault->prefetch, true,
942                                          fault->map_writable, &new_spte);
943
944         if (new_spte == iter->old_spte)
945                 ret = RET_PF_SPURIOUS;
946         else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
947                 return RET_PF_RETRY;
948
949         /*
950          * If the page fault was caused by a write but the page is write
951          * protected, emulation is needed. If the emulation was skipped,
952          * the vCPU would have the same fault again.
953          */
954         if (wrprot) {
955                 if (fault->write)
956                         ret = RET_PF_EMULATE;
957         }
958
959         /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
960         if (unlikely(is_mmio_spte(new_spte))) {
961                 trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
962                                      new_spte);
963                 ret = RET_PF_EMULATE;
964         } else {
965                 trace_kvm_mmu_set_spte(iter->level, iter->gfn,
966                                        rcu_dereference(iter->sptep));
967         }
968
969         /*
970          * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
971          * consistent with legacy MMU behavior.
972          */
973         if (ret != RET_PF_SPURIOUS)
974                 vcpu->stat.pf_fixed++;
975
976         return ret;
977 }
978
979 /*
980  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
981  * page tables and SPTEs to translate the faulting guest physical address.
982  */
983 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
984 {
985         struct kvm_mmu *mmu = vcpu->arch.mmu;
986         struct tdp_iter iter;
987         struct kvm_mmu_page *sp;
988         u64 *child_pt;
989         u64 new_spte;
990         int ret;
991
992         kvm_mmu_hugepage_adjust(vcpu, fault);
993
994         trace_kvm_mmu_spte_requested(fault);
995
996         rcu_read_lock();
997
998         tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
999                 if (fault->nx_huge_page_workaround_enabled)
1000                         disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1001
1002                 if (iter.level == fault->goal_level)
1003                         break;
1004
1005                 /*
1006                  * If there is an SPTE mapping a large page at a higher level
1007                  * than the target, that SPTE must be cleared and replaced
1008                  * with a non-leaf SPTE.
1009                  */
1010                 if (is_shadow_present_pte(iter.old_spte) &&
1011                     is_large_pte(iter.old_spte)) {
1012                         if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
1013                                 break;
1014
1015                         /*
1016                          * The iter must explicitly re-read the spte here
1017                          * because the new value informs the !present
1018                          * path below.
1019                          */
1020                         iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1021                 }
1022
1023                 if (!is_shadow_present_pte(iter.old_spte)) {
1024                         /*
1025                          * If SPTE has been frozen by another thread, just
1026                          * give up and retry, avoiding unnecessary page table
1027                          * allocation and free.
1028                          */
1029                         if (is_removed_spte(iter.old_spte))
1030                                 break;
1031
1032                         sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
1033                         child_pt = sp->spt;
1034
1035                         new_spte = make_nonleaf_spte(child_pt,
1036                                                      !shadow_accessed_mask);
1037
1038                         if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, new_spte)) {
1039                                 tdp_mmu_link_page(vcpu->kvm, sp,
1040                                                   fault->huge_page_disallowed &&
1041                                                   fault->req_level >= iter.level);
1042
1043                                 trace_kvm_mmu_get_page(sp, true);
1044                         } else {
1045                                 tdp_mmu_free_sp(sp);
1046                                 break;
1047                         }
1048                 }
1049         }
1050
1051         if (iter.level != fault->goal_level) {
1052                 rcu_read_unlock();
1053                 return RET_PF_RETRY;
1054         }
1055
1056         ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1057         rcu_read_unlock();
1058
1059         return ret;
1060 }
1061
1062 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1063                                  bool flush)
1064 {
1065         return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
1066                                            range->end, range->may_block, flush);
1067 }
1068
1069 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1070                               struct kvm_gfn_range *range);
1071
1072 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1073                                                    struct kvm_gfn_range *range,
1074                                                    tdp_handler_t handler)
1075 {
1076         struct kvm_mmu_page *root;
1077         struct tdp_iter iter;
1078         bool ret = false;
1079
1080         rcu_read_lock();
1081
1082         /*
1083          * Don't support rescheduling, none of the MMU notifiers that funnel
1084          * into this helper allow blocking; it'd be dead, wasteful code.
1085          */
1086         for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1087                 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1088                         ret |= handler(kvm, &iter, range);
1089         }
1090
1091         rcu_read_unlock();
1092
1093         return ret;
1094 }
1095
1096 /*
1097  * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return true
1098  * if any of the GFNs in the range have been accessed.
1099  */
1100 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1101                           struct kvm_gfn_range *range)
1102 {
1103         u64 new_spte = 0;
1104
1105         /* If we have a non-accessed entry we don't need to change the pte. */
1106         if (!is_accessed_spte(iter->old_spte))
1107                 return false;
1108
1109         new_spte = iter->old_spte;
1110
1111         if (spte_ad_enabled(new_spte)) {
1112                 new_spte &= ~shadow_accessed_mask;
1113         } else {
1114                 /*
1115                  * Capture the dirty status of the page, so that it doesn't get
1116                  * lost when the SPTE is marked for access tracking.
1117                  */
1118                 if (is_writable_pte(new_spte))
1119                         kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1120
1121                 new_spte = mark_spte_for_access_track(new_spte);
1122         }
1123
1124         tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
1125
1126         return true;
1127 }
1128
1129 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1130 {
1131         return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1132 }
1133
1134 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1135                          struct kvm_gfn_range *range)
1136 {
1137         return is_accessed_spte(iter->old_spte);
1138 }
1139
1140 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1141 {
1142         return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1143 }
1144
1145 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
1146                          struct kvm_gfn_range *range)
1147 {
1148         u64 new_spte;
1149
1150         /* Huge pages aren't expected to be modified without first being zapped. */
1151         WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
1152
1153         if (iter->level != PG_LEVEL_4K ||
1154             !is_shadow_present_pte(iter->old_spte))
1155                 return false;
1156
1157         /*
1158          * Note, when changing a read-only SPTE, it's not strictly necessary to
1159          * zero the SPTE before setting the new PFN, but doing so preserves the
1160          * invariant that the PFN of a present leaf SPTE can never change.
1161          * See __handle_changed_spte().
1162          */
1163         tdp_mmu_set_spte(kvm, iter, 0);
1164
1165         if (!pte_write(range->pte)) {
1166                 new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
1167                                                                   pte_pfn(range->pte));
1168
1169                 tdp_mmu_set_spte(kvm, iter, new_spte);
1170         }
1171
1172         return true;
1173 }
1174
1175 /*
1176  * Handle the changed_pte MMU notifier for the TDP MMU.
1177  * range->pte holds the new PTE mapping the HVA specified by the MMU
1178  * notifier.
1179  * Returns true if a flush is needed before releasing the MMU lock.
1180  */
1181 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1182 {
1183         bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1184
1185         /* FIXME: return 'flush' instead of flushing here. */
1186         if (flush)
1187                 kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
1188
1189         return false;
1190 }
1191
1192 /*
1193  * Remove write access from all SPTEs at or above min_level that map GFNs
1194  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1195  * be flushed.
1196  */
1197 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1198                              gfn_t start, gfn_t end, int min_level)
1199 {
1200         struct tdp_iter iter;
1201         u64 new_spte;
1202         bool spte_set = false;
1203
1204         rcu_read_lock();
1205
1206         BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1207
1208         for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1209                                    min_level, start, end) {
1210 retry:
1211                 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1212                         continue;
1213
1214                 if (!is_shadow_present_pte(iter.old_spte) ||
1215                     !is_last_spte(iter.old_spte, iter.level) ||
1216                     !(iter.old_spte & PT_WRITABLE_MASK))
1217                         continue;
1218
1219                 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1220
1221                 if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1222                         goto retry;
1223
1224                 spte_set = true;
1225         }
1226
1227         rcu_read_unlock();
1228         return spte_set;
1229 }
1230
1231 /*
1232  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1233  * only affect leaf SPTEs down to min_level.
1234  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1235  */
1236 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1237                              const struct kvm_memory_slot *slot, int min_level)
1238 {
1239         struct kvm_mmu_page *root;
1240         bool spte_set = false;
1241
1242         lockdep_assert_held_read(&kvm->mmu_lock);
1243
1244         for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1245                 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1246                              slot->base_gfn + slot->npages, min_level);
1247
1248         return spte_set;
1249 }
1250
1251 /*
1252  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1253  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1254  * If AD bits are not enabled, this will require clearing the writable bit on
1255  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1256  * be flushed.
1257  */
1258 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1259                            gfn_t start, gfn_t end)
1260 {
1261         struct tdp_iter iter;
1262         u64 new_spte;
1263         bool spte_set = false;
1264
1265         rcu_read_lock();
1266
1267         tdp_root_for_each_leaf_pte(iter, root, start, end) {
1268 retry:
1269                 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1270                         continue;
1271
1272                 if (spte_ad_need_write_protect(iter.old_spte)) {
1273                         if (is_writable_pte(iter.old_spte))
1274                                 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1275                         else
1276                                 continue;
1277                 } else {
1278                         if (iter.old_spte & shadow_dirty_mask)
1279                                 new_spte = iter.old_spte & ~shadow_dirty_mask;
1280                         else
1281                                 continue;
1282                 }
1283
1284                 if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1285                         goto retry;
1286
1287                 spte_set = true;
1288         }
1289
1290         rcu_read_unlock();
1291         return spte_set;
1292 }
1293
1294 /*
1295  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1296  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1297  * If AD bits are not enabled, this will require clearing the writable bit on
1298  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1299  * be flushed.
1300  */
1301 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1302                                   const struct kvm_memory_slot *slot)
1303 {
1304         struct kvm_mmu_page *root;
1305         bool spte_set = false;
1306
1307         lockdep_assert_held_read(&kvm->mmu_lock);
1308
1309         for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1310                 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1311                                 slot->base_gfn + slot->npages);
1312
1313         return spte_set;
1314 }
1315
1316 /*
1317  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1318  * set in mask, starting at gfn. The given memslot is expected to contain all
1319  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1320  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1321  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1322  */
1323 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1324                                   gfn_t gfn, unsigned long mask, bool wrprot)
1325 {
1326         struct tdp_iter iter;
1327         u64 new_spte;
1328
1329         rcu_read_lock();
1330
1331         tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1332                                     gfn + BITS_PER_LONG) {
1333                 if (!mask)
1334                         break;
1335
1336                 if (iter.level > PG_LEVEL_4K ||
1337                     !(mask & (1UL << (iter.gfn - gfn))))
1338                         continue;
1339
1340                 mask &= ~(1UL << (iter.gfn - gfn));
1341
1342                 if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1343                         if (is_writable_pte(iter.old_spte))
1344                                 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1345                         else
1346                                 continue;
1347                 } else {
1348                         if (iter.old_spte & shadow_dirty_mask)
1349                                 new_spte = iter.old_spte & ~shadow_dirty_mask;
1350                         else
1351                                 continue;
1352                 }
1353
1354                 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1355         }
1356
1357         rcu_read_unlock();
1358 }
1359
1360 /*
1361  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1362  * set in mask, starting at gfn. The given memslot is expected to contain all
1363  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1364  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1365  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1366  */
1367 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1368                                        struct kvm_memory_slot *slot,
1369                                        gfn_t gfn, unsigned long mask,
1370                                        bool wrprot)
1371 {
1372         struct kvm_mmu_page *root;
1373
1374         lockdep_assert_held_write(&kvm->mmu_lock);
1375         for_each_tdp_mmu_root(kvm, root, slot->as_id)
1376                 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1377 }
1378
1379 /*
1380  * Clear leaf entries which could be replaced by large mappings, for
1381  * GFNs within the slot.
1382  */
1383 static void zap_collapsible_spte_range(struct kvm *kvm,
1384                                        struct kvm_mmu_page *root,
1385                                        const struct kvm_memory_slot *slot)
1386 {
1387         gfn_t start = slot->base_gfn;
1388         gfn_t end = start + slot->npages;
1389         struct tdp_iter iter;
1390         kvm_pfn_t pfn;
1391
1392         rcu_read_lock();
1393
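             /*
              * Zap any last-level SPTE that maps a pfn which could instead be
              * covered by a larger mapping, so a subsequent fault can rebuild
              * it as a huge page; non-present and non-leaf SPTEs are skipped.
              */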
1394         tdp_root_for_each_pte(iter, root, start, end) {
1395 retry:
1396                 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1397                         continue;
1398
1399                 if (!is_shadow_present_pte(iter.old_spte) ||
1400                     !is_last_spte(iter.old_spte, iter.level))
1401                         continue;
1402
1403                 pfn = spte_to_pfn(iter.old_spte);
1404                 if (kvm_is_reserved_pfn(pfn) ||
1405                     iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
1406                                                             pfn, PG_LEVEL_NUM))
1407                         continue;
1408
1409                 /* Note, a successful atomic zap also does a remote TLB flush. */
1410                 if (!tdp_mmu_zap_spte_atomic(kvm, &iter))
1411                         goto retry;
1412         }
1413
1414         rcu_read_unlock();
1415 }
1416
1417 /*
1418  * Zap the leaf SPTEs which could be replaced by larger mappings, for GFNs
1419  * within the slot, so they can be recreated as huge pages on fault.
1420  */
1421 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1422                                        const struct kvm_memory_slot *slot)
1423 {
1424         struct kvm_mmu_page *root;
1425
1426         lockdep_assert_held_read(&kvm->mmu_lock);
1427
1428         for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1429                 zap_collapsible_spte_range(kvm, root, slot);
1430 }
1431
1432 /*
1433  * Removes write access on the last level SPTE mapping this GFN and unsets the
1434  * MMU-writable bit to ensure future writes continue to be intercepted.
1435  * Returns true if an SPTE was set and a TLB flush is needed.
1436  */
1437 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1438                               gfn_t gfn, int min_level)
1439 {
1440         struct tdp_iter iter;
1441         u64 new_spte;
1442         bool spte_set = false;
1443
1444         BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1445
1446         rcu_read_lock();
1447
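             /*
              * Walk down from the root to @min_level for this single GFN; at
              * most one present last-level SPTE maps it, and only that SPTE
              * is write-protected here.
              */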
1448         for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1449                                    min_level, gfn, gfn + 1) {
1450                 if (!is_shadow_present_pte(iter.old_spte) ||
1451                     !is_last_spte(iter.old_spte, iter.level))
1452                         continue;
1453
1454                 new_spte = iter.old_spte &
1455                         ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1456
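                     /*
                      * If both the writable and MMU-writable bits were already
                      * clear, the SPTE is fully write-protected and there is
                      * nothing left to do for this GFN.
                      */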
1457                 if (new_spte == iter.old_spte)
1458                         break;
1459
1460                 tdp_mmu_set_spte(kvm, &iter, new_spte);
1461                 spte_set = true;
1462         }
1463
1464         rcu_read_unlock();
1465
1466         return spte_set;
1467 }
1468
1469 /*
1470  * Removes write access on the last level SPTE mapping this GFN and unsets the
1471  * MMU-writable bit to ensure future writes continue to be intercepted.
1472  * Returns true if an SPTE was set and a TLB flush is needed.
1473  */
1474 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1475                                    struct kvm_memory_slot *slot, gfn_t gfn,
1476                                    int min_level)
1477 {
1478         struct kvm_mmu_page *root;
1479         bool spte_set = false;
1480
1481         lockdep_assert_held_write(&kvm->mmu_lock);
1482         for_each_tdp_mmu_root(kvm, root, slot->as_id)
1483                 spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1484
1485         return spte_set;
1486 }
1487
1488 /*
1489  * Return the level of the lowest level SPTE added to sptes.
1490  * That SPTE may be non-present.
1491  *
1492  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1493  */
1494 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1495                          int *root_level)
1496 {
1497         struct tdp_iter iter;
1498         struct kvm_mmu *mmu = vcpu->arch.mmu;
1499         gfn_t gfn = addr >> PAGE_SHIFT;
1500         int leaf = -1;
1501
1502         *root_level = vcpu->arch.mmu->shadow_root_level;
1503
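             /*
              * Record the SPTE seen at each level, indexed by level; @leaf
              * ends up as the lowest level reached, i.e. the index of the
              * last entry written to @sptes.
              */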
1504         tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1505                 leaf = iter.level;
1506                 sptes[leaf] = iter.old_spte;
1507         }
1508
1509         return leaf;
1510 }
1511
1512 /*
1513  * Returns the last level spte pointer of the shadow page walk for the given
1514  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
1515  * walk could be performed, returns NULL and *spte does not contain valid data.
1516  *
1517  * Contract:
1518  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1519  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
1520  *
1521  * WARNING: This function is only intended to be called during fast_page_fault.
1522  */
1523 u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
1524                                         u64 *spte)
1525 {
1526         struct tdp_iter iter;
1527         struct kvm_mmu *mmu = vcpu->arch.mmu;
1528         gfn_t gfn = addr >> PAGE_SHIFT;
1529         tdp_ptep_t sptep = NULL;
1530
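             /*
              * Walk down to the last reachable SPTE for @gfn, remembering both
              * its value and its pointer; as noted above, that SPTE may be
              * non-present.
              */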
1531         tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1532                 *spte = iter.old_spte;
1533                 sptep = iter.sptep;
1534         }
1535
1536         /*
1537          * Perform the rcu_dereference to get the raw spte pointer value since
1538          * we are passing it up to fast_page_fault, which is shared with the
1539          * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1540          * annotation.
1541          *
1542          * This is safe since fast_page_fault obeys the contracts of this
1543          * function as well as all TDP MMU contracts around modifying SPTEs
1544          * outside of mmu_lock.
1545          */
1546         return rcu_dereference(sptep);
1547 }