// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block)
{
	struct gfn_to_pfn_cache *gpc;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		read_lock_irq(&gpc->lock);

		/* Only a single page so no need to care about length */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			read_unlock_irq(&gpc->lock);

			/*
			 * There is a small window here where the cache could
			 * be modified, and invalidation would no longer be
			 * necessary. Hence check again whether invalidation
			 * is still necessary once the write lock has been
			 * acquired.
			 */

			write_lock_irq(&gpc->lock);
			if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
			    gpc->uhva >= start && gpc->uhva < end)
				gpc->valid = false;
			write_unlock_irq(&gpc->lock);
			continue;
		}

		read_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);
}
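
/*
 * The hook above is driven from KVM's common mmu_notifier
 * invalidate_range_start callback, which elevates
 * kvm->mn_active_invalidate_count before invoking it; the retry logic in
 * mmu_notifier_retry_cache() below relies on that ordering to detect
 * in-flight invalidations.
 */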

bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

	if (!gpc->active)
		return false;

	/*
	 * If the page was cached from a memslot, make sure the memslots have
	 * not been re-configured.
	 */
	if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
		return false;

	if (kvm_is_error_hva(gpc->uhva))
		return false;

	if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
		return false;

	if (!gpc->valid)
		return false;

	return true;
}

static void *gpc_map(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return kmap(pfn_to_page(pfn));

#ifdef CONFIG_HAS_IOMEM
	return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#else
	return NULL;
#endif
}

static void gpc_unmap(kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (is_error_noslot_pfn(pfn) || !khva)
		return;

	if (pfn_valid(pfn)) {
		kunmap(pfn_to_page(pfn));
		return;
	}

#ifdef CONFIG_HAS_IOMEM
	memunmap(khva);
#endif
}

static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
	/*
	 * mn_active_invalidate_count acts for all intents and purposes
	 * like mmu_invalidate_in_progress here; but the latter cannot
	 * be used here because the invalidation of caches in the
	 * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
	 * is elevated.
	 *
	 * Note, it does not matter that mn_active_invalidate_count
	 * is not protected by gpc->lock. It is guaranteed to
	 * be elevated before the mmu_notifier acquires gpc->lock, and
	 * isn't dropped until after mmu_invalidate_seq is updated.
	 */
	if (kvm->mn_active_invalidate_count)
		return true;

	/*
	 * Ensure mn_active_invalidate_count is read before
	 * mmu_invalidate_seq. This pairs with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end() to guarantee either the
	 * old (non-zero) value of mn_active_invalidate_count or the
	 * new (incremented) value of mmu_invalidate_seq is observed.
	 */
	smp_rmb();
	return kvm->mmu_invalidate_seq != mmu_seq;
}

static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
	/* Note, the new page offset may be different than the old! */
	void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
	void *new_khva = NULL;
	unsigned long mmu_seq;

	lockdep_assert_held(&gpc->refresh_lock);

	lockdep_assert_held_write(&gpc->lock);

	/*
	 * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
	 * assets have already been updated and so a concurrent check() from a
	 * different task may not fail the gpa/uhva/generation checks.
	 */
	gpc->valid = false;

	do {
		mmu_seq = gpc->kvm->mmu_invalidate_seq;
		smp_rmb();

		write_unlock_irq(&gpc->lock);

		/*
		 * If the previous iteration "failed" due to an mmu_notifier
		 * event, release the pfn and unmap the kernel virtual address
		 * from the previous attempt. Unmapping might sleep, so this
		 * needs to be done after dropping the lock. Opportunistically
		 * check for resched while the lock isn't held.
		 */
		if (new_pfn != KVM_PFN_ERR_FAULT) {
			/*
			 * Keep the mapping if the previous iteration reused
			 * the existing mapping and didn't create a new one.
			 */
			if (new_khva != old_khva)
				gpc_unmap(new_pfn, new_khva);

			kvm_release_pfn_clean(new_pfn);

			cond_resched();
		}

		/* We always request a writeable mapping */
		new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
		if (is_error_noslot_pfn(new_pfn))
			goto out_error;

		/*
		 * Obtain a new kernel mapping if KVM itself will access the
		 * pfn. Note, kmap() and memremap() can both sleep, so this
		 * too must be done outside of gpc->lock!
		 */
		if (new_pfn == gpc->pfn)
			new_khva = old_khva;
		else
			new_khva = gpc_map(new_pfn);

		if (!new_khva) {
			kvm_release_pfn_clean(new_pfn);
			goto out_error;
		}

		write_lock_irq(&gpc->lock);

		/*
		 * Other tasks must wait for _this_ refresh to complete before
		 * attempting to refresh.
		 */
		WARN_ON_ONCE(gpc->valid);
	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

	gpc->valid = true;
	gpc->pfn = new_pfn;
	gpc->khva = new_khva + offset_in_page(gpc->uhva);

	/*
	 * Put the reference to the _new_ pfn. The pfn is now tracked by the
	 * cache and can be safely migrated, swapped, etc... as the cache will
	 * invalidate any mappings in response to relevant mmu_notifier events.
	 */
	kvm_release_pfn_clean(new_pfn);

	return 0;

out_error:
	write_lock_irq(&gpc->lock);

	return -EFAULT;
}

static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
			     unsigned long len)
{
	unsigned long page_offset;
	bool unmap_old = false;
	unsigned long old_uhva;
	kvm_pfn_t old_pfn;
	bool hva_change = false;
	void *old_khva;
	int ret;

	/* Either gpa or uhva must be valid, but not both */
	if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
		return -EINVAL;

	/*
	 * The cached access must fit within a single page. The 'len' argument
	 * exists only to enforce that.
	 */
	page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
					      offset_in_page(gpa);
	if (page_offset + len > PAGE_SIZE)
		return -EINVAL;

	lockdep_assert_held(&gpc->refresh_lock);

	write_lock_irq(&gpc->lock);

	if (!gpc->active) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_pfn = gpc->pfn;
	old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
	old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);

	if (kvm_is_error_gpa(gpa)) {
		gpc->gpa = INVALID_GPA;
		gpc->memslot = NULL;
		gpc->uhva = PAGE_ALIGN_DOWN(uhva);

		if (gpc->uhva != old_uhva)
			hva_change = true;
	} else {
		struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

		if (gpc->gpa != gpa || gpc->generation != slots->generation ||
		    kvm_is_error_hva(gpc->uhva)) {
			gfn_t gfn = gpa_to_gfn(gpa);

			gpc->gpa = gpa;
			gpc->generation = slots->generation;
			gpc->memslot = __gfn_to_memslot(slots, gfn);
			gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

			if (kvm_is_error_hva(gpc->uhva)) {
				ret = -EFAULT;
				goto out;
			}

			/*
			 * Even if the GPA and/or the memslot generation changed, the
			 * HVA may still be the same.
			 */
			if (gpc->uhva != old_uhva)
				hva_change = true;
		} else {
			gpc->uhva = old_uhva;
		}
	}

	/* Note: the offset must be correct before calling hva_to_pfn_retry() */
	gpc->uhva += page_offset;

	/*
	 * If the userspace HVA changed or the PFN was already invalid,
	 * drop the lock and do the HVA to PFN lookup again.
	 */
	if (!gpc->valid || hva_change) {
		ret = hva_to_pfn_retry(gpc);
	} else {
		/*
		 * If the HVA→PFN mapping was already valid, don't unmap it.
		 * But do update gpc->khva because the offset within the page
		 * may have changed.
		 */
		gpc->khva = old_khva + page_offset;
		ret = 0;
		goto out_unlock;
	}

out:
	/*
	 * Invalidate the cache and purge the pfn/khva if the refresh failed.
	 * Some/all of the uhva, gpa, and memslot generation info may still be
	 * valid, leave it as is.
	 */
	if (ret) {
		gpc->valid = false;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
	}

	/* Detect a pfn change before dropping the lock! */
	unmap_old = (old_pfn != gpc->pfn);

out_unlock:
	write_unlock_irq(&gpc->lock);

	if (unmap_old)
		gpc_unmap(old_pfn, old_khva);

	return ret;
}

int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	unsigned long uhva;

	guard(mutex)(&gpc->refresh_lock);

	/*
	 * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
	 * or HVA-based, not both. For GPA-based caches, the HVA will be
	 * recomputed during refresh if necessary.
	 */
	uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;

	return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
}
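
/*
 * Illustrative sketch of the expected caller pattern (not taken verbatim from
 * any one user): gpc->khva may only be dereferenced while gpc->lock is held
 * for read and kvm_gpc_check() has succeeded; refresh must happen with the
 * lock dropped because it can sleep. The 'len' value and the surrounding
 * function are assumptions made for the example:
 *
 *	read_lock_irqsave(&gpc->lock, flags);
 *	while (!kvm_gpc_check(gpc, len)) {
 *		read_unlock_irqrestore(&gpc->lock, flags);
 *
 *		if (kvm_gpc_refresh(gpc, len))
 *			return -EFAULT;
 *
 *		read_lock_irqsave(&gpc->lock, flags);
 *	}
 *
 *	... access the page via gpc->khva ...
 *
 *	read_unlock_irqrestore(&gpc->lock, flags);
 */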

void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
{
	rwlock_init(&gpc->lock);
	mutex_init(&gpc->refresh_lock);

	gpc->kvm = kvm;
	gpc->pfn = KVM_PFN_ERR_FAULT;
	gpc->gpa = INVALID_GPA;
	gpc->uhva = KVM_HVA_ERR_BAD;
	gpc->active = gpc->valid = false;
}

static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
			      unsigned long len)
{
	struct kvm *kvm = gpc->kvm;

	guard(mutex)(&gpc->refresh_lock);

	if (!gpc->active) {
		if (KVM_BUG_ON(gpc->valid, kvm))
			return -EIO;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);

		/*
		 * Activate the cache after adding it to the list, a concurrent
		 * refresh must not establish a mapping until the cache is
		 * reachable by mmu_notifier events.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = true;
		write_unlock_irq(&gpc->lock);
	}
	return __kvm_gpc_refresh(gpc, gpa, uhva, len);
}

int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
	return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
}

int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
{
	return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
}

void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
	struct kvm *kvm = gpc->kvm;
	kvm_pfn_t old_pfn;
	void *old_khva;

	guard(mutex)(&gpc->refresh_lock);

	if (gpc->active) {
		/*
		 * Deactivate the cache before removing it from the list, KVM
		 * must stall mmu_notifier events until all users go away, i.e.
		 * until gpc->lock is dropped and refresh is guaranteed to fail.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = false;
		gpc->valid = false;

		/*
		 * Leave the GPA => uHVA cache intact, it's protected by the
		 * memslot generation. The PFN lookup needs to be redone every
		 * time as mmu_notifier protection is lost when the cache is
		 * removed from the VM's gpc_list.
		 */
		old_khva = gpc->khva - offset_in_page(gpc->khva);
		gpc->khva = NULL;

		old_pfn = gpc->pfn;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		write_unlock_irq(&gpc->lock);

		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);

		gpc_unmap(old_pfn, old_khva);
	}
}
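
/*
 * Lifecycle sketch (illustrative, not a real user of the API): a cache is
 * initialised once, activated with either a GPA or a uHVA, accessed via the
 * check()/refresh() pattern shown after kvm_gpc_refresh(), and deactivated
 * when no longer needed. The 'gpa' and 'len' values are assumptions made for
 * the example:
 *
 *	struct gfn_to_pfn_cache gpc;
 *
 *	kvm_gpc_init(&gpc, kvm);
 *	if (kvm_gpc_activate(&gpc, gpa, len))
 *		return -EFAULT;
 *
 *	... access under gpc.lock as shown above ...
 *
 *	kvm_gpc_deactivate(&gpc);
 */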