// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block)
{
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	struct gfn_to_pfn_cache *gpc;
	bool evict_vcpus = false;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		write_lock_irq(&gpc->lock);

		/* Only a single page so no need to care about length */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			gpc->valid = false;

			/*
			 * If a guest vCPU could be using the physical address,
			 * it needs to be forced out of guest mode.
			 */
			if (gpc->usage & KVM_GUEST_USES_PFN) {
				if (!evict_vcpus) {
					evict_vcpus = true;
					bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
				}
				__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
			}
		}
		write_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);

	if (evict_vcpus) {
		/*
		 * KVM needs to ensure the vCPU is fully out of guest context
		 * before allowing the invalidation to continue.
		 */
		unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
		bool called;

		/*
		 * If the OOM reaper is active, then all vCPUs should have
		 * been stopped already, so perform the request without
		 * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
		 */
		if (!may_block)
			req &= ~KVM_REQUEST_WAIT;

		called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);

		WARN_ON_ONCE(called && !may_block);
	}
}

bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

	if (!gpc->active)
		return false;

	if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
		return false;

	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
		return false;

	if (!gpc->valid)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_gpc_check);

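/*
 * Illustrative usage sketch, not part of this file: callers are expected to
 * pair kvm_gpc_check() with kvm_gpc_refresh() in a retry loop, holding
 * gpc->lock for read across any access through gpc->khva.  The 'flags' and
 * 'data' names below are hypothetical:
 *
 *	read_lock_irqsave(&gpc->lock, flags);
 *	while (!kvm_gpc_check(gpc, sizeof(*data))) {
 *		read_unlock_irqrestore(&gpc->lock, flags);
 *
 *		if (kvm_gpc_refresh(gpc, sizeof(*data)))
 *			return -EFAULT;
 *
 *		read_lock_irqsave(&gpc->lock, flags);
 *	}
 *	memcpy(data, gpc->khva, sizeof(*data));	// access only while locked
 *	read_unlock_irqrestore(&gpc->lock, flags);
 */
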
static void *gpc_map(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return kmap(pfn_to_page(pfn));

#ifdef CONFIG_HAS_IOMEM
	return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#else
	return NULL;
#endif
}

static void gpc_unmap(kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (is_error_noslot_pfn(pfn) || !khva)
		return;

	if (pfn_valid(pfn)) {
		kunmap(pfn_to_page(pfn));
		return;
	}

#ifdef CONFIG_HAS_IOMEM
	memunmap(khva);
#endif
}

static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
	/*
	 * mn_active_invalidate_count acts for all intents and purposes
	 * like mmu_invalidate_in_progress here; but the latter cannot
	 * be used here because the invalidation of caches in the
	 * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
	 * is elevated.
	 *
	 * Note, it does not matter that mn_active_invalidate_count
	 * is not protected by gpc->lock.  It is guaranteed to
	 * be elevated before the mmu_notifier acquires gpc->lock, and
	 * isn't dropped until after mmu_invalidate_seq is updated.
	 */
	if (kvm->mn_active_invalidate_count)
		return true;

	/*
	 * Ensure mn_active_invalidate_count is read before
	 * mmu_invalidate_seq.  This pairs with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end() to guarantee either the
	 * old (non-zero) value of mn_active_invalidate_count or the
	 * new (incremented) value of mmu_invalidate_seq is observed.
	 */
	smp_rmb();
	return kvm->mmu_invalidate_seq != mmu_seq;
}

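/*
 * Summary of the contract implemented below: hva_to_pfn_retry() is called
 * with both gpc->refresh_lock and gpc->lock held (the latter for write),
 * drops gpc->lock to fault in and map the pfn (both of which can sleep),
 * and loops until the lookup completes without racing an mmu_notifier
 * invalidation.  It returns 0 with gpc->lock re-acquired for write, or
 * -EFAULT on failure (also with the lock re-acquired).
 */
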
static int hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
	/* Note, the new page offset may be different than the old! */
	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
	void *new_khva = NULL;
	unsigned long mmu_seq;

	lockdep_assert_held(&gpc->refresh_lock);

	lockdep_assert_held_write(&gpc->lock);

	/*
	 * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
	 * assets have already been updated and so a concurrent check() from a
	 * different task may not fail the gpa/uhva/generation checks.
	 */
	gpc->valid = false;

	do {
		mmu_seq = gpc->kvm->mmu_invalidate_seq;
		smp_rmb();

		write_unlock_irq(&gpc->lock);

		/*
		 * If the previous iteration "failed" due to an mmu_notifier
		 * event, release the pfn and unmap the kernel virtual address
		 * from the previous attempt.  Unmapping might sleep, so this
		 * needs to be done after dropping the lock.  Opportunistically
		 * check for resched while the lock isn't held.
		 */
		if (new_pfn != KVM_PFN_ERR_FAULT) {
			/*
			 * Keep the mapping if the previous iteration reused
			 * the existing mapping and didn't create a new one.
			 */
			if (new_khva != old_khva)
				gpc_unmap(new_pfn, new_khva);

			kvm_release_pfn_clean(new_pfn);

			cond_resched();
		}

		/* We always request a writeable mapping */
		new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
		if (is_error_noslot_pfn(new_pfn))
			goto out_error;

		/*
		 * Obtain a new kernel mapping if KVM itself will access the
		 * pfn.  Note, kmap() and memremap() can both sleep, so this
		 * too must be done outside of gpc->lock!
		 */
		if (gpc->usage & KVM_HOST_USES_PFN) {
			if (new_pfn == gpc->pfn)
				new_khva = old_khva;
			else
				new_khva = gpc_map(new_pfn);

			if (!new_khva) {
				kvm_release_pfn_clean(new_pfn);
				goto out_error;
			}
		}

		write_lock_irq(&gpc->lock);

		/*
		 * Other tasks must wait for _this_ refresh to complete before
		 * attempting to refresh.
		 */
		WARN_ON_ONCE(gpc->valid);
	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

	gpc->valid = true;
	gpc->pfn = new_pfn;
	gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);

	/*
	 * Put the reference to the _new_ pfn.  The pfn is now tracked by the
	 * cache and can be safely migrated, swapped, etc... as the cache will
	 * invalidate any mappings in response to relevant mmu_notifier events.
	 */
	kvm_release_pfn_clean(new_pfn);

	return 0;

out_error:
	write_lock_irq(&gpc->lock);

	return -EFAULT;
}

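/*
 * Serialization note, summarizing the code below: refreshes are fully
 * serialized against one another by gpc->refresh_lock, readers are excluded
 * by gpc->lock, and the final unmap of a stale pfn is deferred until both
 * locks have been dropped, because unmapping may sleep.
 */
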
static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
			     unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
	unsigned long page_offset = gpa & ~PAGE_MASK;
	bool unmap_old = false;
	unsigned long old_uhva;
	kvm_pfn_t old_pfn;
	void *old_khva;
	int ret;

	/*
	 * The cached access must fit within a single page.  The 'len'
	 * argument exists only to enforce that.
	 */
	if (page_offset + len > PAGE_SIZE)
		return -EINVAL;

	/*
	 * If another task is refreshing the cache, wait for it to complete.
	 * There is no guarantee that concurrent refreshes will see the same
	 * gpa, memslots generation, etc..., so they must be fully serialized.
	 */
	mutex_lock(&gpc->refresh_lock);

	write_lock_irq(&gpc->lock);

	if (!gpc->active) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_pfn = gpc->pfn;
	old_khva = gpc->khva - offset_in_page(gpc->khva);
	old_uhva = gpc->uhva;

	/* If the userspace HVA is invalid, refresh that first */
	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
	    kvm_is_error_hva(gpc->uhva)) {
		gfn_t gfn = gpa_to_gfn(gpa);

		gpc->gpa = gpa;
		gpc->generation = slots->generation;
		gpc->memslot = __gfn_to_memslot(slots, gfn);
		gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

		if (kvm_is_error_hva(gpc->uhva)) {
			ret = -EFAULT;
			goto out;
		}
	}

	/*
	 * If the userspace HVA changed or the PFN was already invalid,
	 * drop the lock and do the HVA to PFN lookup again.
	 */
	if (!gpc->valid || old_uhva != gpc->uhva) {
		ret = hva_to_pfn_retry(gpc);
	} else {
		/*
		 * If the HVA=>PFN mapping was already valid, don't unmap it.
		 * But do update gpc->khva because the offset within the page
		 * may have changed.
		 */
		gpc->khva = old_khva + page_offset;
		ret = 0;
		goto out_unlock;
	}

out:
	/*
	 * Invalidate the cache and purge the pfn/khva if the refresh failed.
	 * Some/all of the uhva, gpa, and memslot generation info may still be
	 * valid, leave it as is.
	 */
	if (ret) {
		gpc->valid = false;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
	}

	/* Detect a pfn change before dropping the lock! */
	unmap_old = (old_pfn != gpc->pfn);

out_unlock:
	write_unlock_irq(&gpc->lock);

	mutex_unlock(&gpc->refresh_lock);

	if (unmap_old)
		gpc_unmap(old_pfn, old_khva);

	return ret;
}

int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_refresh);

void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
{
	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
	WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);

	rwlock_init(&gpc->lock);
	mutex_init(&gpc->refresh_lock);

	gpc->kvm = kvm;
	gpc->vcpu = vcpu;
	gpc->usage = usage;
	gpc->pfn = KVM_PFN_ERR_FAULT;
	gpc->uhva = KVM_HVA_ERR_BAD;
}
EXPORT_SYMBOL_GPL(kvm_gpc_init);

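/*
 * Illustrative lifecycle sketch (hypothetical caller, 'gpa' and 'len'
 * assumed): a cache is initialized once, activated to bind it to a guest
 * physical address, checked/refreshed around each access, and finally
 * deactivated:
 *
 *	kvm_gpc_init(gpc, kvm, vcpu, KVM_GUEST_AND_HOST_USE_PFN);
 *	ret = kvm_gpc_activate(gpc, gpa, len);	// len must fit in one page
 *	...					// kvm_gpc_check()/kvm_gpc_refresh()
 *	kvm_gpc_deactivate(gpc);
 */
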
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
	struct kvm *kvm = gpc->kvm;

	if (!gpc->active) {
		if (KVM_BUG_ON(gpc->valid, kvm))
			return -EIO;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);

		/*
		 * Activate the cache after adding it to the list, a concurrent
		 * refresh must not establish a mapping until the cache is
		 * reachable by mmu_notifier events.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = true;
		write_unlock_irq(&gpc->lock);
	}
	return __kvm_gpc_refresh(gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);

void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
	struct kvm *kvm = gpc->kvm;
	kvm_pfn_t old_pfn;
	void *old_khva;

	if (gpc->active) {
		/*
		 * Deactivate the cache before removing it from the list, KVM
		 * must stall mmu_notifier events until all users go away, i.e.
		 * until gpc->lock is dropped and refresh is guaranteed to fail.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = false;
		gpc->valid = false;

		/*
		 * Leave the GPA => uHVA cache intact, it's protected by the
		 * memslot generation.  The PFN lookup needs to be redone every
		 * time as mmu_notifier protection is lost when the cache is
		 * removed from the VM's gpc_list.
		 */
		old_khva = gpc->khva - offset_in_page(gpc->khva);
		gpc->khva = NULL;

		old_pfn = gpc->pfn;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		write_unlock_irq(&gpc->lock);

		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);

		gpc_unmap(old_pfn, old_khva);
	}
}
EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);