// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

static struct kvm_pgtable *hyp_pgtable;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static unsigned long io_map_base;

/*
 * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
 * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
 * CONFIG_LOCKUP_DETECTOR or CONFIG_LOCKDEP. Additionally, holding the lock
 * for too long will also starve other vCPUs. We also have to make sure that
 * the page tables are not freed while we release the lock.
 */
static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
			      phys_addr_t end,
			      int (*fn)(struct kvm_pgtable *, u64, u64),
			      bool resched)
{
	int ret;
	u64 next;

	do {
		struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;

		if (!pgt)
			return -EINVAL;

		next = stage2_pgd_addr_end(kvm, addr, end);
		ret = fn(pgt, addr, next - addr);
		if (ret)
			break;

		if (resched && next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (addr = next, addr != end);

	return ret;
}

#define stage2_apply_range_resched(kvm, addr, end, fn)		\
	stage2_apply_range(kvm, addr, end, fn, true)

static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;
	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
}

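/*
 * Treat as "device" any pfn that the host does not cover with a normal,
 * cacheable linear mapping (pfn_is_map_memory() fails) - typically MMIO
 * exposed to the VMM through a VM_PFNMAP mapping. Such pages get device
 * attributes at stage-2 and are never mapped executable.
 */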
static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_is_map_memory(pfn);
}

static void *stage2_memcache_zalloc_page(void *arg)
{
	struct kvm_mmu_memory_cache *mc = arg;

	/* Allocated with __GFP_ZERO, so no need to zero */
	return kvm_mmu_memory_cache_alloc(mc);
}

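/*
 * Host-side helpers plugged into struct kvm_pgtable_mm_ops below: allocation,
 * refcounting and VA<->PA conversion for page-table pages that live in the
 * host kernel's linear map.
 */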
static void *kvm_host_zalloc_pages_exact(size_t size)
{
	return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}

static void kvm_host_get_page(void *addr)
{
	get_page(virt_to_page(addr));
}

static void kvm_host_put_page(void *addr)
{
	put_page(virt_to_page(addr));
}

static int kvm_host_page_count(void *addr)
{
	return page_count(virt_to_page(addr));
}

static phys_addr_t kvm_host_pa(void *addr)
{
	return __pa(addr);
}

static void *kvm_host_va(phys_addr_t phys)
{
	return __va(phys);
}

static void clean_dcache_guest_page(void *va, size_t size)
{
	__clean_dcache_guest_page(va, size);
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
	__invalidate_icache_guest_page(va, size);
}

/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM.  However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we flush to make sure the IO subsystem will
 * never hit in the cache.
 *
 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
 * we then fully enforce cacheability of RAM, no matter what the guest
 * does.
 */

/**
 * __unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @mmu:	The KVM stage-2 MMU pointer
 * @start:	The intermediate physical base address of the range to unmap
 * @size:	The size of the area to unmap
 * @may_block:	Whether or not we are permitted to block
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
				 bool may_block)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	phys_addr_t end = start + size;

	assert_spin_locked(&kvm->mmu_lock);
	WARN_ON(size & ~PAGE_MASK);
	WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
				   may_block));
}

static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
	__unmap_stage2_range(mmu, start, size, true);
}

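/*
 * Clean+invalidate the data cache for everything currently mapped at stage-2
 * in this memslot; used when the guest toggles its caches via set/way
 * operations.
 */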
static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;

	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 */
void free_hyp_pgds(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);
	if (hyp_pgtable) {
		kvm_pgtable_hyp_destroy(hyp_pgtable);
		kfree(hyp_pgtable);
		hyp_pgtable = NULL;
	}
	mutex_unlock(&kvm_hyp_pgd_mutex);
}

static bool kvm_host_owns_hyp_mappings(void)
{
	if (static_branch_likely(&kvm_protected_mode_initialized))
		return false;

	/*
	 * This can happen at boot time when __create_hyp_mappings() is called
	 * after the hyp protection has been enabled, but the static key has
	 * not been flipped yet.
	 */
	if (!hyp_pgtable && is_protected_kvm_enabled())
		return false;

	WARN_ON(!hyp_pgtable);

	return true;
}

static int __create_hyp_mappings(unsigned long start, unsigned long size,
				 unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	if (!kvm_host_owns_hyp_mappings()) {
		return kvm_call_hyp_nvhe(__pkvm_create_mappings,
					 start, size, phys, prot);
	}

	mutex_lock(&kvm_hyp_pgd_mutex);
	err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
	mutex_unlock(&kvm_hyp_pgd_mutex);

	return err;
}

static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 * @prot:	The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = kern_hyp_va((unsigned long)from);
	unsigned long end = kern_hyp_va((unsigned long)to);

	if (is_kernel_in_hyp_mode())
		return 0;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
					    prot);
		if (err)
			return err;
	}

	return 0;
}

static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
					unsigned long *haddr,
					enum kvm_pgtable_prot prot)
{
	unsigned long base;
	int ret = 0;

	if (!kvm_host_owns_hyp_mappings()) {
		base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
					 phys_addr, size, prot);
		if (IS_ERR_OR_NULL((void *)base))
			return PTR_ERR((void *)base);
		*haddr = base;

		return 0;
	}

	mutex_lock(&kvm_hyp_pgd_mutex);

	/*
	 * This assumes that we have enough space below the idmap
	 * page to allocate our VAs. If not, the check below will
	 * kick in. A potential alternative would be to detect that
	 * overflow and switch to an allocation above the idmap.
	 *
	 * The allocated size is always a multiple of PAGE_SIZE.
	 */
	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
	base = io_map_base - size;

	/*
	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
	 * allocating the new area, as it would indicate we've
	 * overflowed the idmap/IO address range.
	 */
	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
		ret = -ENOMEM;
	else
		io_map_base = base;

	mutex_unlock(&kvm_hyp_pgd_mutex);

	if (ret)
		goto out;

	ret = __create_hyp_mappings(base, size, phys_addr, prot);
	if (ret)
		goto out;

	*haddr = base + offset_in_page(phys_addr);
out:
	return ret;
}

/**
 * create_hyp_io_mappings - Map IO into both kernel and HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @kaddr:	Kernel VA for this mapping
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr)
{
	unsigned long addr;
	int ret;

	*kaddr = ioremap(phys_addr, size);
	if (!*kaddr)
		return -ENOMEM;

	if (is_kernel_in_hyp_mode()) {
		*haddr = *kaddr;
		return 0;
	}

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_DEVICE);
	if (ret) {
		iounmap(*kaddr);
		*kaddr = NULL;
		*haddr = NULL;
		return ret;
	}

	*haddr = (void __iomem *)addr;
	return 0;
}

/**
 * create_hyp_exec_mappings - Map an executable range into HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr)
{
	unsigned long addr;
	int ret;

	BUG_ON(is_kernel_in_hyp_mode());

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_EXEC);
	if (ret) {
		*haddr = NULL;
		return ret;
	}

	*haddr = (void *)addr;
	return 0;
}

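/*
 * Memory management callbacks handed to the stage-2 page-table code: how to
 * allocate and refcount table pages, convert between host VAs and PAs, and
 * perform the cache maintenance needed when installing guest mappings.
 */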
static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
	.zalloc_page		= stage2_memcache_zalloc_page,
	.zalloc_pages_exact	= kvm_host_zalloc_pages_exact,
	.free_pages_exact	= free_pages_exact,
	.get_page		= kvm_host_get_page,
	.put_page		= kvm_host_put_page,
	.page_count		= kvm_host_page_count,
	.phys_to_virt		= kvm_host_va,
	.virt_to_phys		= kvm_host_pa,
	.dcache_clean_inval_poc	= clean_dcache_guest_page,
	.icache_inval_pou	= invalidate_icache_guest_page,
};

/**
 * kvm_init_stage2_mmu - Initialise an S2 MMU structure
 * @kvm:	The pointer to the KVM structure
 * @mmu:	The pointer to the s2 MMU structure
 *
 * Allocates only the stage-2 HW PGD level table(s).
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
	int cpu, err;
	struct kvm_pgtable *pgt;

	if (mmu->pgt != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
	if (!pgt)
		return -ENOMEM;

	err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops);
	if (err)
		goto out_free_pgtable;

	mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
	if (!mmu->last_vcpu_ran) {
		err = -ENOMEM;
		goto out_destroy_pgtable;
	}

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

	mmu->arch = &kvm->arch;
	mmu->pgt = pgt;
	mmu->pgd_phys = __pa(pgt->pgd);
	mmu->vmid.vmid_gen = 0;
	return 0;

out_destroy_pgtable:
	kvm_pgtable_stage2_destroy(pgt);
out_free_pgtable:
	kfree(pgt);
	return err;
}

static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;
		hva_t vm_start, vm_end;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memory regions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(current->mm);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	mmap_read_unlock(current->mm);
	srcu_read_unlock(&kvm->srcu, idx);
}

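/*
 * Tear down a VM's stage-2 page tables. The pgt pointer is cleared under
 * mmu_lock so that concurrent fault handlers stop using it, and the tables
 * themselves are destroyed after the lock is dropped.
 */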
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	struct kvm_pgtable *pgt = NULL;

	spin_lock(&kvm->mmu_lock);
	pgt = mmu->pgt;
	if (pgt) {
		mmu->pgd_phys = 0;
		mmu->pgt = NULL;
		free_percpu(mmu->last_vcpu_ran);
	}
	spin_unlock(&kvm->mmu_lock);

	if (pgt) {
		kvm_pgtable_stage2_destroy(pgt);
		kfree(pgt);
	}
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 * @writable:	Whether or not to create a writable mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr;
	int ret = 0;
	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
				     KVM_PGTABLE_PROT_R |
				     (writable ? KVM_PGTABLE_PROT_W : 0);

	size += offset_in_page(guest_ipa);
	guest_ipa &= PAGE_MASK;

	for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
		ret = kvm_mmu_topup_memory_cache(&cache,
						 kvm_mmu_cache_min_pages(kvm));
		if (ret)
			break;

		spin_lock(&kvm->mmu_lock);
		ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
					     &cache);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			break;

		pa += PAGE_SIZE;
	}

	kvm_mmu_free_memory_cache(&cache);
	return ret;
}

/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @mmu:	The KVM stage-2 MMU pointer
 * @addr:	Start address of range
 * @end:	End address of range
 */
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);

	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
}

/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:	The KVM pointer
 * @slot:	The memory slot to write protect
 *
 * Called to start logging dirty pages when the KVM_MEM_LOG_DIRTY_PAGES flag is
 * set on a memory region. After this function returns, all present PUD, PMD
 * and PTE entries in the region are write protected and the dirty page log can
 * then be read.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start, end;

	if (WARN_ON_ONCE(!memslot))
		return;

	start = memslot->base_gfn << PAGE_SHIFT;
	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(&kvm->arch.mmu, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. The
 * caller must hold kvm_mmu_lock.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
					    struct kvm_memory_slot *slot,
					    gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(&kvm->arch.mmu, start, end);
}

/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 *		dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset, unsigned long mask)
{
	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}

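/*
 * Decide whether the faulting address may be mapped with a stage-2 block of
 * @map_size: the userspace address and the IPA must be equally aligned within
 * the block, and the whole block must be contained in the memslot.
 */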
static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
					       unsigned long hva,
					       unsigned long map_size)
{
	gpa_t gpa_start;
	hva_t uaddr_start, uaddr_end;
	size_t size;

	/* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
	if (map_size == PAGE_SIZE)
		return true;

	size = memslot->npages * PAGE_SIZE;

	gpa_start = memslot->base_gfn << PAGE_SHIFT;

	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	/*
	 * Pages belonging to memslots that don't have the same alignment
	 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
	 * PMD/PUD entries, because we'll end up mapping the wrong pages.
	 *
	 * Consider a layout like the following:
	 *
	 *    memslot->userspace_addr:
	 *    +-----+--------------------+--------------------+---+
	 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
	 *    +-----+--------------------+--------------------+---+
	 *
	 *    memslot->base_gfn << PAGE_SHIFT:
	 *      +---+--------------------+--------------------+-----+
	 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
	 *      +---+--------------------+--------------------+-----+
	 *
	 * If we create those stage-2 blocks, we'll end up with this incorrect
	 * mapping:
	 *   d -> f
	 *   e -> g
	 *   f -> h
	 */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit block size mappings
	 * for the beginning and end of a non-block aligned and non-block sized
	 * memory slot (illustrated by the head and tail parts of the
	 * userspace view above containing pages 'abcde' and 'xyz',
	 * respectively).
	 *
	 * Note that it doesn't matter if we do the check using the
	 * userspace_addr or the base_gfn, as both are equally aligned (per
	 * the check above) and equally sized.
	 */
	return (hva & ~(map_size - 1)) >= uaddr_start &&
	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}

/*
 * Check if the given hva is backed by a transparent huge page (THP) and
 * whether it can be mapped using block mapping in stage2. If so, adjust
 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
 * supported. This will need to be updated to support other THP sizes.
 *
 * Returns the size of the mapping.
 */
static unsigned long
transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
			    unsigned long hva, kvm_pfn_t *pfnp,
			    phys_addr_t *ipap)
{
	kvm_pfn_t pfn = *pfnp;

	/*
	 * Make sure the adjustment is done only for THP pages. Also make
	 * sure that the HVA and IPA are sufficiently aligned and that the
	 * block map is contained within the memslot.
	 */
	if (kvm_is_transparent_hugepage(pfn) &&
	    fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page.  However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page.  We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		*ipap &= PMD_MASK;
		kvm_release_pfn_clean(pfn);
		pfn &= ~(PTRS_PER_PMD - 1);
		get_page(pfn_to_page(pfn));
		*pfnp = pfn;

		return PMD_SIZE;
	}

	/* Use page mapping if we cannot use block mapping. */
	return PAGE_SIZE;
}

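/*
 * Work out the largest page shift that can back the faulting address, based
 * solely on the VMA: the hugetlbfs page size if applicable, or, for PFNMAP
 * regions, how well the HVA and the target physical address are aligned
 * against PUD/PMD boundaries.
 */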
static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
{
	unsigned long pa;

	if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
		return huge_page_shift(hstate_vma(vma));

	if (!(vma->vm_flags & VM_PFNMAP))
		return PAGE_SHIFT;

	VM_BUG_ON(is_vm_hugetlb_page(vma));

	pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);

#ifndef __PAGETABLE_PMD_FOLDED
	if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
	    ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
	    ALIGN(hva, PUD_SIZE) <= vma->vm_end)
		return PUD_SHIFT;
#endif

	if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
	    ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
	    ALIGN(hva, PMD_SIZE) <= vma->vm_end)
		return PMD_SHIFT;

	return PAGE_SHIFT;
}

/*
 * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
 * able to see the page's tags and therefore they must be initialised first. If
 * PG_mte_tagged is set, tags have already been initialised.
 *
 * The race in the test/set of the PG_mte_tagged flag is handled by:
 * - preventing VM_SHARED mappings in a memslot with MTE, preventing two VMs
 *   racing to sanitise the same page
 * - mmap_lock protects between a VM faulting a page in and the VMM performing
 *   an mprotect() to add VM_MTE
 */
static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
			     unsigned long size)
{
	unsigned long i, nr_pages = size >> PAGE_SHIFT;
	struct page *page;

	if (!kvm_has_mte(kvm))
		return 0;

	/*
	 * pfn_to_online_page() is used to reject ZONE_DEVICE pages
	 * that may not support tags.
	 */
	page = pfn_to_online_page(pfn);

	if (!page)
		return -EFAULT;

	for (i = 0; i < nr_pages; i++, page++) {
		if (!test_bit(PG_mte_tagged, &page->flags)) {
			mte_clear_page_tags(page_address(page));
			set_bit(PG_mte_tagged, &page->flags);
		}
	}

	return 0;
}

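/*
 * Resolve a stage-2 fault on memslot-backed memory: pin the host page, pick
 * the largest mapping size the memslot, VMA and fault type allow, and install
 * (or, for permission faults, relax) the stage-2 mapping. A concurrent MMU
 * notifier invalidation makes this return 0 so that the guest simply retries
 * the access.
 */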
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot, unsigned long hva,
			  unsigned long fault_status)
{
	int ret = 0;
	bool write_fault, writable, force_pte = false;
	bool exec_fault;
	bool device = false;
	bool shared;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	short vma_shift;
	gfn_t gfn;
	kvm_pfn_t pfn;
	bool logging_active = memslot_is_logging(memslot);
	unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
	unsigned long vma_pagesize, fault_granule;
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
	struct kvm_pgtable *pgt;

	fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
	write_fault = kvm_is_write_fault(vcpu);
	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
	VM_BUG_ON(write_fault && exec_fault);

	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/*
	 * Let's check if we will get back a huge page backed by hugetlbfs, or
	 * get block mapping for device MMIO region.
	 */
	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, hva);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
		return -EFAULT;
	}

	/*
	 * logging_active is guaranteed to never be true for VM_PFNMAP
	 * memslots.
	 */
	if (logging_active) {
		force_pte = true;
		vma_shift = PAGE_SHIFT;
	} else {
		vma_shift = get_vma_page_shift(vma, hva);
	}

	shared = (vma->vm_flags & VM_SHARED);

	switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SHIFT:
		if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
			break;
		fallthrough;
#endif
	case CONT_PMD_SHIFT:
		vma_shift = PMD_SHIFT;
		fallthrough;
	case PMD_SHIFT:
		if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
			break;
		fallthrough;
	case CONT_PTE_SHIFT:
		vma_shift = PAGE_SHIFT;
		force_pte = true;
		fallthrough;
	case PAGE_SHIFT:
		break;
	default:
		WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
	}

	vma_pagesize = 1UL << vma_shift;
	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
		fault_ipa &= ~(vma_pagesize - 1);

	gfn = fault_ipa >> PAGE_SHIFT;
	mmap_read_unlock(current->mm);

	/*
	 * Permission faults just need to update the existing leaf entry,
	 * and so normally don't require allocations from the memcache. The
	 * only exception to this is when dirty logging is enabled at runtime
	 * and a write fault needs to collapse a block entry into a table.
	 */
	if (fault_status != FSC_PERM || (logging_active && write_fault)) {
		ret = kvm_mmu_topup_memory_cache(memcache,
						 kvm_mmu_cache_min_pages(kvm));
		if (ret)
			return ret;
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to being unmapped before we have a
	 * chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_gfn will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 *
	 * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
	 * used to avoid unnecessary overhead introduced to locate the memory
	 * slot because it's always fixed even if @gfn is adjusted for huge
	 * pages.
	 */
	smp_rmb();

	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
				   write_fault, &writable, NULL);
	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(hva, vma_shift);
		return 0;
	}
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn)) {
		/*
		 * If the page was identified as device early by looking at
		 * the VMA flags, vma_pagesize is already representing the
		 * largest quantity we can map.  If instead it was mapped
		 * via gfn_to_pfn_prot(), vma_pagesize is set to PAGE_SIZE
		 * and must not be upgraded.
		 *
		 * In both cases, we don't let transparent_hugepage_adjust()
		 * change things at the last minute.
		 */
		device = true;
	} else if (logging_active && !write_fault) {
		/*
		 * Only actually map the page as writable if this was a write
		 * fault.
		 */
		writable = false;
	}

	if (exec_fault && device)
		return -ENOEXEC;

	spin_lock(&kvm->mmu_lock);
	pgt = vcpu->arch.hw_mmu->pgt;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/*
	 * If we are not forced to use page mapping, check if we are
	 * backed by a THP and thus use block mapping if possible.
	 */
	if (vma_pagesize == PAGE_SIZE && !(force_pte || device))
		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
							   &pfn, &fault_ipa);

	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
		/* Check the VMM hasn't introduced a new VM_SHARED VMA */
		if (!shared)
			ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
		else
			ret = -EFAULT;
		if (ret)
			goto out_unlock;
	}

	if (writable)
		prot |= KVM_PGTABLE_PROT_W;

	if (exec_fault)
		prot |= KVM_PGTABLE_PROT_X;

	if (device)
		prot |= KVM_PGTABLE_PROT_DEVICE;
	else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		prot |= KVM_PGTABLE_PROT_X;

	/*
	 * When we get an FSC_PERM fault, we only need to relax the
	 * permissions if vma_pagesize equals fault_granule. Otherwise,
	 * kvm_pgtable_stage2_map() must be called to change the block size.
	 */
	if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
	} else {
		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
					     __pfn_to_phys(pfn), prot,
					     memcache);
	}

	/* Mark the page dirty only if the fault is handled successfully */
	if (writable && !ret) {
		kvm_set_pfn_dirty(pfn);
		mark_page_dirty_in_slot(kvm, memslot, gfn);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(pfn);
	kvm_release_pfn_clean(pfn);
	return ret != -EAGAIN ? ret : 0;
}

/* Resolve the access fault by making the page young again. */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	pte_t pte;
	kvm_pte_t kpte;
	struct kvm_s2_mmu *mmu;

	trace_kvm_access_fault(fault_ipa);

	spin_lock(&vcpu->kvm->mmu_lock);
	mmu = vcpu->arch.hw_mmu;
	kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
	spin_unlock(&vcpu->kvm->mmu_lock);

	pte = __pte(kpte);
	if (pte_valid(pte))
		kvm_set_pfn_accessed(pte_pfn(pte));
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool is_iabt, write_fault, writable;
	gfn_t gfn;
	int ret, idx;

	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);

	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

	/* Synchronous External Abort? */
	if (kvm_vcpu_abt_issea(vcpu)) {
		/*
		 * For RAS the host kernel may handle this abort.
		 * There is no need to pass the error into the guest.
		 */
		if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
			kvm_inject_vabt(vcpu);

		return 1;
	}

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
	    fault_status != FSC_ACCESS) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
			(unsigned long)kvm_vcpu_get_esr(vcpu));
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		/*
		 * The guest has put either its instructions or its page-tables
		 * somewhere it shouldn't have. Userspace won't be able to do
		 * anything about this (there's no syndrome for a start), so
		 * re-inject the abort back into the guest.
		 */
		if (is_iabt) {
			ret = -ENOEXEC;
			goto out;
		}

		if (kvm_vcpu_abt_iss1tw(vcpu)) {
			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * Check for a cache maintenance operation. Since we
		 * ended-up here, we know it is outside of any memory
		 * slot. But we can't find out if that is for a device,
		 * or if the guest is just being stupid. The only thing
		 * we know for sure is that this range cannot be cached.
		 *
		 * So let's assume that the guest is just being
		 * cautious, and skip the instruction.
		 */
		if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
			kvm_incr_pc(vcpu);
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));

	if (fault_status == FSC_ACCESS) {
		handle_access_fault(vcpu, fault_ipa);
		ret = 1;
		goto out_unlock;
	}

	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
	if (ret == 0)
		ret = 1;
out:
	if (ret == -ENOEXEC) {
		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		ret = 1;
	}
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

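/*
 * MMU notifier callbacks, invoked by the generic KVM code (with mmu_lock
 * held) whenever the host changes or removes the userspace mappings that
 * back guest memory.
 */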
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.mmu.pgt)
		return false;

	__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
			     (range->end - range->start) << PAGE_SHIFT,
			     range->may_block);

	return false;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	kvm_pfn_t pfn = pte_pfn(range->pte);
	int ret;

	if (!kvm->arch.mmu.pgt)
		return false;

	WARN_ON(range->end - range->start != 1);

	ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
	if (ret)
		return false;

	/*
	 * We've moved a page around, probably through CoW, so let's treat
	 * it just like a translation fault and the map handler will clean
	 * the cache to the PoC.
	 *
	 * The MMU notifiers will have unmapped a huge PMD before calling
	 * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
	 * therefore we never need to clear out a huge PMD through this
	 * calling path and a memcache is not required.
	 */
	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
			       PAGE_SIZE, __pfn_to_phys(pfn),
			       KVM_PGTABLE_PROT_R, NULL);

	return false;
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	u64 size = (range->end - range->start) << PAGE_SHIFT;
	kvm_pte_t kpte;
	pte_t pte;

	if (!kvm->arch.mmu.pgt)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
					range->start << PAGE_SHIFT);
	pte = __pte(kpte);
	return pte_valid(pte) && pte_young(pte);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.mmu.pgt)
		return false;

	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
					   range->start << PAGE_SHIFT);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
	return __pa(hyp_pgtable->pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

static int kvm_map_idmap_text(void)
{
	unsigned long size = hyp_idmap_end - hyp_idmap_start;
	int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
					PAGE_HYP_EXEC);
	if (err)
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);

	return err;
}

static void *kvm_hyp_zalloc_page(void *arg)
{
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
	.zalloc_page		= kvm_hyp_zalloc_page,
	.get_page		= kvm_host_get_page,
	.put_page		= kvm_host_put_page,
	.phys_to_virt		= kvm_host_va,
	.virt_to_phys		= kvm_host_pa,
};

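/*
 * One-off hypervisor MMU setup: compute the idmap window used by the EL2
 * init code, allocate the hyp page tables and map the idmap text into them.
 */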
int kvm_mmu_init(u32 *hyp_va_bits)
{
	int err;

	hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
	hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
	hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
	hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);

	/*
	 * We rely on the linker script to ensure at build time that the HYP
	 * init code does not cross a page boundary.
	 */
	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);

	*hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
	kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
	kvm_debug("HYP VA range: %lx:%lx\n",
		  kern_hyp_va(PAGE_OFFSET),
		  kern_hyp_va((unsigned long)high_memory - 1));

	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
	    hyp_idmap_start <  kern_hyp_va((unsigned long)high_memory - 1) &&
	    hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
		/*
		 * The idmap page is intersecting with the VA space,
		 * it is not safe to continue further.
		 */
		kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
		err = -EINVAL;
		goto out;
	}

	hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL);
	if (!hyp_pgtable) {
		kvm_err("Hyp mode page-table not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
	if (err)
		goto out_free_pgtable;

	err = kvm_map_idmap_text();
	if (err)
		goto out_destroy_pgtable;

	io_map_base = hyp_idmap_start;
	return 0;

out_destroy_pgtable:
	kvm_pgtable_hyp_destroy(hyp_pgtable);
out_free_pgtable:
	kfree(hyp_pgtable);
	hyp_pgtable = NULL;
out:
	return err;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * At this point the memslot has been committed and there is an
	 * allocated dirty_bitmap[]; dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
		/*
		 * If we're running with the initial-all-set flag, we don't
		 * need to write protect any pages: they are all reported as
		 * dirty already. Huge pages and normal pages will be write
		 * protected gradually.
		 */
		if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
			kvm_mmu_wp_memory_region(kvm, mem->slot);
		}
	}
}

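/*
 * Validate a memslot change before it is committed: the slot must fit in the
 * guest's IPA space, and the VMAs covering it must not combine MTE with
 * VM_SHARED or request dirty logging on PFNMAP (IO) regions.
 */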
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	hva_t hva = mem->userspace_addr;
	hva_t reg_end = hva + mem->memory_size;
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
	    change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the KVM guest.
	 */
	if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
		return -EFAULT;

	mmap_read_lock(current->mm);
	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/*
		 * VM_SHARED mappings are not allowed with MTE to avoid races
		 * when updating the PG_mte_tagged page flag, see
		 * sanitise_mte_tags for more details.
		 */
		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
			ret = -EINVAL;
			break;
		}

		if (vma->vm_flags & VM_PFNMAP) {
			/* IO region dirty page logging not allowed */
			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				break;
			}
		}
		hva = min(reg_end, vma->vm_end);
	} while (hva < reg_end);

	mmap_read_unlock(current->mm);
	return ret;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_free_stage2_pgd(&kvm->arch.mmu);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
	spin_unlock(&kvm->mmu_lock);
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 *
 * Main problems:
 * - S/W ops are local to a CPU (not broadcast)
 * - We have line migration behind our back (speculation)
 * - System caches don't support S/W at all (damn!)
 *
 * In the face of the above, the best we can do is to try and convert
 * S/W ops to VA ops. Because the guest is not allowed to infer the
 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
 * which is a rather good thing for us.
 *
 * Also, it is only used when turning caches on/off ("The expected
 * usage of the cache maintenance instructions that operate by set/way
 * is associated with the cache maintenance instructions associated
 * with the powerdown and powerup of caches, if this is required by
 * the implementation.").
 *
 * We use the following policy:
 *
 * - If we trap a S/W operation, we enable VM trapping to detect
 *   caches being turned on/off, and do a full clean.
 *
 * - We flush the caches on both caches being turned on and off.
 *
 * - Once the caches are enabled, we stop trapping VM ops.
 */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = *vcpu_hcr(vcpu);

	/*
	 * If this is the first time we do a S/W operation
	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
	 * VM trapping.
	 *
	 * Otherwise, rely on the VM trapping to wait for the MMU +
	 * Caches to be turned off. At that point, we'll be able to
	 * clean the caches again.
	 */
	if (!(hcr & HCR_TVM)) {
		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
					vcpu_has_cache_enabled(vcpu));
		stage2_flush_vm(vcpu->kvm);
		*vcpu_hcr(vcpu) = hcr | HCR_TVM;
	}
}

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/*
	 * If switching the MMU+caches on, need to invalidate the caches.
	 * If switching it off, need to clean the caches.
	 * Clean + invalidate does the trick always.
	 */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* Caches are now on, stop trapping VM ops (until a S/W op) */
	if (now_enabled)
		*vcpu_hcr(vcpu) &= ~HCR_TVM;

	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}