1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
7 #include <linux/mman.h>
8 #include <linux/kvm_host.h>
10 #include <linux/hugetlb.h>
11 #include <linux/sched/signal.h>
12 #include <trace/events/kvm.h>
13 #include <asm/pgalloc.h>
14 #include <asm/cacheflush.h>
15 #include <asm/kvm_arm.h>
16 #include <asm/kvm_mmu.h>
17 #include <asm/kvm_ras.h>
18 #include <asm/kvm_asm.h>
19 #include <asm/kvm_emulate.h>
24 static pgd_t *boot_hyp_pgd;
25 static pgd_t *hyp_pgd;
26 static pgd_t *merged_hyp_pgd;
27 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
29 static unsigned long hyp_idmap_start;
30 static unsigned long hyp_idmap_end;
31 static phys_addr_t hyp_idmap_vector;
33 static unsigned long io_map_base;
35 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
37 #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
38 #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
40 static bool is_iomap(unsigned long flags)
42 return flags & KVM_S2PTE_FLAG_IS_IOMAP;
45 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
47 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
51 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
52 * @kvm: pointer to kvm structure.
54 * Interface to HYP function to flush all VM TLB entries
56 void kvm_flush_remote_tlbs(struct kvm *kvm)
58 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
61 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
63 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
67 * D-Cache management functions. They take the page table entries by
68 * value, as they are flushing the cache using the kernel mapping (or
71 static void kvm_flush_dcache_pte(pte_t pte)
73 __kvm_flush_dcache_pte(pte);
76 static void kvm_flush_dcache_pmd(pmd_t pmd)
78 __kvm_flush_dcache_pmd(pmd);
81 static void kvm_flush_dcache_pud(pud_t pud)
83 __kvm_flush_dcache_pud(pud);
86 static bool kvm_is_device_pfn(unsigned long pfn)
88 return !pfn_valid(pfn);
92 * stage2_dissolve_pmd() - clear and flush huge PMD entry
93 * @kvm: pointer to kvm structure.
95 * @pmd: pmd pointer for IPA
97 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
99 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
101 if (!pmd_thp_or_huge(*pmd))
105 kvm_tlb_flush_vmid_ipa(kvm, addr);
106 put_page(virt_to_page(pmd));
110 * stage2_dissolve_pud() - clear and flush huge PUD entry
111 * @kvm: pointer to kvm structure.
113 * @pud: pud pointer for IPA
115 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
117 static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
119 if (!stage2_pud_huge(kvm, *pudp))
122 stage2_pud_clear(kvm, pudp);
123 kvm_tlb_flush_vmid_ipa(kvm, addr);
124 put_page(virt_to_page(pudp));
127 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
132 BUG_ON(max > KVM_NR_MEM_OBJS);
133 if (cache->nobjs >= min)
135 while (cache->nobjs < max) {
136 page = (void *)__get_free_page(GFP_PGTABLE_USER);
139 cache->objects[cache->nobjs++] = page;
144 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
147 free_page((unsigned long)mc->objects[--mc->nobjs]);
150 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
154 BUG_ON(!mc || !mc->nobjs);
155 p = mc->objects[--mc->nobjs];
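/*
 * Illustrative sketch (not part of the original file): the cache above is
 * meant to be topped up outside mmu_lock, where GFP_PGTABLE_USER allocations
 * may sleep, and drained with mmu_memory_cache_alloc() under the lock, where
 * sleeping is not allowed. The caller below is hypothetical; user_mem_abort()
 * and kvm_phys_addr_ioremap() later in this file follow the same pattern.
 */
#if 0
static int example_map_one_page(struct kvm *kvm,
                                struct kvm_mmu_memory_cache *cache,
                                phys_addr_t ipa, pte_t pte)
{
        int ret;

        /* May sleep: top up the cache before taking the spinlock. */
        ret = mmu_topup_memory_cache(cache, kvm_mmu_cache_min_pages(kvm),
                                     KVM_NR_MEM_OBJS);
        if (ret)
                return ret;

        spin_lock(&kvm->mmu_lock);
        /* stage2_set_pte() draws intermediate table pages from the cache. */
        ret = stage2_set_pte(kvm, cache, ipa, &pte, 0);
        spin_unlock(&kvm->mmu_lock);

        return ret;
}
#endif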
159 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
161 pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
162 stage2_pgd_clear(kvm, pgd);
163 kvm_tlb_flush_vmid_ipa(kvm, addr);
164 stage2_pud_free(kvm, pud_table);
165 put_page(virt_to_page(pgd));
168 static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
170 pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
171 VM_BUG_ON(stage2_pud_huge(kvm, *pud));
172 stage2_pud_clear(kvm, pud);
173 kvm_tlb_flush_vmid_ipa(kvm, addr);
174 stage2_pmd_free(kvm, pmd_table);
175 put_page(virt_to_page(pud));
178 static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
180 pte_t *pte_table = pte_offset_kernel(pmd, 0);
181 VM_BUG_ON(pmd_thp_or_huge(*pmd));
183 kvm_tlb_flush_vmid_ipa(kvm, addr);
184 free_page((unsigned long)pte_table);
185 put_page(virt_to_page(pmd));
188 static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
190 WRITE_ONCE(*ptep, new_pte);
194 static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
196 WRITE_ONCE(*pmdp, new_pmd);
200 static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
202 kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
205 static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
207 WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
211 static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
213 WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
218 * Unmapping vs dcache management:
220 * If a guest maps certain memory pages as uncached, all writes will
221 * bypass the data cache and go directly to RAM. However, the CPUs
222 * can still speculate reads (not writes) and fill cache lines with
225 * Those cache lines will be *clean* cache lines though, so a
226 * clean+invalidate operation is equivalent to an invalidate
227 * operation, because no cache lines are marked dirty.
229 * Those clean cache lines could be filled prior to an uncached write
230 * by the guest, and the cache coherent IO subsystem would therefore
231 * end up writing old data to disk.
233 * This is why right after unmapping a page/section and invalidating
234 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
235 * the IO subsystem will never hit in the cache.
237 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
238 * we then fully enforce cacheability of RAM, no matter what the guest
241 static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
242 phys_addr_t addr, phys_addr_t end)
244 phys_addr_t start_addr = addr;
245 pte_t *pte, *start_pte;
247 start_pte = pte = pte_offset_kernel(pmd, addr);
249 if (!pte_none(*pte)) {
250 pte_t old_pte = *pte;
252 kvm_set_pte(pte, __pte(0));
253 kvm_tlb_flush_vmid_ipa(kvm, addr);
255 /* No need to invalidate the cache for device mappings */
256 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
257 kvm_flush_dcache_pte(old_pte);
259 put_page(virt_to_page(pte));
261 } while (pte++, addr += PAGE_SIZE, addr != end);
263 if (stage2_pte_table_empty(kvm, start_pte))
264 clear_stage2_pmd_entry(kvm, pmd, start_addr);
267 static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
268 phys_addr_t addr, phys_addr_t end)
270 phys_addr_t next, start_addr = addr;
271 pmd_t *pmd, *start_pmd;
273 start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
275 next = stage2_pmd_addr_end(kvm, addr, end);
276 if (!pmd_none(*pmd)) {
277 if (pmd_thp_or_huge(*pmd)) {
278 pmd_t old_pmd = *pmd;
281 kvm_tlb_flush_vmid_ipa(kvm, addr);
283 kvm_flush_dcache_pmd(old_pmd);
285 put_page(virt_to_page(pmd));
287 unmap_stage2_ptes(kvm, pmd, addr, next);
290 } while (pmd++, addr = next, addr != end);
292 if (stage2_pmd_table_empty(kvm, start_pmd))
293 clear_stage2_pud_entry(kvm, pud, start_addr);
296 static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
297 phys_addr_t addr, phys_addr_t end)
299 phys_addr_t next, start_addr = addr;
300 pud_t *pud, *start_pud;
302 start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
304 next = stage2_pud_addr_end(kvm, addr, end);
305 if (!stage2_pud_none(kvm, *pud)) {
306 if (stage2_pud_huge(kvm, *pud)) {
307 pud_t old_pud = *pud;
309 stage2_pud_clear(kvm, pud);
310 kvm_tlb_flush_vmid_ipa(kvm, addr);
311 kvm_flush_dcache_pud(old_pud);
312 put_page(virt_to_page(pud));
314 unmap_stage2_pmds(kvm, pud, addr, next);
317 } while (pud++, addr = next, addr != end);
319 if (stage2_pud_table_empty(kvm, start_pud))
320 clear_stage2_pgd_entry(kvm, pgd, start_addr);
324 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
325 * @kvm: The VM pointer
326 * @start: The intermediate physical base address of the range to unmap
327 * @size: The size of the area to unmap
329 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
330 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
331 * destroying the VM), otherwise another faulting VCPU may come in and mess
332 * with things behind our backs.
334 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
337 phys_addr_t addr = start, end = start + size;
340 assert_spin_locked(&kvm->mmu_lock);
341 WARN_ON(size & ~PAGE_MASK);
343 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
346 * Make sure the page table is still active, as another thread
347 * could have possibly freed the page table, while we released
350 if (!READ_ONCE(kvm->arch.pgd))
352 next = stage2_pgd_addr_end(kvm, addr, end);
353 if (!stage2_pgd_none(kvm, *pgd))
354 unmap_stage2_puds(kvm, pgd, addr, next);
356 * If the range is too large, release the kvm->mmu_lock
357 * to prevent starvation and lockup detector warnings.
360 cond_resched_lock(&kvm->mmu_lock);
361 } while (pgd++, addr = next, addr != end);
364 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
365 phys_addr_t addr, phys_addr_t end)
369 pte = pte_offset_kernel(pmd, addr);
371 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
372 kvm_flush_dcache_pte(*pte);
373 } while (pte++, addr += PAGE_SIZE, addr != end);
376 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
377 phys_addr_t addr, phys_addr_t end)
382 pmd = stage2_pmd_offset(kvm, pud, addr);
384 next = stage2_pmd_addr_end(kvm, addr, end);
385 if (!pmd_none(*pmd)) {
386 if (pmd_thp_or_huge(*pmd))
387 kvm_flush_dcache_pmd(*pmd);
389 stage2_flush_ptes(kvm, pmd, addr, next);
391 } while (pmd++, addr = next, addr != end);
394 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
395 phys_addr_t addr, phys_addr_t end)
400 pud = stage2_pud_offset(kvm, pgd, addr);
402 next = stage2_pud_addr_end(kvm, addr, end);
403 if (!stage2_pud_none(kvm, *pud)) {
404 if (stage2_pud_huge(kvm, *pud))
405 kvm_flush_dcache_pud(*pud);
407 stage2_flush_pmds(kvm, pud, addr, next);
409 } while (pud++, addr = next, addr != end);
412 static void stage2_flush_memslot(struct kvm *kvm,
413 struct kvm_memory_slot *memslot)
415 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
416 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
420 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
422 next = stage2_pgd_addr_end(kvm, addr, end);
423 if (!stage2_pgd_none(kvm, *pgd))
424 stage2_flush_puds(kvm, pgd, addr, next);
427 cond_resched_lock(&kvm->mmu_lock);
428 } while (pgd++, addr = next, addr != end);
432 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
433 * @kvm: The struct kvm pointer
435 * Go through the stage 2 page tables and invalidate any cache lines
436 * backing memory already mapped to the VM.
438 static void stage2_flush_vm(struct kvm *kvm)
440 struct kvm_memslots *slots;
441 struct kvm_memory_slot *memslot;
444 idx = srcu_read_lock(&kvm->srcu);
445 spin_lock(&kvm->mmu_lock);
447 slots = kvm_memslots(kvm);
448 kvm_for_each_memslot(memslot, slots)
449 stage2_flush_memslot(kvm, memslot);
451 spin_unlock(&kvm->mmu_lock);
452 srcu_read_unlock(&kvm->srcu, idx);
455 static void clear_hyp_pgd_entry(pgd_t *pgd)
457 pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
459 pud_free(NULL, pud_table);
460 put_page(virt_to_page(pgd));
463 static void clear_hyp_pud_entry(pud_t *pud)
465 pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
466 VM_BUG_ON(pud_huge(*pud));
468 pmd_free(NULL, pmd_table);
469 put_page(virt_to_page(pud));
472 static void clear_hyp_pmd_entry(pmd_t *pmd)
474 pte_t *pte_table = pte_offset_kernel(pmd, 0);
475 VM_BUG_ON(pmd_thp_or_huge(*pmd));
477 pte_free_kernel(NULL, pte_table);
478 put_page(virt_to_page(pmd));
481 static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
483 pte_t *pte, *start_pte;
485 start_pte = pte = pte_offset_kernel(pmd, addr);
487 if (!pte_none(*pte)) {
488 kvm_set_pte(pte, __pte(0));
489 put_page(virt_to_page(pte));
491 } while (pte++, addr += PAGE_SIZE, addr != end);
493 if (hyp_pte_table_empty(start_pte))
494 clear_hyp_pmd_entry(pmd);
497 static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
500 pmd_t *pmd, *start_pmd;
502 start_pmd = pmd = pmd_offset(pud, addr);
504 next = pmd_addr_end(addr, end);
505 /* Hyp doesn't use huge pmds */
507 unmap_hyp_ptes(pmd, addr, next);
508 } while (pmd++, addr = next, addr != end);
510 if (hyp_pmd_table_empty(start_pmd))
511 clear_hyp_pud_entry(pud);
514 static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
517 pud_t *pud, *start_pud;
519 start_pud = pud = pud_offset(pgd, addr);
521 next = pud_addr_end(addr, end);
522 /* Hyp doesn't use huge puds */
524 unmap_hyp_pmds(pud, addr, next);
525 } while (pud++, addr = next, addr != end);
527 if (hyp_pud_table_empty(start_pud))
528 clear_hyp_pgd_entry(pgd);
531 static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
533 return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
536 static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
537 phys_addr_t start, u64 size)
540 phys_addr_t addr = start, end = start + size;
544 * We don't unmap anything from HYP, except at the hyp tear down.
545 * Hence, we don't have to invalidate the TLBs here.
547 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
549 next = pgd_addr_end(addr, end);
551 unmap_hyp_puds(pgd, addr, next);
552 } while (pgd++, addr = next, addr != end);
555 static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
557 __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
560 static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
562 __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
566 * free_hyp_pgds - free Hyp-mode page tables
568 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
569 * therefore contains either mappings in the kernel memory area (above
570 * PAGE_OFFSET), or device mappings in the idmap range.
572 * boot_hyp_pgd should only map the idmap range, and is only used in
573 * the extended idmap case.
575 void free_hyp_pgds(void)
579 mutex_lock(&kvm_hyp_pgd_mutex);
581 id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
584 /* In case we never called hyp_mmu_init() */
586 io_map_base = hyp_idmap_start;
587 unmap_hyp_idmap_range(id_pgd, io_map_base,
588 hyp_idmap_start + PAGE_SIZE - io_map_base);
592 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
597 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
598 (uintptr_t)high_memory - PAGE_OFFSET);
600 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
603 if (merged_hyp_pgd) {
604 clear_page(merged_hyp_pgd);
605 free_page((unsigned long)merged_hyp_pgd);
606 merged_hyp_pgd = NULL;
609 mutex_unlock(&kvm_hyp_pgd_mutex);
612 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
613 unsigned long end, unsigned long pfn,
621 pte = pte_offset_kernel(pmd, addr);
622 kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
623 get_page(virt_to_page(pte));
625 } while (addr += PAGE_SIZE, addr != end);
628 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
629 unsigned long end, unsigned long pfn,
634 unsigned long addr, next;
638 pmd = pmd_offset(pud, addr);
640 BUG_ON(pmd_sect(*pmd));
642 if (pmd_none(*pmd)) {
643 pte = pte_alloc_one_kernel(NULL);
645 kvm_err("Cannot allocate Hyp pte\n");
648 kvm_pmd_populate(pmd, pte);
649 get_page(virt_to_page(pmd));
652 next = pmd_addr_end(addr, end);
654 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
655 pfn += (next - addr) >> PAGE_SHIFT;
656 } while (addr = next, addr != end);
661 static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
662 unsigned long end, unsigned long pfn,
667 unsigned long addr, next;
672 pud = pud_offset(pgd, addr);
674 if (pud_none_or_clear_bad(pud)) {
675 pmd = pmd_alloc_one(NULL, addr);
677 kvm_err("Cannot allocate Hyp pmd\n");
680 kvm_pud_populate(pud, pmd);
681 get_page(virt_to_page(pud));
684 next = pud_addr_end(addr, end);
685 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
688 pfn += (next - addr) >> PAGE_SHIFT;
689 } while (addr = next, addr != end);
694 static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
695 unsigned long start, unsigned long end,
696 unsigned long pfn, pgprot_t prot)
700 unsigned long addr, next;
703 mutex_lock(&kvm_hyp_pgd_mutex);
704 addr = start & PAGE_MASK;
705 end = PAGE_ALIGN(end);
707 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
709 if (pgd_none(*pgd)) {
710 pud = pud_alloc_one(NULL, addr);
712 kvm_err("Cannot allocate Hyp pud\n");
716 kvm_pgd_populate(pgd, pud);
717 get_page(virt_to_page(pgd));
720 next = pgd_addr_end(addr, end);
721 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
724 pfn += (next - addr) >> PAGE_SHIFT;
725 } while (addr = next, addr != end);
727 mutex_unlock(&kvm_hyp_pgd_mutex);
731 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
733 if (!is_vmalloc_addr(kaddr)) {
734 BUG_ON(!virt_addr_valid(kaddr));
737 return page_to_phys(vmalloc_to_page(kaddr)) +
738 offset_in_page(kaddr);
743 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
744 * @from: The virtual kernel start address of the range
745 * @to: The virtual kernel end address of the range (exclusive)
746 * @prot: The protection to be applied to this range
748 * The same virtual address as the kernel virtual address is also used
749 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
752 int create_hyp_mappings(void *from, void *to, pgprot_t prot)
754 phys_addr_t phys_addr;
755 unsigned long virt_addr;
756 unsigned long start = kern_hyp_va((unsigned long)from);
757 unsigned long end = kern_hyp_va((unsigned long)to);
759 if (is_kernel_in_hyp_mode())
762 start = start & PAGE_MASK;
763 end = PAGE_ALIGN(end);
765 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
768 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
769 err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
770 virt_addr, virt_addr + PAGE_SIZE,
771 __phys_to_pfn(phys_addr),
780 static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
781 unsigned long *haddr, pgprot_t prot)
783 pgd_t *pgd = hyp_pgd;
787 mutex_lock(&kvm_hyp_pgd_mutex);
790 * This assumes that we have enough space below the idmap
791 * page to allocate our VAs. If not, the check below will
792 * kick. A potential alternative would be to detect that
793 * overflow and switch to an allocation above the idmap.
795 * The allocated size is always a multiple of PAGE_SIZE.
797 size = PAGE_ALIGN(size + offset_in_page(phys_addr));
798 base = io_map_base - size;
801 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
802 * allocating the new area, as it would indicate we've
803 * overflowed the idmap/IO address range.
805 if ((base ^ io_map_base) & BIT(VA_BITS - 1))
810 mutex_unlock(&kvm_hyp_pgd_mutex);
815 if (__kvm_cpu_uses_extended_idmap())
818 ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
820 __phys_to_pfn(phys_addr), prot);
824 *haddr = base + offset_in_page(phys_addr);
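/*
 * Illustrative, self-contained sketch (not part of the original file) of the
 * overflow check used above: private HYP VAs are handed out downwards from
 * io_map_base, and a flip of BIT(VA_BITS - 1) between the old and new base
 * means the allocation has run out of the idmap/IO half of the VA space.
 * The helper name and the va_bits parameter are illustrative only.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool example_hyp_va_overflows(uint64_t io_map_base, uint64_t size,
                                     unsigned int va_bits)
{
        uint64_t base = io_map_base - size;     /* candidate new base */

        /* Same test as above: did bit (va_bits - 1) change? */
        return (base ^ io_map_base) & (UINT64_C(1) << (va_bits - 1));
}
#endif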
831 * create_hyp_io_mappings - Map IO into both kernel and HYP
832 * @phys_addr: The physical start address which gets mapped
833 * @size: Size of the region being mapped
834 * @kaddr: Kernel VA for this mapping
835 * @haddr: HYP VA for this mapping
837 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
838 void __iomem **kaddr,
839 void __iomem **haddr)
844 *kaddr = ioremap(phys_addr, size);
848 if (is_kernel_in_hyp_mode()) {
853 ret = __create_hyp_private_mapping(phys_addr, size,
854 &addr, PAGE_HYP_DEVICE);
862 *haddr = (void __iomem *)addr;
867 * create_hyp_exec_mappings - Map an executable range into HYP
868 * @phys_addr: The physical start address which gets mapped
869 * @size: Size of the region being mapped
870 * @haddr: HYP VA for this mapping
872 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
878 BUG_ON(is_kernel_in_hyp_mode());
880 ret = __create_hyp_private_mapping(phys_addr, size,
881 &addr, PAGE_HYP_EXEC);
887 *haddr = (void *)addr;
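/*
 * Illustrative usage sketch (not part of the original file): how the Hyp
 * mapping helpers above are typically used. The real call sites live
 * elsewhere (e.g. virt/kvm/arm/arm.c and the vgic code); the function below
 * is hypothetical and only shows the intended shape.
 */
#if 0
static int example_share_with_hyp(struct kvm *kvm)
{
        /* Give EL2 a read/write view of this VM's struct kvm. */
        return create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
}
#endif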
892 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
893 * @kvm: The KVM struct pointer for the VM.
895 * Allocates only the stage-2 HW PGD level table(s) of size defined by
896 * stage2_pgd_size(kvm).
898 * Note we don't need locking here as this is only called when the VM is
899 * created, which can only be done once.
901 int kvm_alloc_stage2_pgd(struct kvm *kvm)
903 phys_addr_t pgd_phys;
906 if (kvm->arch.pgd != NULL) {
907 kvm_err("kvm_arch already initialized?\n");
911 /* Allocate the HW PGD, making sure that each page gets its own refcount */
912 pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
916 pgd_phys = virt_to_phys(pgd);
917 if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
921 kvm->arch.pgd_phys = pgd_phys;
925 static void stage2_unmap_memslot(struct kvm *kvm,
926 struct kvm_memory_slot *memslot)
928 hva_t hva = memslot->userspace_addr;
929 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
930 phys_addr_t size = PAGE_SIZE * memslot->npages;
931 hva_t reg_end = hva + size;
934 * A memory region could potentially cover multiple VMAs, and any holes
935 * between them, so iterate over all of them to find out if we should unmap any of them.
938 * +--------------------------------------------+
939 * +---------------+----------------+ +----------------+
940 * | : VMA 1 | VMA 2 | | VMA 3 : |
941 * +---------------+----------------+ +----------------+
943 * +--------------------------------------------+
946 struct vm_area_struct *vma = find_vma(current->mm, hva);
947 hva_t vm_start, vm_end;
949 if (!vma || vma->vm_start >= reg_end)
953 * Take the intersection of this VMA with the memory region
955 vm_start = max(hva, vma->vm_start);
956 vm_end = min(reg_end, vma->vm_end);
958 if (!(vma->vm_flags & VM_PFNMAP)) {
959 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
960 unmap_stage2_range(kvm, gpa, vm_end - vm_start);
963 } while (hva < reg_end);
967 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
968 * @kvm: The struct kvm pointer
970 * Go through the memregions and unmap any regular RAM
971 * backing memory already mapped to the VM.
973 void stage2_unmap_vm(struct kvm *kvm)
975 struct kvm_memslots *slots;
976 struct kvm_memory_slot *memslot;
979 idx = srcu_read_lock(&kvm->srcu);
980 down_read(&current->mm->mmap_sem);
981 spin_lock(&kvm->mmu_lock);
983 slots = kvm_memslots(kvm);
984 kvm_for_each_memslot(memslot, slots)
985 stage2_unmap_memslot(kvm, memslot);
987 spin_unlock(&kvm->mmu_lock);
988 up_read(&current->mm->mmap_sem);
989 srcu_read_unlock(&kvm->srcu, idx);
993 * kvm_free_stage2_pgd - free all stage-2 tables
994 * @kvm: The KVM struct pointer for the VM.
996 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
997 * underlying level-2 and level-3 tables before freeing the actual level-1 table
998 * and setting the struct pointer to NULL.
1000 void kvm_free_stage2_pgd(struct kvm *kvm)
1004 spin_lock(&kvm->mmu_lock);
1005 if (kvm->arch.pgd) {
1006 unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
1007 pgd = READ_ONCE(kvm->arch.pgd);
1008 kvm->arch.pgd = NULL;
1009 kvm->arch.pgd_phys = 0;
1011 spin_unlock(&kvm->mmu_lock);
1013 /* Free the HW pgd, one page at a time */
1015 free_pages_exact(pgd, stage2_pgd_size(kvm));
1018 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1024 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
1025 if (stage2_pgd_none(kvm, *pgd)) {
1028 pud = mmu_memory_cache_alloc(cache);
1029 stage2_pgd_populate(kvm, pgd, pud);
1030 get_page(virt_to_page(pgd));
1033 return stage2_pud_offset(kvm, pgd, addr);
1036 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1042 pud = stage2_get_pud(kvm, cache, addr);
1043 if (!pud || stage2_pud_huge(kvm, *pud))
1046 if (stage2_pud_none(kvm, *pud)) {
1049 pmd = mmu_memory_cache_alloc(cache);
1050 stage2_pud_populate(kvm, pud, pmd);
1051 get_page(virt_to_page(pud));
1054 return stage2_pmd_offset(kvm, pud, addr);
1057 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1058 *cache, phys_addr_t addr, const pmd_t *new_pmd)
1060 pmd_t *pmd, old_pmd;
1063 pmd = stage2_get_pmd(kvm, cache, addr);
1068 * Multiple vcpus faulting on the same PMD entry can
1069 * lead to them sequentially updating the PMD with the
1070 * same value. Following the break-before-make
1071 * (pmd_clear() followed by tlb_flush()) process can
1072 * hinder forward progress due to refaults generated
1073 * on missing translations.
1075 * Skip updating the page table if the entry is unchanged.
1078 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1081 if (pmd_present(old_pmd)) {
1083 * If we already have PTE level mapping for this block,
1084 * we must unmap it to avoid inconsistent TLB state and
1085 * leaking the table page. We could end up in this situation
1086 * if the memory slot was marked for dirty logging and was
1087 * reverted, leaving PTE level mappings for the pages accessed
1088 * during the period. So, unmap the PTE level mapping for this
1089 * block and retry, as we could have released the upper level
1090 * table in the process.
1092 * Normal THP split/merge follows mmu_notifier callbacks and is
1093 * handled accordingly.
1095 if (!pmd_thp_or_huge(old_pmd)) {
1096 unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
1100 * Mapping in huge pages should only happen through a
1101 * fault. If a page is merged into a transparent huge
1102 * page, the individual subpages of that huge page
1103 * should be unmapped through MMU notifiers before we get here.
1106 * Merging of CompoundPages is not supported; they
1107 * should first be split, unmapped, merged,
1108 * and mapped back in on demand.
1110 WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
1112 kvm_tlb_flush_vmid_ipa(kvm, addr);
1114 get_page(virt_to_page(pmd));
1117 kvm_set_pmd(pmd, *new_pmd);
1121 static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1122 phys_addr_t addr, const pud_t *new_pudp)
1124 pud_t *pudp, old_pud;
1127 pudp = stage2_get_pud(kvm, cache, addr);
1133 * A large number of vcpus faulting on the same stage 2 entry
1134 * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
1135 * Skip updating the page tables if there is no change.
1137 if (pud_val(old_pud) == pud_val(*new_pudp))
1140 if (stage2_pud_present(kvm, old_pud)) {
1142 * If we already have table level mapping for this block, unmap
1143 * the range for this block and retry.
1145 if (!stage2_pud_huge(kvm, old_pud)) {
1146 unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
1150 WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
1151 stage2_pud_clear(kvm, pudp);
1152 kvm_tlb_flush_vmid_ipa(kvm, addr);
1154 get_page(virt_to_page(pudp));
1157 kvm_set_pud(pudp, *new_pudp);
1162 * stage2_get_leaf_entry - walk the stage2 VM page tables and return
1163 * true if a valid and present leaf-entry is found. A pointer to the
1164 * leaf-entry is returned in the appropriate level variable - pudpp, pmdpp or ptepp.
1167 static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
1168 pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
1178 pudp = stage2_get_pud(kvm, NULL, addr);
1179 if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
1182 if (stage2_pud_huge(kvm, *pudp)) {
1187 pmdp = stage2_pmd_offset(kvm, pudp, addr);
1188 if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
1191 if (pmd_thp_or_huge(*pmdp)) {
1196 ptep = pte_offset_kernel(pmdp, addr);
1197 if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
1204 static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
1211 found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
1216 return kvm_s2pud_exec(pudp);
1218 return kvm_s2pmd_exec(pmdp);
1220 return kvm_s2pte_exec(ptep);
1223 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1224 phys_addr_t addr, const pte_t *new_pte,
1225 unsigned long flags)
1229 pte_t *pte, old_pte;
1230 bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
1231 bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
1233 VM_BUG_ON(logging_active && !cache);
1235 /* Create stage-2 page table mapping - Levels 0 and 1 */
1236 pud = stage2_get_pud(kvm, cache, addr);
1239 * Ignore calls from kvm_set_spte_hva for unallocated address ranges.
1246 * While dirty page logging is active, dissolve the huge PUD, then
1247 * continue on to allocate a page.
1250 stage2_dissolve_pud(kvm, addr, pud);
1252 if (stage2_pud_none(kvm, *pud)) {
1254 return 0; /* ignore calls from kvm_set_spte_hva */
1255 pmd = mmu_memory_cache_alloc(cache);
1256 stage2_pud_populate(kvm, pud, pmd);
1257 get_page(virt_to_page(pud));
1260 pmd = stage2_pmd_offset(kvm, pud, addr);
1263 * Ignore calls from kvm_set_spte_hva for unallocated address ranges.
1270 * While dirty page logging is active, dissolve the huge PMD, then continue on to allocate a page.
1274 stage2_dissolve_pmd(kvm, addr, pmd);
1276 /* Create stage-2 page mappings - Level 2 */
1277 if (pmd_none(*pmd)) {
1279 return 0; /* ignore calls from kvm_set_spte_hva */
1280 pte = mmu_memory_cache_alloc(cache);
1281 kvm_pmd_populate(pmd, pte);
1282 get_page(virt_to_page(pmd));
1285 pte = pte_offset_kernel(pmd, addr);
1287 if (iomap && pte_present(*pte))
1290 /* Create 2nd stage page table mapping - Level 3 */
1292 if (pte_present(old_pte)) {
1293 /* Skip page table update if there is no change */
1294 if (pte_val(old_pte) == pte_val(*new_pte))
1297 kvm_set_pte(pte, __pte(0));
1298 kvm_tlb_flush_vmid_ipa(kvm, addr);
1300 get_page(virt_to_page(pte));
1303 kvm_set_pte(pte, *new_pte);
1307 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1308 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1310 if (pte_young(*pte)) {
1311 *pte = pte_mkold(*pte);
1317 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1319 return __ptep_test_and_clear_young(pte);
1323 static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1325 return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1328 static int stage2_pudp_test_and_clear_young(pud_t *pud)
1330 return stage2_ptep_test_and_clear_young((pte_t *)pud);
1334 * kvm_phys_addr_ioremap - map a device range to guest IPA
1336 * @kvm: The KVM pointer
1337 * @guest_ipa: The IPA at which to insert the mapping
1338 * @pa: The physical address of the device
1339 * @size: The size of the mapping
1341 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1342 phys_addr_t pa, unsigned long size, bool writable)
1344 phys_addr_t addr, end;
1347 struct kvm_mmu_memory_cache cache = { 0, };
1349 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1350 pfn = __phys_to_pfn(pa);
1352 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
1353 pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
1356 pte = kvm_s2pte_mkwrite(pte);
1358 ret = mmu_topup_memory_cache(&cache,
1359 kvm_mmu_cache_min_pages(kvm),
1363 spin_lock(&kvm->mmu_lock);
1364 ret = stage2_set_pte(kvm, &cache, addr, &pte,
1365 KVM_S2PTE_FLAG_IS_IOMAP);
1366 spin_unlock(&kvm->mmu_lock);
1374 mmu_free_memory_cache(&cache);
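/*
 * Illustrative usage sketch (not part of the original file): a device region
 * installed at a fixed guest IPA with device attributes. The real user of
 * this interface is the vgic code, which maps the GIC virtual CPU interface
 * for the guest; the addresses below are placeholders.
 */
#if 0
static int example_map_device(struct kvm *kvm)
{
        phys_addr_t guest_ipa = 0x08010000;     /* hypothetical guest IPA */
        phys_addr_t pa = 0x2c020000;            /* hypothetical host PA */

        return kvm_phys_addr_ioremap(kvm, guest_ipa, pa, 0x2000, true);
}
#endif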
1378 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
1380 kvm_pfn_t pfn = *pfnp;
1381 gfn_t gfn = *ipap >> PAGE_SHIFT;
1383 if (kvm_is_transparent_hugepage(pfn)) {
1386 * The address we faulted on is backed by a transparent huge
1387 * page. However, because we map the compound huge page and
1388 * not the individual tail page, we need to transfer the
1389 * refcount to the head page. We have to be careful that the
1390 * THP doesn't start to split while we are adjusting the refcounts.
1393 * We are sure this doesn't happen, because mmu_notifier_retry
1394 * was successful and we are holding the mmu_lock, so if this
1395 * THP is trying to split, it will be blocked in the mmu
1396 * notifier before touching any of the pages, specifically
1397 * before being able to call __split_huge_page_refcount().
1399 * We can therefore safely transfer the refcount from PG_tail
1400 * to PG_head and switch the pfn from a tail page to the head page.
1403 mask = PTRS_PER_PMD - 1;
1404 VM_BUG_ON((gfn & mask) != (pfn & mask));
1407 kvm_release_pfn_clean(pfn);
1420 * stage2_wp_ptes - write protect PMD range
1421 * @pmd: pointer to pmd entry
1422 * @addr: range start address
1423 * @end: range end address
1425 static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1429 pte = pte_offset_kernel(pmd, addr);
1431 if (!pte_none(*pte)) {
1432 if (!kvm_s2pte_readonly(pte))
1433 kvm_set_s2pte_readonly(pte);
1435 } while (pte++, addr += PAGE_SIZE, addr != end);
1439 * stage2_wp_pmds - write protect PUD range
1440 * @kvm: kvm instance for the VM
1441 * @pud: pointer to pud entry
1442 * @addr: range start address
1443 * @end: range end address
1445 static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
1446 phys_addr_t addr, phys_addr_t end)
1451 pmd = stage2_pmd_offset(kvm, pud, addr);
1454 next = stage2_pmd_addr_end(kvm, addr, end);
1455 if (!pmd_none(*pmd)) {
1456 if (pmd_thp_or_huge(*pmd)) {
1457 if (!kvm_s2pmd_readonly(pmd))
1458 kvm_set_s2pmd_readonly(pmd);
1460 stage2_wp_ptes(pmd, addr, next);
1463 } while (pmd++, addr = next, addr != end);
1467 * stage2_wp_puds - write protect PGD range
1468 * @pgd: pointer to pgd entry
1469 * @addr: range start address
1470 * @end: range end address
1472 static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
1473 phys_addr_t addr, phys_addr_t end)
1478 pud = stage2_pud_offset(kvm, pgd, addr);
1480 next = stage2_pud_addr_end(kvm, addr, end);
1481 if (!stage2_pud_none(kvm, *pud)) {
1482 if (stage2_pud_huge(kvm, *pud)) {
1483 if (!kvm_s2pud_readonly(pud))
1484 kvm_set_s2pud_readonly(pud);
1486 stage2_wp_pmds(kvm, pud, addr, next);
1489 } while (pud++, addr = next, addr != end);
1493 * stage2_wp_range() - write protect stage2 memory region range
1494 * @kvm: The KVM pointer
1495 * @addr: Start address of range
1496 * @end: End address of range
1498 static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1503 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
1506 * Release kvm_mmu_lock periodically if the memory region is
1507 * large. Otherwise, we may see kernel panics with
1508 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1509 * CONFIG_LOCKDEP. Additionally, holding the lock too long
1510 * will also starve other vCPUs. We have to also make sure
1511 * that the page tables are not freed while we released the lock.
1514 cond_resched_lock(&kvm->mmu_lock);
1515 if (!READ_ONCE(kvm->arch.pgd))
1517 next = stage2_pgd_addr_end(kvm, addr, end);
1518 if (stage2_pgd_present(kvm, *pgd))
1519 stage2_wp_puds(kvm, pgd, addr, next);
1520 } while (pgd++, addr = next, addr != end);
1524 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1525 * @kvm: The KVM pointer
1526 * @slot: The memory slot to write protect
1528 * Called to start logging dirty pages after memory region
1529 * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
1530 * all present PUDs, PMDs and PTEs are write protected in the memory region.
1531 * Afterwards the dirty page log can be read.
1533 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1534 * serializing operations for VM memory regions.
1536 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1538 struct kvm_memslots *slots = kvm_memslots(kvm);
1539 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1540 phys_addr_t start, end;
1542 if (WARN_ON_ONCE(!memslot))
1545 start = memslot->base_gfn << PAGE_SHIFT;
1546 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1548 spin_lock(&kvm->mmu_lock);
1549 stage2_wp_range(kvm, start, end);
1550 spin_unlock(&kvm->mmu_lock);
1551 kvm_flush_remote_tlbs(kvm);
1555 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1556 * @kvm: The KVM pointer
1557 * @slot: The memory slot associated with mask
1558 * @gfn_offset: The gfn offset in memory slot
1559 * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
1560 * slot to be write protected
1562 * Walks the bits set in mask and write protects the associated PTEs. Caller must
1563 * acquire kvm_mmu_lock.
1565 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1566 struct kvm_memory_slot *slot,
1567 gfn_t gfn_offset, unsigned long mask)
1569 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1570 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1571 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1573 stage2_wp_range(kvm, start, end);
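/*
 * Illustrative, self-contained sketch (not part of the original file) of how
 * the dirty bitmap word is turned into an IPA range above. For mask = 0x3c
 * (pages 2..5 relative to base_gfn), __ffs(mask) = 2 and __fls(mask) = 5, so
 * the range is [base + 2 pages, base + 6 pages). Any cleared bits inside the
 * range are simply write protected again, which is harmless. The helper and
 * the 4K PAGE_SHIFT below are illustrative only.
 */
#if 0
#include <stdint.h>

static void example_mask_to_range(uint64_t base_gfn, uint64_t mask,
                                  uint64_t *start, uint64_t *end)
{
        unsigned int first = __builtin_ctzll(mask);         /* like __ffs() */
        unsigned int last  = 63 - __builtin_clzll(mask);    /* like __fls() */

        *start = (base_gfn + first) << 12;
        *end   = (base_gfn + last + 1) << 12;
}
#endif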
1577 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected dirty pages.
1580 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1581 * enable dirty logging for them.
1583 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1584 struct kvm_memory_slot *slot,
1585 gfn_t gfn_offset, unsigned long mask)
1587 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1590 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
1592 __clean_dcache_guest_page(pfn, size);
1595 static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
1597 __invalidate_icache_guest_page(pfn, size);
1600 static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
1602 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
1605 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1607 unsigned long map_size)
1610 hva_t uaddr_start, uaddr_end;
1613 size = memslot->npages * PAGE_SIZE;
1615 gpa_start = memslot->base_gfn << PAGE_SHIFT;
1617 uaddr_start = memslot->userspace_addr;
1618 uaddr_end = uaddr_start + size;
1621 * Pages belonging to memslots that don't have the same alignment
1622 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
1623 * PMD/PUD entries, because we'll end up mapping the wrong pages.
1625 * Consider a layout like the following:
1627 * memslot->userspace_addr:
1628 * +-----+--------------------+--------------------+---+
1629 * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz|
1630 * +-----+--------------------+--------------------+---+
1632 * memslot->base_gfn << PAGE_SHIFT:
1633 * +---+--------------------+--------------------+-----+
1634 * |abc|def Stage-2 block | Stage-2 block |tvxyz|
1635 * +---+--------------------+--------------------+-----+
1637 * If we create those stage-2 blocks, we'll end up with this incorrect mapping.
1643 if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
1647 * Next, let's make sure we're not trying to map anything not covered
1648 * by the memslot. This means we have to prohibit block size mappings
1649 * for the beginning and end of a non-block aligned and non-block sized
1650 * memory slot (illustrated by the head and tail parts of the
1651 * userspace view above containing pages 'abcde' and 'xyz', respectively).
1654 * Note that it doesn't matter if we do the check using the
1655 * userspace_addr or the base_gfn, as both are equally aligned (per
1656 * the check above) and equally sized.
1658 return (hva & ~(map_size - 1)) >= uaddr_start &&
1659 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
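/*
 * Illustrative, self-contained sketch (not part of the original file) of the
 * two checks above, with concrete numbers. With a 2MiB block (map_size =
 * 0x200000), an IPA start of 0x40100000 and a userspace start of
 * 0x7f8000000000 have offsets 0x100000 vs 0x0 within the block, so block
 * mappings are refused; when the offsets do match, a block mapping is allowed
 * only if the whole block around hva fits inside [uaddr_start, uaddr_end).
 * The helper name is illustrative only.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool example_supports_block(uint64_t gpa_start, uint64_t uaddr_start,
                                   uint64_t uaddr_end, uint64_t hva,
                                   uint64_t map_size)
{
        /* 1) IPA and userspace VA must be equally aligned within a block. */
        if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
                return false;

        /* 2) The block containing hva must be fully covered by the memslot. */
        return (hva & ~(map_size - 1)) >= uaddr_start &&
               (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}
#endif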
1662 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1663 struct kvm_memory_slot *memslot, unsigned long hva,
1664 unsigned long fault_status)
1667 bool write_fault, writable, force_pte = false;
1668 bool exec_fault, needs_exec;
1669 unsigned long mmu_seq;
1670 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
1671 struct kvm *kvm = vcpu->kvm;
1672 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1673 struct vm_area_struct *vma;
1676 pgprot_t mem_type = PAGE_S2;
1677 bool logging_active = memslot_is_logging(memslot);
1678 unsigned long vma_pagesize, flags = 0;
1680 write_fault = kvm_is_write_fault(vcpu);
1681 exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1682 VM_BUG_ON(write_fault && exec_fault);
1684 if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
1685 kvm_err("Unexpected L2 read permission error\n");
1689 /* Let's check if we will get back a huge page backed by hugetlbfs */
1690 down_read(&current->mm->mmap_sem);
1691 vma = find_vma_intersection(current->mm, hva, hva + 1);
1692 if (unlikely(!vma)) {
1693 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1694 up_read(&current->mm->mmap_sem);
1698 if (is_vm_hugetlb_page(vma))
1699 vma_shift = huge_page_shift(hstate_vma(vma));
1701 vma_shift = PAGE_SHIFT;
1703 vma_pagesize = 1ULL << vma_shift;
1704 if (logging_active ||
1705 (vma->vm_flags & VM_PFNMAP) ||
1706 !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
1708 vma_pagesize = PAGE_SIZE;
1712 * The stage2 page tables have a minimum of 2 levels (for arm64 see
1713 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
1714 * use PMD_SIZE huge mappings (even when the PMD is folded into the PGD).
1715 * As for PUD huge mappings, we must make sure that we have at least
1716 * 3 levels, i.e., that the PMD is not folded.
1718 if (vma_pagesize == PMD_SIZE ||
1719 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
1720 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
1721 up_read(&current->mm->mmap_sem);
1723 /* We need minimum second+third level pages */
1724 ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
1729 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1731 * Ensure the read of mmu_notifier_seq happens before we call
1732 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1733 * the page we just got a reference to getting unmapped before we have a
1734 * chance to grab the mmu_lock, which ensures that if the page gets
1735 * unmapped afterwards, the call to kvm_unmap_hva will take it away
1736 * from us again properly. This smp_rmb() interacts with the smp_wmb()
1737 * in kvm_mmu_notifier_invalidate_<page|range_end>.
1741 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
1742 if (pfn == KVM_PFN_ERR_HWPOISON) {
1743 kvm_send_hwpoison_signal(hva, vma_shift);
1746 if (is_error_noslot_pfn(pfn))
1749 if (kvm_is_device_pfn(pfn)) {
1750 mem_type = PAGE_S2_DEVICE;
1751 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1752 } else if (logging_active) {
1754 * Faults on pages in a memslot with logging enabled
1755 * should not be mapped with huge pages (it introduces churn
1756 * and performance degradation), so force a pte mapping.
1758 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1761 * Only actually map the page as writable if this was a write fault.
1768 if (exec_fault && is_iomap(flags))
1771 spin_lock(&kvm->mmu_lock);
1772 if (mmu_notifier_retry(kvm, mmu_seq))
1775 if (vma_pagesize == PAGE_SIZE && !force_pte) {
1777 * Only PMD_SIZE transparent hugepages (THP) are
1778 * currently supported. This code will need to be
1779 * updated to support other THP sizes.
1781 * Make sure the host VA and the guest IPA are sufficiently
1782 * aligned and that the block is contained within the memslot.
1784 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
1785 transparent_hugepage_adjust(&pfn, &fault_ipa))
1786 vma_pagesize = PMD_SIZE;
1790 kvm_set_pfn_dirty(pfn);
1792 if (fault_status != FSC_PERM && !is_iomap(flags))
1793 clean_dcache_guest_page(pfn, vma_pagesize);
1796 invalidate_icache_guest_page(pfn, vma_pagesize);
1799 * If we took an execution fault we have made the
1800 * icache/dcache coherent above and should now let the s2
1801 * mapping be executable.
1803 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
1804 * execute permissions, and we preserve whatever we have.
1806 needs_exec = exec_fault ||
1807 (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
1809 if (vma_pagesize == PUD_SIZE) {
1810 pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
1812 new_pud = kvm_pud_mkhuge(new_pud);
1814 new_pud = kvm_s2pud_mkwrite(new_pud);
1817 new_pud = kvm_s2pud_mkexec(new_pud);
1819 ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
1820 } else if (vma_pagesize == PMD_SIZE) {
1821 pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
1823 new_pmd = kvm_pmd_mkhuge(new_pmd);
1826 new_pmd = kvm_s2pmd_mkwrite(new_pmd);
1829 new_pmd = kvm_s2pmd_mkexec(new_pmd);
1831 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1833 pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
1836 new_pte = kvm_s2pte_mkwrite(new_pte);
1837 mark_page_dirty(kvm, gfn);
1841 new_pte = kvm_s2pte_mkexec(new_pte);
1843 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
1847 spin_unlock(&kvm->mmu_lock);
1848 kvm_set_pfn_accessed(pfn);
1849 kvm_release_pfn_clean(pfn);
1854 * Resolve the access fault by making the page young again.
1855 * Note that because the faulting entry is guaranteed not to be
1856 * cached in the TLB, we don't need to invalidate anything.
1857 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
1858 * so there is no need for atomic (pte|pmd)_mkyoung operations.
1860 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1866 bool pfn_valid = false;
1868 trace_kvm_access_fault(fault_ipa);
1870 spin_lock(&vcpu->kvm->mmu_lock);
1872 if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
1875 if (pud) { /* HugeTLB */
1876 *pud = kvm_s2pud_mkyoung(*pud);
1877 pfn = kvm_pud_pfn(*pud);
1879 } else if (pmd) { /* THP, HugeTLB */
1880 *pmd = pmd_mkyoung(*pmd);
1881 pfn = pmd_pfn(*pmd);
1884 *pte = pte_mkyoung(*pte); /* Just a page... */
1885 pfn = pte_pfn(*pte);
1890 spin_unlock(&vcpu->kvm->mmu_lock);
1892 kvm_set_pfn_accessed(pfn);
1896 * kvm_handle_guest_abort - handles all 2nd stage aborts
1897 * @vcpu: the VCPU pointer
1898 * @run: the kvm_run structure
1900 * Any abort that gets to the host is almost guaranteed to be caused by a
1901 * missing second stage translation table entry, which can mean that either the
1902 * guest simply needs more memory and we must allocate an appropriate page or it
1903 * can mean that the guest tried to access I/O memory, which is emulated by user
1904 * space. The distinction is based on the IPA causing the fault and whether this
1905 * memory region has been registered as standard RAM by user space.
1907 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1909 unsigned long fault_status;
1910 phys_addr_t fault_ipa;
1911 struct kvm_memory_slot *memslot;
1913 bool is_iabt, write_fault, writable;
1917 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1919 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1920 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1922 /* Synchronous External Abort? */
1923 if (kvm_vcpu_dabt_isextabt(vcpu)) {
1925 * For RAS the host kernel may handle this abort.
1926 * There is no need to pass the error into the guest.
1928 if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
1931 if (unlikely(!is_iabt)) {
1932 kvm_inject_vabt(vcpu);
1937 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1938 kvm_vcpu_get_hfar(vcpu), fault_ipa);
1940 /* Check the stage-2 fault is trans. fault or write fault */
1941 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1942 fault_status != FSC_ACCESS) {
1943 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1944 kvm_vcpu_trap_get_class(vcpu),
1945 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1946 (unsigned long)kvm_vcpu_get_hsr(vcpu));
1950 idx = srcu_read_lock(&vcpu->kvm->srcu);
1952 gfn = fault_ipa >> PAGE_SHIFT;
1953 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1954 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1955 write_fault = kvm_is_write_fault(vcpu);
1956 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1958 /* Prefetch Abort on I/O address */
1964 * Check for a cache maintenance operation. Since we
1965 * ended-up here, we know it is outside of any memory
1966 * slot. But we can't find out if that is for a device,
1967 * or if the guest is just being stupid. The only thing
1968 * we know for sure is that this range cannot be cached.
1970 * So let's assume that the guest is just being
1971 * cautious, and skip the instruction.
1973 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1974 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1980 * The IPA is reported as [MAX:12], so we need to
1981 * complement it with the bottom 12 bits from the
1982 * faulting VA. This is always 12 bits, irrespective of the page size.
1985 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1986 ret = io_mem_abort(vcpu, run, fault_ipa);
1990 /* Userspace should not be able to register out-of-bounds IPAs */
1991 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
1993 if (fault_status == FSC_ACCESS) {
1994 handle_access_fault(vcpu, fault_ipa);
1999 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
2003 if (ret == -ENOEXEC) {
2004 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
2008 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2012 static int handle_hva_to_gpa(struct kvm *kvm,
2013 unsigned long start,
2015 int (*handler)(struct kvm *kvm,
2016 gpa_t gpa, u64 size,
2020 struct kvm_memslots *slots;
2021 struct kvm_memory_slot *memslot;
2024 slots = kvm_memslots(kvm);
2026 /* we only care about the pages that the guest sees */
2027 kvm_for_each_memslot(memslot, slots) {
2028 unsigned long hva_start, hva_end;
2031 hva_start = max(start, memslot->userspace_addr);
2032 hva_end = min(end, memslot->userspace_addr +
2033 (memslot->npages << PAGE_SHIFT));
2034 if (hva_start >= hva_end)
2037 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
2038 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
2044 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2046 unmap_stage2_range(kvm, gpa, size);
2050 int kvm_unmap_hva_range(struct kvm *kvm,
2051 unsigned long start, unsigned long end)
2056 trace_kvm_unmap_hva_range(start, end);
2057 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
2061 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2063 pte_t *pte = (pte_t *)data;
2065 WARN_ON(size != PAGE_SIZE);
2067 * We can always call stage2_set_pte with the KVM_S2_FLAG_LOGGING_ACTIVE
2068 * flag clear because MMU notifiers will have unmapped a huge PMD before
2069 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
2070 * therefore stage2_set_pte() never needs to clear out a huge PMD
2071 * through this calling path.
2073 stage2_set_pte(kvm, NULL, gpa, pte, 0);
2078 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
2080 unsigned long end = hva + PAGE_SIZE;
2081 kvm_pfn_t pfn = pte_pfn(pte);
2087 trace_kvm_set_spte_hva(hva);
2090 * We've moved a page around, probably through CoW, so let's treat it
2091 * just like a translation fault and clean the cache to the PoC.
2093 clean_dcache_guest_page(pfn, PAGE_SIZE);
2094 stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
2095 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
2100 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2106 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2107 if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
2111 return stage2_pudp_test_and_clear_young(pud);
2113 return stage2_pmdp_test_and_clear_young(pmd);
2115 return stage2_ptep_test_and_clear_young(pte);
2118 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2124 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2125 if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
2129 return kvm_s2pud_young(*pud);
2131 return pmd_young(*pmd);
2133 return pte_young(*pte);
2136 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
2140 trace_kvm_age_hva(start, end);
2141 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
2144 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
2148 trace_kvm_test_age_hva(hva);
2149 return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
2150 kvm_test_age_hva_handler, NULL);
2153 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
2155 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
2158 phys_addr_t kvm_mmu_get_httbr(void)
2160 if (__kvm_cpu_uses_extended_idmap())
2161 return virt_to_phys(merged_hyp_pgd);
2163 return virt_to_phys(hyp_pgd);
2166 phys_addr_t kvm_get_idmap_vector(void)
2168 return hyp_idmap_vector;
2171 static int kvm_map_idmap_text(pgd_t *pgd)
2175 /* Create the idmap in the boot page tables */
2176 err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
2177 hyp_idmap_start, hyp_idmap_end,
2178 __phys_to_pfn(hyp_idmap_start),
2181 kvm_err("Failed to idmap %lx-%lx\n",
2182 hyp_idmap_start, hyp_idmap_end);
2187 int kvm_mmu_init(void)
2191 hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
2192 hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
2193 hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
2194 hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
2195 hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
2198 * We rely on the linker script to ensure at build time that the HYP
2199 * init code does not cross a page boundary.
2201 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
2203 kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
2204 kvm_debug("HYP VA range: %lx:%lx\n",
2205 kern_hyp_va(PAGE_OFFSET),
2206 kern_hyp_va((unsigned long)high_memory - 1));
2208 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
2209 hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
2210 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
2212 * The idmap page intersects with the HYP VA space;
2213 * it is not safe to continue further.
2215 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
2220 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
2222 kvm_err("Hyp mode PGD not allocated\n");
2227 if (__kvm_cpu_uses_extended_idmap()) {
2228 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2230 if (!boot_hyp_pgd) {
2231 kvm_err("Hyp boot PGD not allocated\n");
2236 err = kvm_map_idmap_text(boot_hyp_pgd);
2240 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
2241 if (!merged_hyp_pgd) {
2242 kvm_err("Failed to allocate extra HYP pgd\n");
2245 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
2248 err = kvm_map_idmap_text(hyp_pgd);
2253 io_map_base = hyp_idmap_start;
2260 void kvm_arch_commit_memory_region(struct kvm *kvm,
2261 const struct kvm_userspace_memory_region *mem,
2262 struct kvm_memory_slot *old,
2263 const struct kvm_memory_slot *new,
2264 enum kvm_mr_change change)
2267 * At this point the memslot has been committed and there is an
2268 * allocated dirty_bitmap[]; dirty pages will be tracked while the
2269 * memory slot is write protected.
2271 if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
2272 kvm_mmu_wp_memory_region(kvm, mem->slot);
2275 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2276 struct kvm_memory_slot *memslot,
2277 const struct kvm_userspace_memory_region *mem,
2278 enum kvm_mr_change change)
2280 hva_t hva = mem->userspace_addr;
2281 hva_t reg_end = hva + mem->memory_size;
2282 bool writable = !(mem->flags & KVM_MEM_READONLY);
2285 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
2286 change != KVM_MR_FLAGS_ONLY)
2290 * Prevent userspace from creating a memory region outside of the IPA
2291 * space addressable by the KVM guest.
2293 if (memslot->base_gfn + memslot->npages >=
2294 (kvm_phys_size(kvm) >> PAGE_SHIFT))
2297 down_read(&current->mm->mmap_sem);
2299 * A memory region could potentially cover multiple VMAs, and any holes
2300 * between them, so iterate over all of them to find out if we can map
2301 * any of them right now.
2303 * +--------------------------------------------+
2304 * +---------------+----------------+ +----------------+
2305 * | : VMA 1 | VMA 2 | | VMA 3 : |
2306 * +---------------+----------------+ +----------------+
2308 * +--------------------------------------------+
2311 struct vm_area_struct *vma = find_vma(current->mm, hva);
2312 hva_t vm_start, vm_end;
2314 if (!vma || vma->vm_start >= reg_end)
2318 * Take the intersection of this VMA with the memory region
2320 vm_start = max(hva, vma->vm_start);
2321 vm_end = min(reg_end, vma->vm_end);
2323 if (vma->vm_flags & VM_PFNMAP) {
2324 gpa_t gpa = mem->guest_phys_addr +
2325 (vm_start - mem->userspace_addr);
2328 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
2329 pa += vm_start - vma->vm_start;
2331 /* IO region dirty page logging not allowed */
2332 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2337 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
2344 } while (hva < reg_end);
2346 if (change == KVM_MR_FLAGS_ONLY)
2349 spin_lock(&kvm->mmu_lock);
2351 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
2353 stage2_flush_memslot(kvm, memslot);
2354 spin_unlock(&kvm->mmu_lock);
2356 up_read(&current->mm->mmap_sem);
2360 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
2364 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
2368 void kvm_arch_flush_shadow_all(struct kvm *kvm)
2370 kvm_free_stage2_pgd(kvm);
2373 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2374 struct kvm_memory_slot *slot)
2376 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2377 phys_addr_t size = slot->npages << PAGE_SHIFT;
2379 spin_lock(&kvm->mmu_lock);
2380 unmap_stage2_range(kvm, gpa, size);
2381 spin_unlock(&kvm->mmu_lock);
2385 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
2388 * - S/W ops are local to a CPU (not broadcast)
2389 * - We have line migration behind our back (speculation)
2390 * - System caches don't support S/W at all (damn!)
2392 * In the face of the above, the best we can do is to try and convert
2393 * S/W ops to VA ops. Because the guest is not allowed to infer the
2394 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2395 * which is a rather good thing for us.
2397 * Also, it is only used when turning caches on/off ("The expected
2398 * usage of the cache maintenance instructions that operate by set/way
2399 * is associated with the cache maintenance instructions associated
2400 * with the powerdown and powerup of caches, if this is required by
2401 * the implementation.").
2403 * We use the following policy:
2405 * - If we trap a S/W operation, we enable VM trapping to detect
2406 * caches being turned on/off, and do a full clean.
2408 * - We flush the caches on both caches being turned on and off.
2410 * - Once the caches are enabled, we stop trapping VM ops.
2412 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2414 unsigned long hcr = *vcpu_hcr(vcpu);
2417 * If this is the first time we do a S/W operation
2418 * (i.e. HCR_TVM not set) flush the whole memory, and set the VM trapping bit.
2421 * Otherwise, rely on the VM trapping to wait for the MMU +
2422 * Caches to be turned off. At that point, we'll be able to
2423 * clean the caches again.
2425 if (!(hcr & HCR_TVM)) {
2426 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2427 vcpu_has_cache_enabled(vcpu));
2428 stage2_flush_vm(vcpu->kvm);
2429 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
2433 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2435 bool now_enabled = vcpu_has_cache_enabled(vcpu);
2438 * If switching the MMU+caches on, need to invalidate the caches.
2439 * If switching it off, need to clean the caches.
2440 * Clean + invalidate does the trick always.
2442 if (now_enabled != was_enabled)
2443 stage2_flush_vm(vcpu->kvm);
2445 /* Caches are now on, stop trapping VM ops (until a S/W op) */
2447 *vcpu_hcr(vcpu) &= ~HCR_TVM;
2449 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);