// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"
static struct kvm_pgtable *hyp_pgtable;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static unsigned long io_map_base;

#define KVM_S2PTE_FLAG_IS_IOMAP		(1UL << 0)
#define KVM_S2_FLAG_LOGGING_ACTIVE	(1UL << 1)
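/*
 * KVM_S2PTE_FLAG_IS_IOMAP marks a mapping as targeting device memory, for
 * which no dcache maintenance is performed, while KVM_S2_FLAG_LOGGING_ACTIVE
 * forces PAGE_SIZE ptes while dirty logging is enabled on the memslot.
 */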
static bool is_iomap(unsigned long flags)
{
	return flags & KVM_S2PTE_FLAG_IS_IOMAP;
}

static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}
/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
}

static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				   int level)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
}
/*
 * D-Cache management functions. They take the page table entries by
 * value, as they are flushing the cache using the kernel mapping (or
 * kmap on 32bit).
 */
static void kvm_flush_dcache_pte(pte_t pte)
{
	__kvm_flush_dcache_pte(pte);
}

static void kvm_flush_dcache_pmd(pmd_t pmd)
{
	__kvm_flush_dcache_pmd(pmd);
}

static void kvm_flush_dcache_pud(pud_t pud)
{
	__kvm_flush_dcache_pud(pud);
}

static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_valid(pfn);
}
/**
 * stage2_dissolve_pmd() - clear and flush huge PMD entry
 * @mmu:	pointer to mmu structure to operate on
 * @addr:	IPA
 * @pmd:	pmd pointer for IPA
 *
 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
 */
static void stage2_dissolve_pmd(struct kvm_s2_mmu *mmu, phys_addr_t addr, pmd_t *pmd)
{
	if (!pmd_thp_or_huge(*pmd))
		return;

	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
	put_page(virt_to_page(pmd));
}
/**
 * stage2_dissolve_pud() - clear and flush huge PUD entry
 * @mmu:	pointer to mmu structure to operate on
 * @addr:	IPA
 * @pud:	pud pointer for IPA
 *
 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
 */
static void stage2_dissolve_pud(struct kvm_s2_mmu *mmu, phys_addr_t addr, pud_t *pudp)
{
	struct kvm *kvm = mmu->kvm;

	if (!stage2_pud_huge(kvm, *pudp))
		return;

	stage2_pud_clear(kvm, pudp);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
	put_page(virt_to_page(pudp));
}
static void clear_stage2_pgd_entry(struct kvm_s2_mmu *mmu, pgd_t *pgd, phys_addr_t addr)
{
	struct kvm *kvm = mmu->kvm;
	p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);

	stage2_pgd_clear(kvm, pgd);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
	stage2_p4d_free(kvm, p4d_table);
	put_page(virt_to_page(pgd));
}

static void clear_stage2_p4d_entry(struct kvm_s2_mmu *mmu, p4d_t *p4d, phys_addr_t addr)
{
	struct kvm *kvm = mmu->kvm;
	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);

	stage2_p4d_clear(kvm, p4d);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
	stage2_pud_free(kvm, pud_table);
	put_page(virt_to_page(p4d));
}

static void clear_stage2_pud_entry(struct kvm_s2_mmu *mmu, pud_t *pud, phys_addr_t addr)
{
	struct kvm *kvm = mmu->kvm;
	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);

	VM_BUG_ON(stage2_pud_huge(kvm, *pud));
	stage2_pud_clear(kvm, pud);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
	stage2_pmd_free(kvm, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_stage2_pmd_entry(struct kvm_s2_mmu *mmu, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);

	VM_BUG_ON(pmd_thp_or_huge(*pmd));
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
	free_page((unsigned long)pte_table);
	put_page(virt_to_page(pmd));
}
static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
{
	WRITE_ONCE(*ptep, new_pte);
	dsb(ishst);
}

static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
{
	WRITE_ONCE(*pmdp, new_pmd);
	dsb(ishst);
}

static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
{
	kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
}

static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
{
	WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
	dsb(ishst);
}

static inline void kvm_p4d_populate(p4d_t *p4dp, pud_t *pudp)
{
	WRITE_ONCE(*p4dp, kvm_mk_p4d(pudp));
	dsb(ishst);
}

static inline void kvm_pgd_populate(pgd_t *pgdp, p4d_t *p4dp)
{
#ifndef __PAGETABLE_P4D_FOLDED
	WRITE_ONCE(*pgdp, kvm_mk_pgd(p4dp));
	dsb(ishst);
#endif
}
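/*
 * The dsb(ishst) in the helpers above orders the table update before any
 * subsequent TLB maintenance, so the hardware walker never observes a
 * stale or half-written entry.
 */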
/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM.  However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
 * the IO subsystem will never hit in the cache.
 *
 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
 * we then fully enforce cacheability of RAM, no matter what the guest
 * does.
 */
static void unmap_stage2_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			pte_t old_pte = *pte;

			kvm_set_pte(pte, __pte(0));
			kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);

			/* No need to invalidate the cache for device mappings */
			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
				kvm_flush_dcache_pte(old_pte);

			put_page(virt_to_page(pte));
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (stage2_pte_table_empty(mmu->kvm, start_pte))
		clear_stage2_pmd_entry(mmu, pmd, start_addr);
}
static void unmap_stage2_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
	do {
		next = stage2_pmd_addr_end(kvm, addr, end);
		if (!pmd_none(*pmd)) {
			if (pmd_thp_or_huge(*pmd)) {
				pmd_t old_pmd = *pmd;

				pmd_clear(pmd);
				kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);

				kvm_flush_dcache_pmd(old_pmd);

				put_page(virt_to_page(pmd));
			} else {
				unmap_stage2_ptes(mmu, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);

	if (stage2_pmd_table_empty(kvm, start_pmd))
		clear_stage2_pud_entry(mmu, pud, start_addr);
}
static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	phys_addr_t next, start_addr = addr;
	pud_t *pud, *start_pud;

	start_pud = pud = stage2_pud_offset(kvm, p4d, addr);
	do {
		next = stage2_pud_addr_end(kvm, addr, end);
		if (!stage2_pud_none(kvm, *pud)) {
			if (stage2_pud_huge(kvm, *pud)) {
				pud_t old_pud = *pud;

				stage2_pud_clear(kvm, pud);
				kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
				kvm_flush_dcache_pud(old_pud);
				put_page(virt_to_page(pud));
			} else {
				unmap_stage2_pmds(mmu, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);

	if (stage2_pud_table_empty(kvm, start_pud))
		clear_stage2_p4d_entry(mmu, p4d, start_addr);
}
static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	phys_addr_t next, start_addr = addr;
	p4d_t *p4d, *start_p4d;

	start_p4d = p4d = stage2_p4d_offset(kvm, pgd, addr);
	do {
		next = stage2_p4d_addr_end(kvm, addr, end);
		if (!stage2_p4d_none(kvm, *p4d))
			unmap_stage2_puds(mmu, p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	if (stage2_p4d_table_empty(kvm, start_p4d))
		clear_stage2_pgd_entry(mmu, pgd, start_addr);
}
/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @mmu:   The KVM stage-2 MMU pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
				 bool may_block)
{
	struct kvm *kvm = mmu->kvm;
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	assert_spin_locked(&kvm->mmu_lock);
	WARN_ON(size & ~PAGE_MASK);

	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
	do {
		/*
		 * Make sure the page table is still active, as another thread
		 * could have possibly freed the page table, while we released
		 * the lock.
		 */
		if (!READ_ONCE(mmu->pgd))
			break;
		next = stage2_pgd_addr_end(kvm, addr, end);
		if (!stage2_pgd_none(kvm, *pgd))
			unmap_stage2_p4ds(mmu, pgd, addr, next);
		/*
		 * If the range is too large, release the kvm->mmu_lock
		 * to prevent starvation and lockup detector warnings.
		 */
		if (may_block && next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (pgd++, addr = next, addr != end);
}

static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
	__unmap_stage2_range(mmu, start, size, true);
}
static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
			kvm_flush_dcache_pte(*pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void stage2_flush_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	pmd_t *pmd;
	phys_addr_t next;

	pmd = stage2_pmd_offset(kvm, pud, addr);
	do {
		next = stage2_pmd_addr_end(kvm, addr, end);
		if (!pmd_none(*pmd)) {
			if (pmd_thp_or_huge(*pmd))
				kvm_flush_dcache_pmd(*pmd);
			else
				stage2_flush_ptes(mmu, pmd, addr, next);
		}
	} while (pmd++, addr = next, addr != end);
}

static void stage2_flush_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	pud_t *pud;
	phys_addr_t next;

	pud = stage2_pud_offset(kvm, p4d, addr);
	do {
		next = stage2_pud_addr_end(kvm, addr, end);
		if (!stage2_pud_none(kvm, *pud)) {
			if (stage2_pud_huge(kvm, *pud))
				kvm_flush_dcache_pud(*pud);
			else
				stage2_flush_pmds(mmu, pud, addr, next);
		}
	} while (pud++, addr = next, addr != end);
}

static void stage2_flush_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	p4d_t *p4d;
	phys_addr_t next;

	p4d = stage2_p4d_offset(kvm, pgd, addr);
	do {
		next = stage2_p4d_addr_end(kvm, addr, end);
		if (!stage2_p4d_none(kvm, *p4d))
			stage2_flush_puds(mmu, p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}
static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
	phys_addr_t next;
	pgd_t *pgd;

	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
	do {
		next = stage2_pgd_addr_end(kvm, addr, end);
		if (!stage2_pgd_none(kvm, *pgd))
			stage2_flush_p4ds(mmu, pgd, addr, next);

		if (next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (pgd++, addr = next, addr != end);
}
/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}
/**
 * free_hyp_pgds - free Hyp-mode page tables
 */
void free_hyp_pgds(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);
	if (hyp_pgtable) {
		kvm_pgtable_hyp_destroy(hyp_pgtable);
		kfree(hyp_pgtable);
	}
	mutex_unlock(&kvm_hyp_pgd_mutex);
}
static int __create_hyp_mappings(unsigned long start, unsigned long size,
				 unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	mutex_lock(&kvm_hyp_pgd_mutex);
	err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
	mutex_unlock(&kvm_hyp_pgd_mutex);

	return err;
}

static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}
/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 * @prot:	The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = kern_hyp_va((unsigned long)from);
	unsigned long end = kern_hyp_va((unsigned long)to);

	if (is_kernel_in_hyp_mode())
		return 0;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
					    prot);
		if (err)
			return err;
	}

	return 0;
}
static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
					unsigned long *haddr,
					enum kvm_pgtable_prot prot)
{
	unsigned long base;
	int ret = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);

	/*
	 * This assumes that we have enough space below the idmap
	 * page to allocate our VAs. If not, the check below will
	 * kick. A potential alternative would be to detect that
	 * overflow and switch to an allocation above the idmap.
	 *
	 * The allocated size is always a multiple of PAGE_SIZE.
	 */
	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
	base = io_map_base - size;

	/*
	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
	 * allocating the new area, as it would indicate we've
	 * overflowed the idmap/IO address range.
	 */
	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
		ret = -ENOMEM;
	else
		io_map_base = base;

	mutex_unlock(&kvm_hyp_pgd_mutex);

	if (ret)
		goto out;

	ret = __create_hyp_mappings(base, size, phys_addr, prot);
	if (ret)
		goto out;

	*haddr = base + offset_in_page(phys_addr);
out:
	return ret;
}
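/*
 * Worked example (illustrative numbers, assuming 4K pages): with
 * io_map_base == 0x80003000, mapping phys_addr == 0x10000040 with
 * size == 0x20 rounds the allocation to PAGE_ALIGN(0x20 + 0x40) == one
 * page, carves it downwards (base == 0x80002000), and returns the HYP VA
 * base + offset_in_page(phys_addr) == 0x80002040.
 */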
/**
 * create_hyp_io_mappings - Map IO into both kernel and HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @kaddr:	Kernel VA for this mapping
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr)
{
	unsigned long addr;
	int ret;

	*kaddr = ioremap(phys_addr, size);
	if (!*kaddr)
		return -ENOMEM;

	if (is_kernel_in_hyp_mode()) {
		*haddr = *kaddr;
		return 0;
	}

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_DEVICE);
	if (ret) {
		iounmap(*kaddr);
		*kaddr = NULL;
		*haddr = NULL;
		return ret;
	}

	*haddr = (void __iomem *)addr;
	return 0;
}
/**
 * create_hyp_exec_mappings - Map an executable range into HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr)
{
	unsigned long addr;
	int ret;

	BUG_ON(is_kernel_in_hyp_mode());

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_EXEC);
	if (ret) {
		*haddr = NULL;
		return ret;
	}

	*haddr = (void *)addr;
	return 0;
}
/**
 * kvm_init_stage2_mmu - Initialise a S2 MMU structure
 * @kvm:	The pointer to the KVM structure
 * @mmu:	The pointer to the s2 MMU structure
 *
 * Allocates only the stage-2 HW PGD level table(s) of size defined by
 * stage2_pgd_size(mmu->kvm).
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
	phys_addr_t pgd_phys;
	pgd_t *pgd;
	int cpu;

	if (mmu->pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	/* Allocate the HW PGD, making sure that each page gets its own refcount */
	pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
	if (!pgd)
		return -ENOMEM;

	pgd_phys = virt_to_phys(pgd);
	if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
		return -EINVAL;

	mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
	if (!mmu->last_vcpu_ran) {
		free_pages_exact(pgd, stage2_pgd_size(kvm));
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

	mmu->kvm = kvm;
	mmu->pgd = pgd;
	mmu->pgd_phys = pgd_phys;
	mmu->vmid.vmid_gen = 0;
	return 0;
}
static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}
/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(current->mm);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	mmap_read_unlock(current->mm);
	srcu_read_unlock(&kvm->srcu, idx);
}
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
{
	struct kvm *kvm = mmu->kvm;
	void *pgd = NULL;

	spin_lock(&kvm->mmu_lock);
	if (mmu->pgd) {
		unmap_stage2_range(mmu, 0, kvm_phys_size(kvm));
		pgd = READ_ONCE(mmu->pgd);
		mmu->pgd = NULL;
	}
	spin_unlock(&kvm->mmu_lock);

	/* Free the HW pgd, one page at a time */
	if (pgd) {
		free_pages_exact(pgd, stage2_pgd_size(kvm));
		free_percpu(mmu->last_vcpu_ran);
	}
}
static p4d_t *stage2_get_p4d(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	struct kvm *kvm = mmu->kvm;
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
	if (stage2_pgd_none(kvm, *pgd)) {
		if (!cache)
			return NULL;
		p4d = kvm_mmu_memory_cache_alloc(cache);
		stage2_pgd_populate(kvm, pgd, p4d);
		get_page(virt_to_page(pgd));
	}

	return stage2_p4d_offset(kvm, pgd, addr);
}

static pud_t *stage2_get_pud(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	struct kvm *kvm = mmu->kvm;
	p4d_t *p4d;
	pud_t *pud;

	p4d = stage2_get_p4d(mmu, cache, addr);
	if (stage2_p4d_none(kvm, *p4d)) {
		if (!cache)
			return NULL;
		pud = kvm_mmu_memory_cache_alloc(cache);
		stage2_p4d_populate(kvm, p4d, pud);
		get_page(virt_to_page(p4d));
	}

	return stage2_pud_offset(kvm, p4d, addr);
}

static pmd_t *stage2_get_pmd(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	struct kvm *kvm = mmu->kvm;
	pud_t *pud;
	pmd_t *pmd;

	pud = stage2_get_pud(mmu, cache, addr);
	if (!pud || stage2_pud_huge(kvm, *pud))
		return NULL;

	if (stage2_pud_none(kvm, *pud)) {
		if (!cache)
			return NULL;
		pmd = kvm_mmu_memory_cache_alloc(cache);
		stage2_pud_populate(kvm, pud, pmd);
		get_page(virt_to_page(pud));
	}

	return stage2_pmd_offset(kvm, pud, addr);
}
static int stage2_set_pmd_huge(struct kvm_s2_mmu *mmu,
			       struct kvm_mmu_memory_cache *cache,
			       phys_addr_t addr, const pmd_t *new_pmd)
{
	pmd_t *pmd, old_pmd;

retry:
	pmd = stage2_get_pmd(mmu, cache, addr);
	VM_BUG_ON(!pmd);

	old_pmd = *pmd;
	/*
	 * Multiple vcpus faulting on the same PMD entry can
	 * lead to them sequentially updating the PMD with the
	 * same value. Following the break-before-make
	 * (pmd_clear() followed by tlb_flush()) process can
	 * hinder forward progress due to refaults generated
	 * on missing translations.
	 *
	 * Skip updating the page table if the entry is
	 * unchanged.
	 */
	if (pmd_val(old_pmd) == pmd_val(*new_pmd))
		return 0;

	if (pmd_present(old_pmd)) {
		/*
		 * If we already have PTE level mapping for this block,
		 * we must unmap it to avoid inconsistent TLB state and
		 * leaking the table page. We could end up in this situation
		 * if the memory slot was marked for dirty logging and was
		 * reverted, leaving PTE level mappings for the pages accessed
		 * during the period. So, unmap the PTE level mapping for this
		 * block and retry, as we could have released the upper level
		 * table in the process.
		 *
		 * Normal THP split/merge follows mmu_notifier callbacks and
		 * gets handled accordingly.
		 */
		if (!pmd_thp_or_huge(old_pmd)) {
			unmap_stage2_range(mmu, addr & S2_PMD_MASK, S2_PMD_SIZE);
			goto retry;
		}
		/*
		 * Mapping in huge pages should only happen through a
		 * fault. If a page is merged into a transparent huge
		 * page, the individual subpages of that huge page
		 * should be unmapped through MMU notifiers before we
		 * get here.
		 *
		 * Merging of CompoundPages is not supported; they
		 * should be split first, unmapped, merged,
		 * and mapped back in on demand.
		 */
		WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));

		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
	} else {
		get_page(virt_to_page(pmd));
	}

	kvm_set_pmd(pmd, *new_pmd);
	return 0;
}
static int stage2_set_pud_huge(struct kvm_s2_mmu *mmu,
			       struct kvm_mmu_memory_cache *cache,
			       phys_addr_t addr, const pud_t *new_pudp)
{
	struct kvm *kvm = mmu->kvm;
	pud_t *pudp, old_pud;

retry:
	pudp = stage2_get_pud(mmu, cache, addr);
	VM_BUG_ON(!pudp);

	old_pud = *pudp;

	/*
	 * A large number of vcpus faulting on the same stage 2 entry
	 * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
	 * Skip updating the page tables if there is no change.
	 */
	if (pud_val(old_pud) == pud_val(*new_pudp))
		return 0;

	if (stage2_pud_present(kvm, old_pud)) {
		/*
		 * If we already have table level mapping for this block, unmap
		 * the range for this block and retry.
		 */
		if (!stage2_pud_huge(kvm, old_pud)) {
			unmap_stage2_range(mmu, addr & S2_PUD_MASK, S2_PUD_SIZE);
			goto retry;
		}

		WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
		stage2_pud_clear(kvm, pudp);
		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
	} else {
		get_page(virt_to_page(pudp));
	}

	kvm_set_pud(pudp, *new_pudp);
	return 0;
}
/*
 * stage2_get_leaf_entry - walk the stage2 VM page tables and return
 * true if a valid and present leaf-entry is found. A pointer to the
 * leaf-entry is returned in the appropriate level variable - pudpp,
 * pmdpp, ptepp.
 */
static bool stage2_get_leaf_entry(struct kvm_s2_mmu *mmu, phys_addr_t addr,
				  pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
{
	struct kvm *kvm = mmu->kvm;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	*pudpp = NULL;
	*pmdpp = NULL;
	*ptepp = NULL;

	pudp = stage2_get_pud(mmu, NULL, addr);
	if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
		return false;

	if (stage2_pud_huge(kvm, *pudp)) {
		*pudpp = pudp;
		return true;
	}

	pmdp = stage2_pmd_offset(kvm, pudp, addr);
	if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
		return false;

	if (pmd_thp_or_huge(*pmdp)) {
		*pmdpp = pmdp;
		return true;
	}

	ptep = pte_offset_kernel(pmdp, addr);
	if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
		return false;

	*ptepp = ptep;
	return true;
}

static bool stage2_is_exec(struct kvm_s2_mmu *mmu, phys_addr_t addr, unsigned long sz)
{
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	bool found;

	found = stage2_get_leaf_entry(mmu, addr, &pudp, &pmdp, &ptep);
	if (!found)
		return false;

	if (pudp)
		return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
	else if (pmdp)
		return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
	else
		return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
}
static int stage2_set_pte(struct kvm_s2_mmu *mmu,
			  struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte,
			  unsigned long flags)
{
	struct kvm *kvm = mmu->kvm;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;
	bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
	bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;

	VM_BUG_ON(logging_active && !cache);

	/* Create stage-2 page table mapping - Levels 0 and 1 */
	pud = stage2_get_pud(mmu, cache, addr);
	if (!pud) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/*
	 * While dirty page logging - dissolve huge PUD, then continue
	 * on to allocate page.
	 */
	if (logging_active)
		stage2_dissolve_pud(mmu, addr, pud);

	if (stage2_pud_none(kvm, *pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = kvm_mmu_memory_cache_alloc(cache);
		stage2_pud_populate(kvm, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = stage2_pmd_offset(kvm, pud, addr);
	if (!pmd) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/*
	 * While dirty page logging - dissolve huge PMD, then continue on to
	 * allocate page.
	 */
	if (logging_active)
		stage2_dissolve_pmd(mmu, addr, pmd);

	/* Create stage-2 page mappings - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = kvm_mmu_memory_cache_alloc(cache);
		kvm_pmd_populate(pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	if (pte_present(old_pte)) {
		/* Skip page table update if there is no change */
		if (pte_val(old_pte) == pte_val(*new_pte))
			return 0;

		kvm_set_pte(pte, __pte(0));
		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
	} else {
		get_page(virt_to_page(pte));
	}

	kvm_set_pte(pte, *new_pte);
	return 0;
}
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static int stage2_ptep_test_and_clear_young(pte_t *pte)
{
	if (pte_young(*pte)) {
		*pte = pte_mkold(*pte);
		return 1;
	}
	return 0;
}
#else
static int stage2_ptep_test_and_clear_young(pte_t *pte)
{
	return __ptep_test_and_clear_young(pte);
}
#endif

static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
{
	return stage2_ptep_test_and_clear_young((pte_t *)pmd);
}

static int stage2_pudp_test_and_clear_young(pud_t *pud)
{
	return stage2_ptep_test_and_clear_young((pte_t *)pud);
}
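/*
 * The pte_t casts above rely on the access flag (AF) sitting in the same
 * bit position of arm64 block and page descriptors at every level, so the
 * pte helper can test and clear it for pmds and puds too.
 */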
/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 * @writable:	Whether or not to create a writable mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);

		if (writable)
			pte = kvm_s2pte_mkwrite(pte);

		ret = kvm_mmu_topup_memory_cache(&cache,
						 kvm_mmu_cache_min_pages(kvm));
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(&kvm->arch.mmu, &cache, addr, &pte,
				     KVM_S2PTE_FLAG_IS_IOMAP);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	kvm_mmu_free_memory_cache(&cache);
	return ret;
}
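/*
 * A typical caller wires a device's MMIO window straight into the guest,
 * e.g. (illustrative only, error handling elided):
 *
 *	ret = kvm_phys_addr_ioremap(kvm, gpa, pa, resource_size(res), true);
 *
 * The VGIC, for instance, maps the GICV CPU interface into the guest this
 * way.
 */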
/**
 * stage2_wp_ptes - write protect PMD range
 * @pmd:	pointer to pmd entry
 * @addr:	range start address
 * @end:	range end address
 */
static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			if (!kvm_s2pte_readonly(pte))
				kvm_set_s2pte_readonly(pte);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

/**
 * stage2_wp_pmds - write protect PUD range
 * @mmu:	stage-2 mmu to operate on
 * @pud:	pointer to pud entry
 * @addr:	range start address
 * @end:	range end address
 */
static void stage2_wp_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
			   phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	pmd_t *pmd;
	phys_addr_t next;

	pmd = stage2_pmd_offset(kvm, pud, addr);

	do {
		next = stage2_pmd_addr_end(kvm, addr, end);
		if (!pmd_none(*pmd)) {
			if (pmd_thp_or_huge(*pmd)) {
				if (!kvm_s2pmd_readonly(pmd))
					kvm_set_s2pmd_readonly(pmd);
			} else {
				stage2_wp_ptes(pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}
/**
 * stage2_wp_puds - write protect P4D range
 * @mmu:	stage-2 mmu to operate on
 * @p4d:	pointer to p4d entry
 * @addr:	range start address
 * @end:	range end address
 */
static void stage2_wp_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
			   phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	pud_t *pud;
	phys_addr_t next;

	pud = stage2_pud_offset(kvm, p4d, addr);
	do {
		next = stage2_pud_addr_end(kvm, addr, end);
		if (!stage2_pud_none(kvm, *pud)) {
			if (stage2_pud_huge(kvm, *pud)) {
				if (!kvm_s2pud_readonly(pud))
					kvm_set_s2pud_readonly(pud);
			} else {
				stage2_wp_pmds(mmu, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);
}

/**
 * stage2_wp_p4ds - write protect PGD range
 * @mmu:	stage-2 mmu to operate on
 * @pgd:	pointer to pgd entry
 * @addr:	range start address
 * @end:	range end address
 */
static void stage2_wp_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
			   phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	p4d_t *p4d;
	phys_addr_t next;

	p4d = stage2_p4d_offset(kvm, pgd, addr);
	do {
		next = stage2_p4d_addr_end(kvm, addr, end);
		if (!stage2_p4d_none(kvm, *p4d))
			stage2_wp_puds(mmu, p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}
/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @mmu:	stage-2 mmu to operate on
 * @addr:	Start address of range
 * @end:	End address of range
 */
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	pgd_t *pgd;
	phys_addr_t next;

	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
	do {
		/*
		 * Release kvm_mmu_lock periodically if the memory region is
		 * large. Otherwise, we may see kernel panics with
		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
		 * will also starve other vCPUs. We have to also make sure
		 * that the page tables are not freed while we released
		 * the lock.
		 */
		cond_resched_lock(&kvm->mmu_lock);
		if (!READ_ONCE(mmu->pgd))
			break;
		next = stage2_pgd_addr_end(kvm, addr, end);
		if (stage2_pgd_present(kvm, *pgd))
			stage2_wp_p4ds(mmu, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:	The KVM pointer
 * @slot:	The memory slot to write protect
 *
 * Called to start logging dirty pages after the KVM_MEM_LOG_DIRTY_PAGES
 * operation is called on a memory region. After this function returns
 * all present PUD, PMD and PTEs are write protected in the memory region.
 * Afterwards read of dirty page log can be called.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start, end;

	if (WARN_ON_ONCE(!memslot))
		return;

	start = memslot->base_gfn << PAGE_SHIFT;
	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(&kvm->arch.mmu, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}
/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated ptes.
 * Caller must acquire kvm_mmu_lock.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(&kvm->arch.mmu, start, end);
}
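/*
 * Example: with gfn_offset == 0 and mask == 0b00100100, __ffs(mask) == 2
 * and __fls(mask) == 5, so gfns base_gfn + 2 through base_gfn + 5 are
 * covered by a single stage2_wp_range() call; clear bits inside that span
 * (3 and 4 here) are harmlessly write protected again as well.
 */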
/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 *		dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	__clean_dcache_guest_page(pfn, size);
}

static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	__invalidate_icache_guest_page(pfn, size);
}

static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}
static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
					       unsigned long hva,
					       unsigned long map_size)
{
	gpa_t gpa_start;
	hva_t uaddr_start, uaddr_end;
	size_t size;

	/* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
	if (map_size == PAGE_SIZE)
		return true;

	size = memslot->npages * PAGE_SIZE;

	gpa_start = memslot->base_gfn << PAGE_SHIFT;

	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	/*
	 * Pages belonging to memslots that don't have the same alignment
	 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
	 * PMD/PUD entries, because we'll end up mapping the wrong pages.
	 *
	 * Consider a layout like the following:
	 *
	 *    memslot->userspace_addr:
	 *    +-----+--------------------+--------------------+---+
	 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
	 *    +-----+--------------------+--------------------+---+
	 *
	 *    memslot->base_gfn << PAGE_SHIFT:
	 *      +---+--------------------+--------------------+-----+
	 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
	 *      +---+--------------------+--------------------+-----+
	 *
	 * If we create those stage-2 blocks, we'll end up with this incorrect
	 * mapping:
	 *   d -> f
	 *   e -> g
	 *   f -> h
	 */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit block size mappings
	 * for the beginning and end of a non-block aligned and non-block sized
	 * memory slot (illustrated by the head and tail parts of the
	 * userspace view above containing pages 'abcde' and 'xyz',
	 * respectively).
	 *
	 * Note that it doesn't matter if we do the check using the
	 * userspace_addr or the base_gfn, as both are equally aligned (per
	 * the check above) and equally sized.
	 */
	return (hva & ~(map_size - 1)) >= uaddr_start &&
	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}
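/*
 * Example, assuming 4K pages and map_size == PMD_SIZE == 2MiB: a memslot
 * with userspace_addr == 0x40100000 but an IPA base of 0x80000000 has
 * offsets 0x100000 and 0x0 within their respective 2MiB blocks, so the
 * first check above rejects block mappings; they are only allowed when
 * both addresses share the same offset modulo map_size.
 */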
/*
 * Check if the given hva is backed by a transparent huge page (THP) and
 * whether it can be mapped using block mapping in stage2. If so, adjust
 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
 * supported. This will need to be updated to support other THP sizes.
 *
 * Returns the size of the mapping.
 */
static unsigned long
transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
			    unsigned long hva, kvm_pfn_t *pfnp,
			    phys_addr_t *ipap)
{
	kvm_pfn_t pfn = *pfnp;

	/*
	 * Make sure the adjustment is done only for THP pages. Also make
	 * sure that the HVA and IPA are sufficiently aligned and that the
	 * block map is contained within the memslot.
	 */
	if (kvm_is_transparent_hugepage(pfn) &&
	    fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page. However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page. We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		*ipap &= PMD_MASK;
		kvm_release_pfn_clean(pfn);
		pfn &= ~(PTRS_PER_PMD - 1);
		kvm_get_pfn(pfn);
		*pfnp = pfn;

		return PMD_SIZE;
	}

	/* Use page mapping if we cannot use block mapping. */
	return PAGE_SIZE;
}
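/*
 * Example, assuming 4K pages and PTRS_PER_PMD == 512: a fault on tail
 * pfn 0x12345 of a THP is rewritten to head pfn 0x12200 (0x12345 & ~511),
 * and the IPA is rounded down to its 2MiB boundary, so the whole huge
 * page is mapped with a single block entry.
 */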
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot, unsigned long hva,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, force_pte = false;
	bool exec_fault, needs_exec;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	short vma_shift;
	kvm_pfn_t pfn;
	pgprot_t mem_type = PAGE_S2;
	bool logging_active = memslot_is_logging(memslot);
	unsigned long vma_pagesize, flags = 0;
	struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;

	write_fault = kvm_is_write_fault(vcpu);
	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
	VM_BUG_ON(write_fault && exec_fault);

	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	mmap_read_lock(current->mm);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma))
		vma_shift = huge_page_shift(hstate_vma(vma));
	else
		vma_shift = PAGE_SHIFT;

	vma_pagesize = 1ULL << vma_shift;
	if (logging_active ||
	    (vma->vm_flags & VM_PFNMAP) ||
	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
		force_pte = true;
		vma_pagesize = PAGE_SIZE;
	}

	/*
	 * The stage2 has a minimum of 2 level table (For arm64 see
	 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
	 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
	 * As for PUD huge maps, we must make sure that we have at least
	 * 3 levels, i.e, PMD is not folded.
	 */
	if (vma_pagesize == PMD_SIZE ||
	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
	mmap_read_unlock(current->mm);

	/* We need minimum second+third level pages */
	ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(hva, vma_shift);
		return 0;
	}
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn)) {
		mem_type = PAGE_S2_DEVICE;
		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
	} else if (logging_active) {
		/*
		 * Faults on pages in a memslot with logging enabled
		 * should not be mapped with huge pages (it introduces churn
		 * and performance degradation), so force a pte mapping.
		 */
		flags |= KVM_S2_FLAG_LOGGING_ACTIVE;

		/*
		 * Only actually map the page as writable if this was a write
		 * fault.
		 */
		if (!write_fault)
			writable = false;
	}

	if (exec_fault && is_iomap(flags))
		return -ENOEXEC;

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/*
	 * If we are not forced to use page mapping, check if we are
	 * backed by a THP and thus use block mapping if possible.
	 */
	if (vma_pagesize == PAGE_SIZE && !force_pte)
		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
							   &pfn, &fault_ipa);
	if (writable)
		kvm_set_pfn_dirty(pfn);

	if (fault_status != FSC_PERM && !is_iomap(flags))
		clean_dcache_guest_page(pfn, vma_pagesize);

	if (exec_fault)
		invalidate_icache_guest_page(pfn, vma_pagesize);

	/*
	 * If we took an execution fault we have made the
	 * icache/dcache coherent above and should now let the s2
	 * mapping be executable.
	 *
	 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
	 * execute permissions, and we preserve whatever we have.
	 */
	needs_exec = exec_fault ||
		(fault_status == FSC_PERM &&
		 stage2_is_exec(mmu, fault_ipa, vma_pagesize));

	if (vma_pagesize == PUD_SIZE) {
		pud_t new_pud = kvm_pfn_pud(pfn, mem_type);

		new_pud = kvm_pud_mkhuge(new_pud);
		if (writable)
			new_pud = kvm_s2pud_mkwrite(new_pud);

		if (needs_exec)
			new_pud = kvm_s2pud_mkexec(new_pud);

		ret = stage2_set_pud_huge(mmu, memcache, fault_ipa, &new_pud);
	} else if (vma_pagesize == PMD_SIZE) {
		pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);

		new_pmd = kvm_pmd_mkhuge(new_pmd);

		if (writable)
			new_pmd = kvm_s2pmd_mkwrite(new_pmd);

		if (needs_exec)
			new_pmd = kvm_s2pmd_mkexec(new_pmd);

		ret = stage2_set_pmd_huge(mmu, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = kvm_pfn_pte(pfn, mem_type);

		if (writable) {
			new_pte = kvm_s2pte_mkwrite(new_pte);
			mark_page_dirty(kvm, gfn);
		}

		if (needs_exec)
			new_pte = kvm_s2pte_mkexec(new_pte);

		ret = stage2_set_pte(mmu, memcache, fault_ipa, &new_pte, flags);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(pfn);
	kvm_release_pfn_clean(pfn);
	return ret;
}
/*
 * Resolve the access fault by making the page young again.
 * Note that because the faulting entry is guaranteed not to be
 * cached in the TLB, we don't need to invalidate anything.
 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
 * so there is no need for atomic (pte|pmd)_mkyoung operations.
 */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	kvm_pfn_t pfn;
	bool pfn_valid = false;

	trace_kvm_access_fault(fault_ipa);

	spin_lock(&vcpu->kvm->mmu_lock);

	if (!stage2_get_leaf_entry(vcpu->arch.hw_mmu, fault_ipa, &pud, &pmd, &pte))
		goto out;

	if (pud) {		/* HugeTLB */
		*pud = kvm_s2pud_mkyoung(*pud);
		pfn = kvm_pud_pfn(*pud);
		pfn_valid = true;
	} else if (pmd) {	/* THP, HugeTLB */
		*pmd = pmd_mkyoung(*pmd);
		pfn = pmd_pfn(*pmd);
		pfn_valid = true;
	} else {
		*pte = pte_mkyoung(*pte);	/* Just a page... */
		pfn = pte_pfn(*pte);
		pfn_valid = true;
	}

out:
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (pfn_valid)
		kvm_set_pfn_accessed(pfn);
}
/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool is_iabt, write_fault, writable;
	gfn_t gfn;
	int ret, idx;

	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);

	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

	/* Synchronous External Abort? */
	if (kvm_vcpu_abt_issea(vcpu)) {
		/*
		 * For RAS the host kernel may handle this abort.
		 * There is no need to pass the error into the guest.
		 */
		if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
			kvm_inject_vabt(vcpu);

		return 1;
	}

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
	    fault_status != FSC_ACCESS) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
			(unsigned long)kvm_vcpu_get_esr(vcpu));
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		/*
		 * The guest has put either its instructions or its page-tables
		 * somewhere it shouldn't have. Userspace won't be able to do
		 * anything about this (there's no syndrome for a start), so
		 * re-inject the abort back into the guest.
		 */
		if (is_iabt) {
			ret = -ENOEXEC;
			goto out;
		}

		if (kvm_vcpu_dabt_iss1tw(vcpu)) {
			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * Check for a cache maintenance operation. Since we
		 * ended-up here, we know it is outside of any memory
		 * slot. But we can't find out if that is for a device,
		 * or if the guest is just being stupid. The only thing
		 * we know for sure is that this range cannot be cached.
		 *
		 * So let's assume that the guest is just being
		 * cautious, and skip the instruction.
		 */
		if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));

	if (fault_status == FSC_ACCESS) {
		handle_access_fault(vcpu, fault_ipa);
		ret = 1;
		goto out_unlock;
	}

	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
	if (ret == 0)
		ret = 1;
out:
	if (ret == -ENOEXEC) {
		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		ret = 1;
	}
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}
static int handle_hva_to_gpa(struct kvm *kvm,
			     unsigned long start,
			     unsigned long end,
			     int (*handler)(struct kvm *kvm,
					    gpa_t gpa, u64 size,
					    void *data),
			     void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int ret = 0;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gpa;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
	}

	return ret;
}
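/*
 * The handler return values are OR-ed together: for the aging callbacks
 * below this yields "was any page in the range young", while the unmap
 * and set_spte handlers simply return 0.
 */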
static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	unsigned flags = *(unsigned *)data;
	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;

	__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags)
{
	if (!kvm->arch.mmu.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
	return 0;
}
static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	pte_t *pte = (pte_t *)data;

	WARN_ON(size != PAGE_SIZE);
	/*
	 * We can always call stage2_set_pte with KVM_S2_FLAG_LOGGING_ACTIVE
	 * clear because MMU notifiers will have unmapped a huge PMD before
	 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
	 * therefore stage2_set_pte() never needs to clear out a huge PMD
	 * through this calling path.
	 */
	stage2_set_pte(&kvm->arch.mmu, NULL, gpa, pte, 0);
	return 0;
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	kvm_pfn_t pfn = pte_pfn(pte);
	pte_t stage2_pte;

	if (!kvm->arch.mmu.pgd)
		return 0;

	trace_kvm_set_spte_hva(hva);

	/*
	 * We've moved a page around, probably through CoW, so let's treat it
	 * just like a translation fault and clean the cache to the PoC.
	 */
	clean_dcache_guest_page(pfn, PAGE_SIZE);
	stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
	return 0;
}
static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
	if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte))
		return 0;

	if (pud)
		return stage2_pudp_test_and_clear_young(pud);
	else if (pmd)
		return stage2_pmdp_test_and_clear_young(pmd);
	else
		return stage2_ptep_test_and_clear_young(pte);
}

static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
	if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte))
		return 0;

	if (pud)
		return kvm_s2pud_young(*pud);
	else if (pmd)
		return pmd_young(*pmd);
	else
		return pte_young(*pte);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (!kvm->arch.mmu.pgd)
		return 0;
	trace_kvm_age_hva(start, end);
	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.mmu.pgd)
		return 0;
	trace_kvm_test_age_hva(hva);
	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
				 kvm_test_age_hva_handler, NULL);
}
phys_addr_t kvm_mmu_get_httbr(void)
{
	return __pa(hyp_pgtable->pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

static int kvm_map_idmap_text(void)
{
	unsigned long size = hyp_idmap_end - hyp_idmap_start;
	int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
					PAGE_HYP_EXEC);
	if (err)
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);

	return err;
}
int kvm_mmu_init(void)
{
	int err;
	u32 hyp_va_bits;

	hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
	hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
	hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
	hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);

	/*
	 * We rely on the linker script to ensure at build time that the HYP
	 * init code does not cross a page boundary.
	 */
	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);

	hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
	kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits);
	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
	kvm_debug("HYP VA range: %lx:%lx\n",
		  kern_hyp_va(PAGE_OFFSET),
		  kern_hyp_va((unsigned long)high_memory - 1));

	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
	    hyp_idmap_start <  kern_hyp_va((unsigned long)high_memory - 1) &&
	    hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
		/*
		 * The idmap page is intersecting with the VA space,
		 * it is not safe to continue further.
		 */
		kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
		err = -EINVAL;
		goto out;
	}

	hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL);
	if (!hyp_pgtable) {
		kvm_err("Hyp mode page-table not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits);
	if (err)
		goto out_free_pgtable;

	err = kvm_map_idmap_text();
	if (err)
		goto out_destroy_pgtable;

	io_map_base = hyp_idmap_start;
	return 0;

out_destroy_pgtable:
	kvm_pgtable_hyp_destroy(hyp_pgtable);
out_free_pgtable:
	kfree(hyp_pgtable);
	hyp_pgtable = NULL;
out:
	return err;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
		/*
		 * If we're with initial-all-set, we don't need to write
		 * protect any pages because they're all reported as dirty.
		 * Huge pages and normal pages will be write protected
		 * gradually.
		 */
		if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
			kvm_mmu_wp_memory_region(kvm, mem->slot);
		}
	}
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	hva_t hva = mem->userspace_addr;
	hva_t reg_end = hva + mem->memory_size;
	bool writable = !(mem->flags & KVM_MEM_READONLY);
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
			change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the KVM guest IPA space.
	 */
	if (memslot->base_gfn + memslot->npages >=
	    (kvm_phys_size(kvm) >> PAGE_SHIFT))
		return -EFAULT;

	mmap_read_lock(current->mm);
	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we can map
	 * any of them right now.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			gpa_t gpa = mem->guest_phys_addr +
				    (vm_start - mem->userspace_addr);
			phys_addr_t pa;

			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
			pa += vm_start - vma->vm_start;

			/* IO region dirty page logging not allowed */
			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				goto out;
			}

			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
						    vm_end - vm_start,
						    writable);
			if (ret)
				break;
		}
		hva = vm_end;
	} while (hva < reg_end);

	if (change == KVM_MR_FLAGS_ONLY)
		goto out;

	spin_lock(&kvm->mmu_lock);
	if (ret)
		unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
	else
		stage2_flush_memslot(kvm, memslot);
	spin_unlock(&kvm->mmu_lock);
out:
	mmap_read_unlock(current->mm);
	return ret;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_free_stage2_pgd(&kvm->arch.mmu);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
	spin_unlock(&kvm->mmu_lock);
}
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 *
 * Main problems:
 * - S/W ops are local to a CPU (not broadcast)
 * - We have line migration behind our back (speculation)
 * - System caches don't support S/W at all (damn!)
 *
 * In the face of the above, the best we can do is to try and convert
 * S/W ops to VA ops. Because the guest is not allowed to infer the
 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
 * which is a rather good thing for us.
 *
 * Also, it is only used when turning caches on/off ("The expected
 * usage of the cache maintenance instructions that operate by set/way
 * is associated with the cache maintenance instructions associated
 * with the powerdown and powerup of caches, if this is required by
 * the implementation.").
 *
 * We use the following policy:
 *
 * - If we trap a S/W operation, we enable VM trapping to detect
 *   caches being turned on/off, and do a full clean.
 *
 * - We flush the caches on both caches being turned on and off.
 *
 * - Once the caches are enabled, we stop trapping VM ops.
 */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = *vcpu_hcr(vcpu);

	/*
	 * If this is the first time we do a S/W operation
	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
	 * VM trapping.
	 *
	 * Otherwise, rely on the VM trapping to wait for the MMU +
	 * Caches to be turned off. At that point, we'll be able to
	 * clean the caches again.
	 */
	if (!(hcr & HCR_TVM)) {
		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
					vcpu_has_cache_enabled(vcpu));
		stage2_flush_vm(vcpu->kvm);
		*vcpu_hcr(vcpu) = hcr | HCR_TVM;
	}
}

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/*
	 * If switching the MMU+caches on, need to invalidate the caches.
	 * If switching it off, need to clean the caches.
	 * Clean + invalidate does the trick always.
	 */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* Caches are now on, stop trapping VM ops (until a S/W op) */
	if (now_enabled)
		*vcpu_hcr(vcpu) &= ~HCR_TVM;

	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}