1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
7 #include <linux/mman.h>
8 #include <linux/kvm_host.h>
10 #include <linux/hugetlb.h>
11 #include <linux/sched/signal.h>
12 #include <trace/events/kvm.h>
13 #include <asm/pgalloc.h>
14 #include <asm/cacheflush.h>
15 #include <asm/kvm_arm.h>
16 #include <asm/kvm_mmu.h>
17 #include <asm/kvm_ras.h>
18 #include <asm/kvm_asm.h>
19 #include <asm/kvm_emulate.h>
24 static pgd_t *boot_hyp_pgd;
25 static pgd_t *hyp_pgd;
26 static pgd_t *merged_hyp_pgd;
27 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
29 static unsigned long hyp_idmap_start;
30 static unsigned long hyp_idmap_end;
31 static phys_addr_t hyp_idmap_vector;
33 static unsigned long io_map_base;
35 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
37 #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
38 #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
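/*
 * Flags passed down to stage2_set_pte(): KVM_S2PTE_FLAG_IS_IOMAP marks a
 * device (MMIO) mapping, while KVM_S2_FLAG_LOGGING_ACTIVE indicates that
 * dirty logging is enabled and any huge mapping covering the address must
 * first be dissolved to page granularity.
 */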
40 static bool is_iomap(unsigned long flags)
42 return flags & KVM_S2PTE_FLAG_IS_IOMAP;
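/*
 * A memslot is logging dirty pages when it has a dirty bitmap allocated and
 * is not read-only; writes to such a slot must fault so they can be tracked.
 */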
45 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
47 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
51 * kvm_flush_remote_tlbs() - flush all VM TLB entries for ARMv7/ARMv8
52 * @kvm: pointer to kvm structure.
54 * Interface to HYP function to flush all VM TLB entries
56 void kvm_flush_remote_tlbs(struct kvm *kvm)
58 kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
61 static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
64 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
68 * D-Cache management functions. They take the page table entries by
69 * value, as they are flushing the cache using the kernel mapping (or
72 static void kvm_flush_dcache_pte(pte_t pte)
74 __kvm_flush_dcache_pte(pte);
77 static void kvm_flush_dcache_pmd(pmd_t pmd)
79 __kvm_flush_dcache_pmd(pmd);
82 static void kvm_flush_dcache_pud(pud_t pud)
84 __kvm_flush_dcache_pud(pud);
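/*
 * A pfn without a backing struct page (!pfn_valid()) is treated as a
 * device/MMIO page: it is mapped with device attributes and no data cache
 * maintenance is performed on it.
 */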
87 static bool kvm_is_device_pfn(unsigned long pfn)
89 return !pfn_valid(pfn);
93 * stage2_dissolve_pmd() - clear and flush huge PMD entry
94 * @mmu: pointer to mmu structure to operate on
96 * @pmd: pmd pointer for IPA
98 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
100 static void stage2_dissolve_pmd(struct kvm_s2_mmu *mmu, phys_addr_t addr, pmd_t *pmd)
102 if (!pmd_thp_or_huge(*pmd))
106 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
107 put_page(virt_to_page(pmd));
111 * stage2_dissolve_pud() - clear and flush huge PUD entry
112 * @mmu: pointer to mmu structure to operate on
114 * @pud: pud pointer for IPA
116 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
118 static void stage2_dissolve_pud(struct kvm_s2_mmu *mmu, phys_addr_t addr, pud_t *pudp)
120 struct kvm *kvm = mmu->kvm;
122 if (!stage2_pud_huge(kvm, *pudp))
125 stage2_pud_clear(kvm, pudp);
126 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
127 put_page(virt_to_page(pudp));
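/*
 * The clear_stage2_*_entry() helpers below tear down one level of the
 * stage-2 tables: clear the entry, invalidate the TLB for the covered IPA,
 * free the now-unreferenced lower-level table and drop the refcount held on
 * the page containing the cleared entry.
 */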
130 static void clear_stage2_pgd_entry(struct kvm_s2_mmu *mmu, pgd_t *pgd, phys_addr_t addr)
132 struct kvm *kvm = mmu->kvm;
133 p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
134 stage2_pgd_clear(kvm, pgd);
135 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
136 stage2_p4d_free(kvm, p4d_table);
137 put_page(virt_to_page(pgd));
140 static void clear_stage2_p4d_entry(struct kvm_s2_mmu *mmu, p4d_t *p4d, phys_addr_t addr)
142 struct kvm *kvm = mmu->kvm;
143 pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);
144 stage2_p4d_clear(kvm, p4d);
145 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
146 stage2_pud_free(kvm, pud_table);
147 put_page(virt_to_page(p4d));
150 static void clear_stage2_pud_entry(struct kvm_s2_mmu *mmu, pud_t *pud, phys_addr_t addr)
152 struct kvm *kvm = mmu->kvm;
153 pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
155 VM_BUG_ON(stage2_pud_huge(kvm, *pud));
156 stage2_pud_clear(kvm, pud);
157 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
158 stage2_pmd_free(kvm, pmd_table);
159 put_page(virt_to_page(pud));
162 static void clear_stage2_pmd_entry(struct kvm_s2_mmu *mmu, pmd_t *pmd, phys_addr_t addr)
164 pte_t *pte_table = pte_offset_kernel(pmd, 0);
165 VM_BUG_ON(pmd_thp_or_huge(*pmd));
167 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
168 free_page((unsigned long)pte_table);
169 put_page(virt_to_page(pmd));
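/*
 * Page table entries are written with WRITE_ONCE() so that the hardware
 * walker and other CPUs never observe a torn update.
 */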
172 static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
174 WRITE_ONCE(*ptep, new_pte);
178 static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
180 WRITE_ONCE(*pmdp, new_pmd);
184 static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
186 kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
189 static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
191 WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
195 static inline void kvm_p4d_populate(p4d_t *p4dp, pud_t *pudp)
197 WRITE_ONCE(*p4dp, kvm_mk_p4d(pudp));
201 static inline void kvm_pgd_populate(pgd_t *pgdp, p4d_t *p4dp)
203 #ifndef __PAGETABLE_P4D_FOLDED
204 WRITE_ONCE(*pgdp, kvm_mk_pgd(p4dp));
210 * Unmapping vs dcache management:
212 * If a guest maps certain memory pages as uncached, all writes will
213 * bypass the data cache and go directly to RAM. However, the CPUs
214 * can still speculate reads (not writes) and fill cache lines with
217 * Those cache lines will be *clean* cache lines though, so a
218 * clean+invalidate operation is equivalent to an invalidate
219 * operation, because no cache lines are marked dirty.
221 * Those clean cache lines could be filled prior to an uncached write
222 * by the guest, and the cache coherent IO subsystem would therefore
223 * end up writing old data to disk.
225 * This is why right after unmapping a page/section and invalidating
226 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
227 * the IO subsystem will never hit in the cache.
229 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
230 * we then fully enforce cacheability of RAM, no matter what the guest
233 static void unmap_stage2_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
234 phys_addr_t addr, phys_addr_t end)
236 phys_addr_t start_addr = addr;
237 pte_t *pte, *start_pte;
239 start_pte = pte = pte_offset_kernel(pmd, addr);
241 if (!pte_none(*pte)) {
242 pte_t old_pte = *pte;
244 kvm_set_pte(pte, __pte(0));
245 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
247 /* No need to invalidate the cache for device mappings */
248 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
249 kvm_flush_dcache_pte(old_pte);
251 put_page(virt_to_page(pte));
253 } while (pte++, addr += PAGE_SIZE, addr != end);
255 if (stage2_pte_table_empty(mmu->kvm, start_pte))
256 clear_stage2_pmd_entry(mmu, pmd, start_addr);
259 static void unmap_stage2_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
260 phys_addr_t addr, phys_addr_t end)
262 struct kvm *kvm = mmu->kvm;
263 phys_addr_t next, start_addr = addr;
264 pmd_t *pmd, *start_pmd;
266 start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
268 next = stage2_pmd_addr_end(kvm, addr, end);
269 if (!pmd_none(*pmd)) {
270 if (pmd_thp_or_huge(*pmd)) {
271 pmd_t old_pmd = *pmd;
274 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
276 kvm_flush_dcache_pmd(old_pmd);
278 put_page(virt_to_page(pmd));
280 unmap_stage2_ptes(mmu, pmd, addr, next);
283 } while (pmd++, addr = next, addr != end);
285 if (stage2_pmd_table_empty(kvm, start_pmd))
286 clear_stage2_pud_entry(mmu, pud, start_addr);
289 static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
290 phys_addr_t addr, phys_addr_t end)
292 struct kvm *kvm = mmu->kvm;
293 phys_addr_t next, start_addr = addr;
294 pud_t *pud, *start_pud;
296 start_pud = pud = stage2_pud_offset(kvm, p4d, addr);
298 next = stage2_pud_addr_end(kvm, addr, end);
299 if (!stage2_pud_none(kvm, *pud)) {
300 if (stage2_pud_huge(kvm, *pud)) {
301 pud_t old_pud = *pud;
303 stage2_pud_clear(kvm, pud);
304 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
305 kvm_flush_dcache_pud(old_pud);
306 put_page(virt_to_page(pud));
308 unmap_stage2_pmds(mmu, pud, addr, next);
311 } while (pud++, addr = next, addr != end);
313 if (stage2_pud_table_empty(kvm, start_pud))
314 clear_stage2_p4d_entry(mmu, p4d, start_addr);
317 static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
318 phys_addr_t addr, phys_addr_t end)
320 struct kvm *kvm = mmu->kvm;
321 phys_addr_t next, start_addr = addr;
322 p4d_t *p4d, *start_p4d;
324 start_p4d = p4d = stage2_p4d_offset(kvm, pgd, addr);
326 next = stage2_p4d_addr_end(kvm, addr, end);
327 if (!stage2_p4d_none(kvm, *p4d))
328 unmap_stage2_puds(mmu, p4d, addr, next);
329 } while (p4d++, addr = next, addr != end);
331 if (stage2_p4d_table_empty(kvm, start_p4d))
332 clear_stage2_pgd_entry(mmu, pgd, start_addr);
336 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
337 * @mmu: The KVM stage-2 MMU pointer
338 * @start: The intermediate physical base address of the range to unmap
339 * @size: The size of the area to unmap
341 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
342 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
343 * destroying the VM), otherwise another faulting VCPU may come in and mess
344 * with things behind our backs.
346 static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
349 struct kvm *kvm = mmu->kvm;
351 phys_addr_t addr = start, end = start + size;
354 assert_spin_locked(&kvm->mmu_lock);
355 WARN_ON(size & ~PAGE_MASK);
357 pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
360 * Make sure the page table is still active, as another thread
361 * could have possibly freed the page table, while we released
364 if (!READ_ONCE(mmu->pgd))
366 next = stage2_pgd_addr_end(kvm, addr, end);
367 if (!stage2_pgd_none(kvm, *pgd))
368 unmap_stage2_p4ds(mmu, pgd, addr, next);
370 * If the range is too large, release the kvm->mmu_lock
371 * to prevent starvation and lockup detector warnings.
373 if (may_block && next != end)
374 cond_resched_lock(&kvm->mmu_lock);
375 } while (pgd++, addr = next, addr != end);
378 static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
380 __unmap_stage2_range(mmu, start, size, true);
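/*
 * The stage2_flush_*() walkers below perform data cache maintenance on every
 * page already mapped at stage 2; they are the workhorses of
 * stage2_flush_vm().
 */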
383 static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
384 phys_addr_t addr, phys_addr_t end)
388 pte = pte_offset_kernel(pmd, addr);
390 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
391 kvm_flush_dcache_pte(*pte);
392 } while (pte++, addr += PAGE_SIZE, addr != end);
395 static void stage2_flush_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
396 phys_addr_t addr, phys_addr_t end)
398 struct kvm *kvm = mmu->kvm;
402 pmd = stage2_pmd_offset(kvm, pud, addr);
404 next = stage2_pmd_addr_end(kvm, addr, end);
405 if (!pmd_none(*pmd)) {
406 if (pmd_thp_or_huge(*pmd))
407 kvm_flush_dcache_pmd(*pmd);
409 stage2_flush_ptes(mmu, pmd, addr, next);
411 } while (pmd++, addr = next, addr != end);
414 static void stage2_flush_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
415 phys_addr_t addr, phys_addr_t end)
417 struct kvm *kvm = mmu->kvm;
421 pud = stage2_pud_offset(kvm, p4d, addr);
423 next = stage2_pud_addr_end(kvm, addr, end);
424 if (!stage2_pud_none(kvm, *pud)) {
425 if (stage2_pud_huge(kvm, *pud))
426 kvm_flush_dcache_pud(*pud);
428 stage2_flush_pmds(mmu, pud, addr, next);
430 } while (pud++, addr = next, addr != end);
433 static void stage2_flush_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
434 phys_addr_t addr, phys_addr_t end)
436 struct kvm *kvm = mmu->kvm;
440 p4d = stage2_p4d_offset(kvm, pgd, addr);
442 next = stage2_p4d_addr_end(kvm, addr, end);
443 if (!stage2_p4d_none(kvm, *p4d))
444 stage2_flush_puds(mmu, p4d, addr, next);
445 } while (p4d++, addr = next, addr != end);
448 static void stage2_flush_memslot(struct kvm *kvm,
449 struct kvm_memory_slot *memslot)
451 struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
452 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
453 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
457 pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
459 next = stage2_pgd_addr_end(kvm, addr, end);
460 if (!stage2_pgd_none(kvm, *pgd))
461 stage2_flush_p4ds(mmu, pgd, addr, next);
464 cond_resched_lock(&kvm->mmu_lock);
465 } while (pgd++, addr = next, addr != end);
469 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
470 * @kvm: The struct kvm pointer
472 * Go through the stage 2 page tables and invalidate any cache lines
473 * backing memory already mapped to the VM.
475 static void stage2_flush_vm(struct kvm *kvm)
477 struct kvm_memslots *slots;
478 struct kvm_memory_slot *memslot;
481 idx = srcu_read_lock(&kvm->srcu);
482 spin_lock(&kvm->mmu_lock);
484 slots = kvm_memslots(kvm);
485 kvm_for_each_memslot(memslot, slots)
486 stage2_flush_memslot(kvm, memslot);
488 spin_unlock(&kvm->mmu_lock);
489 srcu_read_unlock(&kvm->srcu, idx);
492 static void clear_hyp_pgd_entry(pgd_t *pgd)
494 p4d_t *p4d_table __maybe_unused = p4d_offset(pgd, 0UL);
496 p4d_free(NULL, p4d_table);
497 put_page(virt_to_page(pgd));
500 static void clear_hyp_p4d_entry(p4d_t *p4d)
502 pud_t *pud_table __maybe_unused = pud_offset(p4d, 0UL);
503 VM_BUG_ON(p4d_huge(*p4d));
505 pud_free(NULL, pud_table);
506 put_page(virt_to_page(p4d));
509 static void clear_hyp_pud_entry(pud_t *pud)
511 pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
512 VM_BUG_ON(pud_huge(*pud));
514 pmd_free(NULL, pmd_table);
515 put_page(virt_to_page(pud));
518 static void clear_hyp_pmd_entry(pmd_t *pmd)
520 pte_t *pte_table = pte_offset_kernel(pmd, 0);
521 VM_BUG_ON(pmd_thp_or_huge(*pmd));
523 pte_free_kernel(NULL, pte_table);
524 put_page(virt_to_page(pmd));
527 static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
529 pte_t *pte, *start_pte;
531 start_pte = pte = pte_offset_kernel(pmd, addr);
533 if (!pte_none(*pte)) {
534 kvm_set_pte(pte, __pte(0));
535 put_page(virt_to_page(pte));
537 } while (pte++, addr += PAGE_SIZE, addr != end);
539 if (hyp_pte_table_empty(start_pte))
540 clear_hyp_pmd_entry(pmd);
543 static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
546 pmd_t *pmd, *start_pmd;
548 start_pmd = pmd = pmd_offset(pud, addr);
550 next = pmd_addr_end(addr, end);
551 /* Hyp doesn't use huge pmds */
553 unmap_hyp_ptes(pmd, addr, next);
554 } while (pmd++, addr = next, addr != end);
556 if (hyp_pmd_table_empty(start_pmd))
557 clear_hyp_pud_entry(pud);
560 static void unmap_hyp_puds(p4d_t *p4d, phys_addr_t addr, phys_addr_t end)
563 pud_t *pud, *start_pud;
565 start_pud = pud = pud_offset(p4d, addr);
567 next = pud_addr_end(addr, end);
568 /* Hyp doesn't use huge puds */
570 unmap_hyp_pmds(pud, addr, next);
571 } while (pud++, addr = next, addr != end);
573 if (hyp_pud_table_empty(start_pud))
574 clear_hyp_p4d_entry(p4d);
577 static void unmap_hyp_p4ds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
580 p4d_t *p4d, *start_p4d;
582 start_p4d = p4d = p4d_offset(pgd, addr);
584 next = p4d_addr_end(addr, end);
585 /* Hyp doesn't use huge p4ds */
587 unmap_hyp_puds(p4d, addr, next);
588 } while (p4d++, addr = next, addr != end);
590 if (hyp_p4d_table_empty(start_p4d))
591 clear_hyp_pgd_entry(pgd);
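/*
 * Index into a hyp pgd. The number of top-level entries is passed in
 * explicitly because the extended idmap pgd (__kvm_idmap_ptrs_per_pgd())
 * can have a different number of entries than PTRS_PER_PGD.
 */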
594 static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
596 return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
599 static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
600 phys_addr_t start, u64 size)
603 phys_addr_t addr = start, end = start + size;
607 * We don't unmap anything from HYP, except at the hyp tear down.
608 * Hence, we don't have to invalidate the TLBs here.
610 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
612 next = pgd_addr_end(addr, end);
614 unmap_hyp_p4ds(pgd, addr, next);
615 } while (pgd++, addr = next, addr != end);
618 static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
620 __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
623 static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
625 __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
629 * free_hyp_pgds - free Hyp-mode page tables
631 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
632 * therefore contains either mappings in the kernel memory area (above
633 * PAGE_OFFSET), or device mappings in the idmap range.
635 * boot_hyp_pgd should only map the idmap range, and is only used in
636 * the extended idmap case.
638 void free_hyp_pgds(void)
642 mutex_lock(&kvm_hyp_pgd_mutex);
644 id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
647 /* In case we never called kvm_mmu_init() */
649 io_map_base = hyp_idmap_start;
650 unmap_hyp_idmap_range(id_pgd, io_map_base,
651 hyp_idmap_start + PAGE_SIZE - io_map_base);
655 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
660 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
661 (uintptr_t)high_memory - PAGE_OFFSET);
663 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
666 if (merged_hyp_pgd) {
667 clear_page(merged_hyp_pgd);
668 free_page((unsigned long)merged_hyp_pgd);
669 merged_hyp_pgd = NULL;
672 mutex_unlock(&kvm_hyp_pgd_mutex);
675 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
676 unsigned long end, unsigned long pfn,
684 pte = pte_offset_kernel(pmd, addr);
685 kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
686 get_page(virt_to_page(pte));
688 } while (addr += PAGE_SIZE, addr != end);
691 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
692 unsigned long end, unsigned long pfn,
697 unsigned long addr, next;
701 pmd = pmd_offset(pud, addr);
703 BUG_ON(pmd_sect(*pmd));
705 if (pmd_none(*pmd)) {
706 pte = pte_alloc_one_kernel(NULL);
708 kvm_err("Cannot allocate Hyp pte\n");
711 kvm_pmd_populate(pmd, pte);
712 get_page(virt_to_page(pmd));
715 next = pmd_addr_end(addr, end);
717 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
718 pfn += (next - addr) >> PAGE_SHIFT;
719 } while (addr = next, addr != end);
724 static int create_hyp_pud_mappings(p4d_t *p4d, unsigned long start,
725 unsigned long end, unsigned long pfn,
730 unsigned long addr, next;
735 pud = pud_offset(p4d, addr);
737 if (pud_none_or_clear_bad(pud)) {
738 pmd = pmd_alloc_one(NULL, addr);
740 kvm_err("Cannot allocate Hyp pmd\n");
743 kvm_pud_populate(pud, pmd);
744 get_page(virt_to_page(pud));
747 next = pud_addr_end(addr, end);
748 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
751 pfn += (next - addr) >> PAGE_SHIFT;
752 } while (addr = next, addr != end);
757 static int create_hyp_p4d_mappings(pgd_t *pgd, unsigned long start,
758 unsigned long end, unsigned long pfn,
763 unsigned long addr, next;
768 p4d = p4d_offset(pgd, addr);
770 if (p4d_none(*p4d)) {
771 pud = pud_alloc_one(NULL, addr);
773 kvm_err("Cannot allocate Hyp pud\n");
776 kvm_p4d_populate(p4d, pud);
777 get_page(virt_to_page(p4d));
780 next = p4d_addr_end(addr, end);
781 ret = create_hyp_pud_mappings(p4d, addr, next, pfn, prot);
784 pfn += (next - addr) >> PAGE_SHIFT;
785 } while (addr = next, addr != end);
790 static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
791 unsigned long start, unsigned long end,
792 unsigned long pfn, pgprot_t prot)
796 unsigned long addr, next;
799 mutex_lock(&kvm_hyp_pgd_mutex);
800 addr = start & PAGE_MASK;
801 end = PAGE_ALIGN(end);
803 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
805 if (pgd_none(*pgd)) {
806 p4d = p4d_alloc_one(NULL, addr);
808 kvm_err("Cannot allocate Hyp p4d\n");
812 kvm_pgd_populate(pgd, p4d);
813 get_page(virt_to_page(pgd));
816 next = pgd_addr_end(addr, end);
817 err = create_hyp_p4d_mappings(pgd, addr, next, pfn, prot);
820 pfn += (next - addr) >> PAGE_SHIFT;
821 } while (addr = next, addr != end);
823 mutex_unlock(&kvm_hyp_pgd_mutex);
827 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
829 if (!is_vmalloc_addr(kaddr)) {
830 BUG_ON(!virt_addr_valid(kaddr));
833 return page_to_phys(vmalloc_to_page(kaddr)) +
834 offset_in_page(kaddr);
839 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
840 * @from: The virtual kernel start address of the range
841 * @to: The virtual kernel end address of the range (exclusive)
842 * @prot: The protection to be applied to this range
844 * The same virtual address as the kernel virtual address is also used
845 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
848 int create_hyp_mappings(void *from, void *to, pgprot_t prot)
850 phys_addr_t phys_addr;
851 unsigned long virt_addr;
852 unsigned long start = kern_hyp_va((unsigned long)from);
853 unsigned long end = kern_hyp_va((unsigned long)to);
855 if (is_kernel_in_hyp_mode())
858 start = start & PAGE_MASK;
859 end = PAGE_ALIGN(end);
861 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
864 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
865 err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
866 virt_addr, virt_addr + PAGE_SIZE,
867 __phys_to_pfn(phys_addr),
876 static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
877 unsigned long *haddr, pgprot_t prot)
879 pgd_t *pgd = hyp_pgd;
883 mutex_lock(&kvm_hyp_pgd_mutex);
886 * This assumes that we have enough space below the idmap
887 * page to allocate our VAs. If not, the check below will
888 * kick in. A potential alternative would be to detect that
889 * overflow and switch to an allocation above the idmap.
891 * The allocated size is always a multiple of PAGE_SIZE.
893 size = PAGE_ALIGN(size + offset_in_page(phys_addr));
894 base = io_map_base - size;
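/* The private HYP VA window grows downwards from io_map_base. */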
897 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
898 * allocating the new area, as it would indicate we've
899 * overflowed the idmap/IO address range.
901 if ((base ^ io_map_base) & BIT(VA_BITS - 1))
906 mutex_unlock(&kvm_hyp_pgd_mutex);
911 if (__kvm_cpu_uses_extended_idmap())
914 ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
916 __phys_to_pfn(phys_addr), prot);
920 *haddr = base + offset_in_page(phys_addr);
927 * create_hyp_io_mappings - Map IO into both kernel and HYP
928 * @phys_addr: The physical start address which gets mapped
929 * @size: Size of the region being mapped
930 * @kaddr: Kernel VA for this mapping
931 * @haddr: HYP VA for this mapping
933 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
934 void __iomem **kaddr,
935 void __iomem **haddr)
940 *kaddr = ioremap(phys_addr, size);
944 if (is_kernel_in_hyp_mode()) {
949 ret = __create_hyp_private_mapping(phys_addr, size,
950 &addr, PAGE_HYP_DEVICE);
958 *haddr = (void __iomem *)addr;
963 * create_hyp_exec_mappings - Map an executable range into HYP
964 * @phys_addr: The physical start address which gets mapped
965 * @size: Size of the region being mapped
966 * @haddr: HYP VA for this mapping
968 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
974 BUG_ON(is_kernel_in_hyp_mode());
976 ret = __create_hyp_private_mapping(phys_addr, size,
977 &addr, PAGE_HYP_EXEC);
983 *haddr = (void *)addr;
988 * kvm_init_stage2_mmu - Initialise an S2 MMU structure
989 * @kvm: The pointer to the KVM structure
990 * @mmu: The pointer to the s2 MMU structure
992 * Allocates only the stage-2 HW PGD level table(s) of size defined by
993 * stage2_pgd_size(mmu->kvm).
995 * Note we don't need locking here as this is only called when the VM is
996 * created, which can only be done once.
998 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
1000 phys_addr_t pgd_phys;
1004 if (mmu->pgd != NULL) {
1005 kvm_err("kvm_arch already initialized?\n");
1009 /* Allocate the HW PGD, making sure that each page gets its own refcount */
1010 pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
1014 pgd_phys = virt_to_phys(pgd);
1015 if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
1018 mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
1019 if (!mmu->last_vcpu_ran) {
1020 free_pages_exact(pgd, stage2_pgd_size(kvm));
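/* -1 means no vcpu has ever run on that physical CPU with this MMU. */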
1024 for_each_possible_cpu(cpu)
1025 *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
1029 mmu->pgd_phys = pgd_phys;
1030 mmu->vmid.vmid_gen = 0;
1035 static void stage2_unmap_memslot(struct kvm *kvm,
1036 struct kvm_memory_slot *memslot)
1038 hva_t hva = memslot->userspace_addr;
1039 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
1040 phys_addr_t size = PAGE_SIZE * memslot->npages;
1041 hva_t reg_end = hva + size;
1044 * A memory region could potentially cover multiple VMAs, and any holes
1045 * between them, so iterate over all of them to find out if we should
1046 * unmap any of them.
1048 * +--------------------------------------------+
1049 * +---------------+----------------+ +----------------+
1050 * | : VMA 1 | VMA 2 | | VMA 3 : |
1051 * +---------------+----------------+ +----------------+
1053 * +--------------------------------------------+
1056 struct vm_area_struct *vma = find_vma(current->mm, hva);
1057 hva_t vm_start, vm_end;
1059 if (!vma || vma->vm_start >= reg_end)
1063 * Take the intersection of this VMA with the memory region
1065 vm_start = max(hva, vma->vm_start);
1066 vm_end = min(reg_end, vma->vm_end);
1068 if (!(vma->vm_flags & VM_PFNMAP)) {
1069 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
1070 unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
1073 } while (hva < reg_end);
1077 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
1078 * @kvm: The struct kvm pointer
1080 * Go through the memregions and unmap any regular RAM
1081 * backing memory already mapped to the VM.
1083 void stage2_unmap_vm(struct kvm *kvm)
1085 struct kvm_memslots *slots;
1086 struct kvm_memory_slot *memslot;
1089 idx = srcu_read_lock(&kvm->srcu);
1090 mmap_read_lock(current->mm);
1091 spin_lock(&kvm->mmu_lock);
1093 slots = kvm_memslots(kvm);
1094 kvm_for_each_memslot(memslot, slots)
1095 stage2_unmap_memslot(kvm, memslot);
1097 spin_unlock(&kvm->mmu_lock);
1098 mmap_read_unlock(current->mm);
1099 srcu_read_unlock(&kvm->srcu, idx);
1102 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
1104 struct kvm *kvm = mmu->kvm;
1107 spin_lock(&kvm->mmu_lock);
1109 unmap_stage2_range(mmu, 0, kvm_phys_size(kvm));
1110 pgd = READ_ONCE(mmu->pgd);
1113 spin_unlock(&kvm->mmu_lock);
1115 /* Free the HW pgd, one page at a time */
1117 free_pages_exact(pgd, stage2_pgd_size(kvm));
1118 free_percpu(mmu->last_vcpu_ran);
1122 static p4d_t *stage2_get_p4d(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
1125 struct kvm *kvm = mmu->kvm;
1129 pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
1130 if (stage2_pgd_none(kvm, *pgd)) {
1133 p4d = kvm_mmu_memory_cache_alloc(cache);
1134 stage2_pgd_populate(kvm, pgd, p4d);
1135 get_page(virt_to_page(pgd));
1138 return stage2_p4d_offset(kvm, pgd, addr);
1141 static pud_t *stage2_get_pud(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
1144 struct kvm *kvm = mmu->kvm;
1148 p4d = stage2_get_p4d(mmu, cache, addr);
1149 if (stage2_p4d_none(kvm, *p4d)) {
1152 pud = kvm_mmu_memory_cache_alloc(cache);
1153 stage2_p4d_populate(kvm, p4d, pud);
1154 get_page(virt_to_page(p4d));
1157 return stage2_pud_offset(kvm, p4d, addr);
1160 static pmd_t *stage2_get_pmd(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
1163 struct kvm *kvm = mmu->kvm;
1167 pud = stage2_get_pud(mmu, cache, addr);
1168 if (!pud || stage2_pud_huge(kvm, *pud))
1171 if (stage2_pud_none(kvm, *pud)) {
1174 pmd = kvm_mmu_memory_cache_alloc(cache);
1175 stage2_pud_populate(kvm, pud, pmd);
1176 get_page(virt_to_page(pud));
1179 return stage2_pmd_offset(kvm, pud, addr);
1182 static int stage2_set_pmd_huge(struct kvm_s2_mmu *mmu,
1183 struct kvm_mmu_memory_cache *cache,
1184 phys_addr_t addr, const pmd_t *new_pmd)
1186 pmd_t *pmd, old_pmd;
1189 pmd = stage2_get_pmd(mmu, cache, addr);
1194 * Multiple vcpus faulting on the same PMD entry can
1195 * lead to them sequentially updating the PMD with the
1196 * same value. Following the break-before-make
1197 * (pmd_clear() followed by tlb_flush()) process can
1198 * hinder forward progress due to refaults generated
1199 * on missing translations.
1201 * Skip updating the page table if the entry is
1204 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1207 if (pmd_present(old_pmd)) {
1209 * If we already have PTE level mapping for this block,
1210 * we must unmap it to avoid inconsistent TLB state and
1211 * leaking the table page. We could end up in this situation
1212 * if the memory slot was marked for dirty logging and was
1213 * reverted, leaving PTE level mappings for the pages accessed
1214 * during the period. So, unmap the PTE level mapping for this
1215 * block and retry, as we could have released the upper level
1216 * table in the process.
1218 * Normal THP splits and merges go through the mmu_notifier callbacks
1219 * and are handled accordingly.
1221 if (!pmd_thp_or_huge(old_pmd)) {
1222 unmap_stage2_range(mmu, addr & S2_PMD_MASK, S2_PMD_SIZE);
1226 * Mapping in huge pages should only happen through a
1227 * fault. If a page is merged into a transparent huge
1228 * page, the individual subpages of that huge page
1229 * should be unmapped through MMU notifiers before we
1232 * Merging of CompoundPages is not supported; they
1233 * should be split first, unmapped, merged,
1234 * and mapped back in on demand.
1236 WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
1238 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
1240 get_page(virt_to_page(pmd));
1243 kvm_set_pmd(pmd, *new_pmd);
1247 static int stage2_set_pud_huge(struct kvm_s2_mmu *mmu,
1248 struct kvm_mmu_memory_cache *cache,
1249 phys_addr_t addr, const pud_t *new_pudp)
1251 struct kvm *kvm = mmu->kvm;
1252 pud_t *pudp, old_pud;
1255 pudp = stage2_get_pud(mmu, cache, addr);
1261 * A large number of vcpus faulting on the same stage 2 entry
1262 * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
1263 * Skip updating the page tables if there is no change.
1265 if (pud_val(old_pud) == pud_val(*new_pudp))
1268 if (stage2_pud_present(kvm, old_pud)) {
1270 * If we already have table level mapping for this block, unmap
1271 * the range for this block and retry.
1273 if (!stage2_pud_huge(kvm, old_pud)) {
1274 unmap_stage2_range(mmu, addr & S2_PUD_MASK, S2_PUD_SIZE);
1278 WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
1279 stage2_pud_clear(kvm, pudp);
1280 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
1282 get_page(virt_to_page(pudp));
1285 kvm_set_pud(pudp, *new_pudp);
1290 * stage2_get_leaf_entry - walk the stage2 VM page tables and return
1291 * true if a valid and present leaf-entry is found. A pointer to the
1292 * leaf-entry is returned in the appropriate level variable - pudpp,
1295 static bool stage2_get_leaf_entry(struct kvm_s2_mmu *mmu, phys_addr_t addr,
1296 pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
1298 struct kvm *kvm = mmu->kvm;
1307 pudp = stage2_get_pud(mmu, NULL, addr);
1308 if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
1311 if (stage2_pud_huge(kvm, *pudp)) {
1316 pmdp = stage2_pmd_offset(kvm, pudp, addr);
1317 if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
1320 if (pmd_thp_or_huge(*pmdp)) {
1325 ptep = pte_offset_kernel(pmdp, addr);
1326 if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
1333 static bool stage2_is_exec(struct kvm_s2_mmu *mmu, phys_addr_t addr, unsigned long sz)
1340 found = stage2_get_leaf_entry(mmu, addr, &pudp, &pmdp, &ptep);
1345 return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
1347 return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
1349 return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
1352 static int stage2_set_pte(struct kvm_s2_mmu *mmu,
1353 struct kvm_mmu_memory_cache *cache,
1354 phys_addr_t addr, const pte_t *new_pte,
1355 unsigned long flags)
1357 struct kvm *kvm = mmu->kvm;
1360 pte_t *pte, old_pte;
1361 bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
1362 bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
1364 VM_BUG_ON(logging_active && !cache);
1366 /* Create stage-2 page table mapping - Levels 0 and 1 */
1367 pud = stage2_get_pud(mmu, cache, addr);
1370 * Ignore calls from kvm_set_spte_hva for unallocated
1377 * While dirty page logging - dissolve huge PUD, then continue
1378 * on to allocate page.
1381 stage2_dissolve_pud(mmu, addr, pud);
1383 if (stage2_pud_none(kvm, *pud)) {
1385 return 0; /* ignore calls from kvm_set_spte_hva */
1386 pmd = kvm_mmu_memory_cache_alloc(cache);
1387 stage2_pud_populate(kvm, pud, pmd);
1388 get_page(virt_to_page(pud));
1391 pmd = stage2_pmd_offset(kvm, pud, addr);
1394 * Ignore calls from kvm_set_spte_hva for unallocated
1401 * While dirty page logging - dissolve huge PMD, then continue on to
1405 stage2_dissolve_pmd(mmu, addr, pmd);
1407 /* Create stage-2 page mappings - Level 2 */
1408 if (pmd_none(*pmd)) {
1410 return 0; /* ignore calls from kvm_set_spte_hva */
1411 pte = kvm_mmu_memory_cache_alloc(cache);
1412 kvm_pmd_populate(pmd, pte);
1413 get_page(virt_to_page(pmd));
1416 pte = pte_offset_kernel(pmd, addr);
1418 if (iomap && pte_present(*pte))
1421 /* Create 2nd stage page table mapping - Level 3 */
1423 if (pte_present(old_pte)) {
1424 /* Skip page table update if there is no change */
1425 if (pte_val(old_pte) == pte_val(*new_pte))
1428 kvm_set_pte(pte, __pte(0));
1429 kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
1431 get_page(virt_to_page(pte));
1434 kvm_set_pte(pte, *new_pte);
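/*
 * Stage-2 "young" helpers: test and clear the access flag in a stage-2
 * entry, using the architecture's ptep helper when it provides one.
 */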
1438 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1439 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1441 if (pte_young(*pte)) {
1442 *pte = pte_mkold(*pte);
1448 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1450 return __ptep_test_and_clear_young(pte);
1454 static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1456 return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1459 static int stage2_pudp_test_and_clear_young(pud_t *pud)
1461 return stage2_ptep_test_and_clear_young((pte_t *)pud);
1465 * kvm_phys_addr_ioremap - map a device range to guest IPA
1467 * @kvm: The KVM pointer
1468 * @guest_ipa: The IPA at which to insert the mapping
1469 * @pa: The physical address of the device
1470 * @size: The size of the mapping
 * @writable: Whether or not to create a writable mapping
1472 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1473 phys_addr_t pa, unsigned long size, bool writable)
1475 phys_addr_t addr, end;
1478 struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
1480 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1481 pfn = __phys_to_pfn(pa);
1483 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
1484 pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
1487 pte = kvm_s2pte_mkwrite(pte);
1489 ret = kvm_mmu_topup_memory_cache(&cache,
1490 kvm_mmu_cache_min_pages(kvm));
1493 spin_lock(&kvm->mmu_lock);
1494 ret = stage2_set_pte(&kvm->arch.mmu, &cache, addr, &pte,
1495 KVM_S2PTE_FLAG_IS_IOMAP);
1496 spin_unlock(&kvm->mmu_lock);
1504 kvm_mmu_free_memory_cache(&cache);
1509 * stage2_wp_ptes - write protect PMD range
1510 * @pmd: pointer to pmd entry
1511 * @addr: range start address
1512 * @end: range end address
1514 static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1518 pte = pte_offset_kernel(pmd, addr);
1520 if (!pte_none(*pte)) {
1521 if (!kvm_s2pte_readonly(pte))
1522 kvm_set_s2pte_readonly(pte);
1524 } while (pte++, addr += PAGE_SIZE, addr != end);
1528 * stage2_wp_pmds - write protect PUD range
1529 * @mmu: stage-2 MMU structure for the VM
1530 * @pud: pointer to pud entry
1531 * @addr: range start address
1532 * @end: range end address
1534 static void stage2_wp_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
1535 phys_addr_t addr, phys_addr_t end)
1537 struct kvm *kvm = mmu->kvm;
1541 pmd = stage2_pmd_offset(kvm, pud, addr);
1544 next = stage2_pmd_addr_end(kvm, addr, end);
1545 if (!pmd_none(*pmd)) {
1546 if (pmd_thp_or_huge(*pmd)) {
1547 if (!kvm_s2pmd_readonly(pmd))
1548 kvm_set_s2pmd_readonly(pmd);
1550 stage2_wp_ptes(pmd, addr, next);
1553 } while (pmd++, addr = next, addr != end);
1557 * stage2_wp_puds - write protect P4D range
1558 * @p4d: pointer to p4d entry
1559 * @addr: range start address
1560 * @end: range end address
1562 static void stage2_wp_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
1563 phys_addr_t addr, phys_addr_t end)
1565 struct kvm *kvm = mmu->kvm;
1569 pud = stage2_pud_offset(kvm, p4d, addr);
1571 next = stage2_pud_addr_end(kvm, addr, end);
1572 if (!stage2_pud_none(kvm, *pud)) {
1573 if (stage2_pud_huge(kvm, *pud)) {
1574 if (!kvm_s2pud_readonly(pud))
1575 kvm_set_s2pud_readonly(pud);
1577 stage2_wp_pmds(mmu, pud, addr, next);
1580 } while (pud++, addr = next, addr != end);
1584 * stage2_wp_p4ds - write protect PGD range
1585 * @pgd: pointer to pgd entry
1586 * @addr: range start address
1587 * @end: range end address
1589 static void stage2_wp_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
1590 phys_addr_t addr, phys_addr_t end)
1592 struct kvm *kvm = mmu->kvm;
1596 p4d = stage2_p4d_offset(kvm, pgd, addr);
1598 next = stage2_p4d_addr_end(kvm, addr, end);
1599 if (!stage2_p4d_none(kvm, *p4d))
1600 stage2_wp_puds(mmu, p4d, addr, next);
1601 } while (p4d++, addr = next, addr != end);
1605 * stage2_wp_range() - write protect stage2 memory region range
1606 * @mmu: The stage-2 MMU pointer
1607 * @addr: Start address of range
1608 * @end: End address of range
1610 static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
1612 struct kvm *kvm = mmu->kvm;
1616 pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
1619 * Release kvm_mmu_lock periodically if the memory region is
1620 * large. Otherwise, we may see kernel panics with
1621 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1622 * CONFIG_LOCKDEP. Additionally, holding the lock too long
1623 * will also starve other vCPUs. We have to also make sure
1624 * that the page tables are not freed while we released
1627 cond_resched_lock(&kvm->mmu_lock);
1628 if (!READ_ONCE(mmu->pgd))
1630 next = stage2_pgd_addr_end(kvm, addr, end);
1631 if (stage2_pgd_present(kvm, *pgd))
1632 stage2_wp_p4ds(mmu, pgd, addr, next);
1633 } while (pgd++, addr = next, addr != end);
1637 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1638 * @kvm: The KVM pointer
1639 * @slot: The memory slot to write protect
1641 * Called to start logging dirty pages after the KVM_MEM_LOG_DIRTY_PAGES
1642 * flag is set on a memory region. After this function returns, all present
1643 * PUD, PMD and PTE entries in the memory region are write protected.
1644 * Afterwards, the dirty page log can be read.
1646 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1647 * serializing operations for VM memory regions.
1649 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1651 struct kvm_memslots *slots = kvm_memslots(kvm);
1652 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1653 phys_addr_t start, end;
1655 if (WARN_ON_ONCE(!memslot))
1658 start = memslot->base_gfn << PAGE_SHIFT;
1659 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1661 spin_lock(&kvm->mmu_lock);
1662 stage2_wp_range(&kvm->arch.mmu, start, end);
1663 spin_unlock(&kvm->mmu_lock);
1664 kvm_flush_remote_tlbs(kvm);
1668 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1669 * @kvm: The KVM pointer
1670 * @slot: The memory slot associated with mask
1671 * @gfn_offset: The gfn offset in memory slot
1672 * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
1673 * slot to be write protected
1675 * Walks the bits set in @mask and write protects the associated PTEs. The
1676 * caller must hold kvm_mmu_lock.
1678 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1679 struct kvm_memory_slot *slot,
1680 gfn_t gfn_offset, unsigned long mask)
1682 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1683 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1684 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
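/*
 * Example: with gfn_offset == 0 and mask == 0b0110, __ffs(mask) == 1 and
 * __fls(mask) == 2, so the IPA range from base_gfn + 1 up to (but not
 * including) base_gfn + 3 is write protected.
 */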
1686 stage2_wp_range(&kvm->arch.mmu, start, end);
1690 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1693 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1694 * enable dirty logging for them.
1696 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1697 struct kvm_memory_slot *slot,
1698 gfn_t gfn_offset, unsigned long mask)
1700 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1703 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
1705 __clean_dcache_guest_page(pfn, size);
1708 static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
1710 __invalidate_icache_guest_page(pfn, size);
1713 static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
1715 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
1718 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1720 unsigned long map_size)
1723 hva_t uaddr_start, uaddr_end;
1726 /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
1727 if (map_size == PAGE_SIZE)
1730 size = memslot->npages * PAGE_SIZE;
1732 gpa_start = memslot->base_gfn << PAGE_SHIFT;
1734 uaddr_start = memslot->userspace_addr;
1735 uaddr_end = uaddr_start + size;
1738 * Pages belonging to memslots that don't have the same alignment
1739 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
1740 * PMD/PUD entries, because we'll end up mapping the wrong pages.
1742 * Consider a layout like the following:
1744 * memslot->userspace_addr:
1745 * +-----+--------------------+--------------------+---+
1746 * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz|
1747 * +-----+--------------------+--------------------+---+
1749 * memslot->base_gfn << PAGE_SHIFT:
1750 * +---+--------------------+--------------------+-----+
1751 * |abc|def Stage-2 block | Stage-2 block |tvxyz|
1752 * +---+--------------------+--------------------+-----+
1754 * If we create those stage-2 blocks, we'll end up with this incorrect
1760 if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
1764 * Next, let's make sure we're not trying to map anything not covered
1765 * by the memslot. This means we have to prohibit block size mappings
1766 * for the beginning and end of a non-block aligned and non-block sized
1767 * memory slot (illustrated by the head and tail parts of the
1768 * userspace view above containing pages 'abcde' and 'xyz',
1771 * Note that it doesn't matter if we do the check using the
1772 * userspace_addr or the base_gfn, as both are equally aligned (per
1773 * the check above) and equally sized.
1775 return (hva & ~(map_size - 1)) >= uaddr_start &&
1776 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
1780 * Check if the given hva is backed by a transparent huge page (THP) and
1781 * whether it can be mapped using block mapping in stage2. If so, adjust
1782 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
1783 * supported. This will need to be updated to support other THP sizes.
1785 * Returns the size of the mapping.
1787 static unsigned long
1788 transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
1789 unsigned long hva, kvm_pfn_t *pfnp,
1792 kvm_pfn_t pfn = *pfnp;
1795 * Make sure the adjustment is done only for THP pages. Also make
1796 * sure that the HVA and IPA are sufficiently aligned and that the
1797 * block map is contained within the memslot.
1799 if (kvm_is_transparent_hugepage(pfn) &&
1800 fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
1802 * The address we faulted on is backed by a transparent huge
1803 * page. However, because we map the compound huge page and
1804 * not the individual tail page, we need to transfer the
1805 * refcount to the head page. We have to be careful that the
1806 * THP doesn't start to split while we are adjusting the
1809 * We are sure this doesn't happen, because mmu_notifier_retry
1810 * was successful and we are holding the mmu_lock, so if this
1811 * THP is trying to split, it will be blocked in the mmu
1812 * notifier before touching any of the pages, specifically
1813 * before being able to call __split_huge_page_refcount().
1815 * We can therefore safely transfer the refcount from PG_tail
1816 * to PG_head and switch the pfn from a tail page to the head
1820 kvm_release_pfn_clean(pfn);
1821 pfn &= ~(PTRS_PER_PMD - 1);
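/* pfn is now the PMD-aligned head page of the compound page. */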
1828 /* Use page mapping if we cannot use block mapping. */
1832 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1833 struct kvm_memory_slot *memslot, unsigned long hva,
1834 unsigned long fault_status)
1837 bool write_fault, writable, force_pte = false;
1838 bool exec_fault, needs_exec;
1839 unsigned long mmu_seq;
1840 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
1841 struct kvm *kvm = vcpu->kvm;
1842 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1843 struct vm_area_struct *vma;
1846 pgprot_t mem_type = PAGE_S2;
1847 bool logging_active = memslot_is_logging(memslot);
1848 unsigned long vma_pagesize, flags = 0;
1849 struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
1851 write_fault = kvm_is_write_fault(vcpu);
1852 exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
1853 VM_BUG_ON(write_fault && exec_fault);
1855 if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
1856 kvm_err("Unexpected L2 read permission error\n");
1860 /* Let's check if we will get back a huge page backed by hugetlbfs */
1861 mmap_read_lock(current->mm);
1862 vma = find_vma_intersection(current->mm, hva, hva + 1);
1863 if (unlikely(!vma)) {
1864 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1865 mmap_read_unlock(current->mm);
1869 if (is_vm_hugetlb_page(vma))
1870 vma_shift = huge_page_shift(hstate_vma(vma));
1872 vma_shift = PAGE_SHIFT;
1874 vma_pagesize = 1ULL << vma_shift;
1875 if (logging_active ||
1876 (vma->vm_flags & VM_PFNMAP) ||
1877 !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
1879 vma_pagesize = PAGE_SIZE;
1880 vma_shift = PAGE_SHIFT;
1884 * Stage 2 has a minimum of 2 levels of page tables (for arm64 see
1885 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
1886 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
1887 * As for PUD huge maps, we must make sure that we have at least
1888 * 3 levels, i.e., the PMD is not folded.
1890 if (vma_pagesize == PMD_SIZE ||
1891 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
1892 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
1893 mmap_read_unlock(current->mm);
1895 /* We need minimum second+third level pages */
1896 ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
1900 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1902 * Ensure the read of mmu_notifier_seq happens before we call
1903 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1904 * the page we just got a reference to getting unmapped before we have a
1905 * chance to grab the mmu_lock, which ensures that if the page gets
1906 * unmapped afterwards, the call to kvm_unmap_hva will take it away
1907 * from us again properly. This smp_rmb() interacts with the smp_wmb()
1908 * in kvm_mmu_notifier_invalidate_<page|range_end>.
1912 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
1913 if (pfn == KVM_PFN_ERR_HWPOISON) {
1914 kvm_send_hwpoison_signal(hva, vma_shift);
1917 if (is_error_noslot_pfn(pfn))
1920 if (kvm_is_device_pfn(pfn)) {
1921 mem_type = PAGE_S2_DEVICE;
1922 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1923 } else if (logging_active) {
1925 * Pages in a memslot with logging enabled
1926 * should not be mapped with huge pages (huge mappings introduce churn
1927 * and performance degradation), so force a pte mapping.
1929 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1932 * Only actually map the page as writable if this was a write
1939 if (exec_fault && is_iomap(flags))
1942 spin_lock(&kvm->mmu_lock);
1943 if (mmu_notifier_retry(kvm, mmu_seq))
1947 * If we are not forced to use page mapping, check if we are
1948 * backed by a THP and thus use block mapping if possible.
1950 if (vma_pagesize == PAGE_SIZE && !force_pte)
1951 vma_pagesize = transparent_hugepage_adjust(memslot, hva,
1954 kvm_set_pfn_dirty(pfn);
1956 if (fault_status != FSC_PERM && !is_iomap(flags))
1957 clean_dcache_guest_page(pfn, vma_pagesize);
1960 invalidate_icache_guest_page(pfn, vma_pagesize);
1963 * If we took an execution fault we have made the
1964 * icache/dcache coherent above and should now let the s2
1965 * mapping be executable.
1967 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
1968 * execute permissions, and we preserve whatever we have.
1970 needs_exec = exec_fault ||
1971 (fault_status == FSC_PERM &&
1972 stage2_is_exec(mmu, fault_ipa, vma_pagesize));
1975 * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and
1976 * all we have is a 2-level page table. Trying to map a PUD in
1977 * this case would be fatally wrong.
1979 if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) {
1980 pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
1982 new_pud = kvm_pud_mkhuge(new_pud);
1984 new_pud = kvm_s2pud_mkwrite(new_pud);
1987 new_pud = kvm_s2pud_mkexec(new_pud);
1989 ret = stage2_set_pud_huge(mmu, memcache, fault_ipa, &new_pud);
1990 } else if (vma_pagesize == PMD_SIZE) {
1991 pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
1993 new_pmd = kvm_pmd_mkhuge(new_pmd);
1996 new_pmd = kvm_s2pmd_mkwrite(new_pmd);
1999 new_pmd = kvm_s2pmd_mkexec(new_pmd);
2001 ret = stage2_set_pmd_huge(mmu, memcache, fault_ipa, &new_pmd);
2003 pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
2006 new_pte = kvm_s2pte_mkwrite(new_pte);
2007 mark_page_dirty(kvm, gfn);
2011 new_pte = kvm_s2pte_mkexec(new_pte);
2013 ret = stage2_set_pte(mmu, memcache, fault_ipa, &new_pte, flags);
2017 spin_unlock(&kvm->mmu_lock);
2018 kvm_set_pfn_accessed(pfn);
2019 kvm_release_pfn_clean(pfn);
2024 * Resolve the access fault by making the page young again.
2025 * Note that because the faulting entry is guaranteed not to be
2026 * cached in the TLB, we don't need to invalidate anything.
2027 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
2028 * so there is no need for atomic (pte|pmd)_mkyoung operations.
2030 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
2036 bool pfn_valid = false;
2038 trace_kvm_access_fault(fault_ipa);
2040 spin_lock(&vcpu->kvm->mmu_lock);
2042 if (!stage2_get_leaf_entry(vcpu->arch.hw_mmu, fault_ipa, &pud, &pmd, &pte))
2045 if (pud) { /* HugeTLB */
2046 *pud = kvm_s2pud_mkyoung(*pud);
2047 pfn = kvm_pud_pfn(*pud);
2049 } else if (pmd) { /* THP, HugeTLB */
2050 *pmd = pmd_mkyoung(*pmd);
2051 pfn = pmd_pfn(*pmd);
2054 *pte = pte_mkyoung(*pte); /* Just a page... */
2055 pfn = pte_pfn(*pte);
2060 spin_unlock(&vcpu->kvm->mmu_lock);
2062 kvm_set_pfn_accessed(pfn);
2066 * kvm_handle_guest_abort - handles all 2nd stage aborts
2067 * @vcpu: the VCPU pointer
2069 * Any abort that gets to the host is almost guaranteed to be caused by a
2070 * missing second stage translation table entry, which can mean that either the
2071 * guest simply needs more memory and we must allocate an appropriate page or it
2072 * can mean that the guest tried to access I/O memory, which is emulated by user
2073 * space. The distinction is based on the IPA causing the fault and whether this
2074 * memory region has been registered as standard RAM by user space.
2076 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
2078 unsigned long fault_status;
2079 phys_addr_t fault_ipa;
2080 struct kvm_memory_slot *memslot;
2082 bool is_iabt, write_fault, writable;
2086 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
2088 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
2089 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
2091 /* Synchronous External Abort? */
2092 if (kvm_vcpu_abt_issea(vcpu)) {
2094 * For RAS the host kernel may handle this abort.
2095 * There is no need to pass the error into the guest.
2097 if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
2098 kvm_inject_vabt(vcpu);
2103 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
2104 kvm_vcpu_get_hfar(vcpu), fault_ipa);
2106 /* Check that the stage-2 fault is a translation, permission or access flag fault */
2107 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
2108 fault_status != FSC_ACCESS) {
2109 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
2110 kvm_vcpu_trap_get_class(vcpu),
2111 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
2112 (unsigned long)kvm_vcpu_get_esr(vcpu));
2116 idx = srcu_read_lock(&vcpu->kvm->srcu);
2118 gfn = fault_ipa >> PAGE_SHIFT;
2119 memslot = gfn_to_memslot(vcpu->kvm, gfn);
2120 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
2121 write_fault = kvm_is_write_fault(vcpu);
2122 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
2124 * The guest has put either its instructions or its page-tables
2125 * somewhere it shouldn't have. Userspace won't be able to do
2126 * anything about this (there's no syndrome for a start), so
2127 * re-inject the abort back into the guest.
2134 if (kvm_vcpu_abt_iss1tw(vcpu)) {
2135 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
2141 * Check for a cache maintenance operation. Since we
2142 * ended up here, we know it is outside of any memory
2143 * slot. But we can't find out if that is for a device,
2144 * or if the guest is just being stupid. The only thing
2145 * we know for sure is that this range cannot be cached.
2147 * So let's assume that the guest is just being
2148 * cautious, and skip the instruction.
2150 if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
2151 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
2157 * The IPA is reported as [MAX:12], so we need to
2158 * complement it with the bottom 12 bits from the
2159 * faulting VA. This is always 12 bits, irrespective
2162 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
2163 ret = io_mem_abort(vcpu, fault_ipa);
2167 /* Userspace should not be able to register out-of-bounds IPAs */
2168 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
2170 if (fault_status == FSC_ACCESS) {
2171 handle_access_fault(vcpu, fault_ipa);
2176 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
2180 if (ret == -ENOEXEC) {
2181 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
2185 srcu_read_unlock(&vcpu->kvm->srcu, idx);
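/*
 * handle_hva_to_gpa() - walk every memslot intersecting the HVA range
 * [start, end) and call @handler on each intersecting guest physical range;
 * the handlers' return values are OR'ed together.
 */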
2189 static int handle_hva_to_gpa(struct kvm *kvm,
2190 unsigned long start,
2192 int (*handler)(struct kvm *kvm,
2193 gpa_t gpa, u64 size,
2197 struct kvm_memslots *slots;
2198 struct kvm_memory_slot *memslot;
2201 slots = kvm_memslots(kvm);
2203 /* we only care about the pages that the guest sees */
2204 kvm_for_each_memslot(memslot, slots) {
2205 unsigned long hva_start, hva_end;
2208 hva_start = max(start, memslot->userspace_addr);
2209 hva_end = min(end, memslot->userspace_addr +
2210 (memslot->npages << PAGE_SHIFT));
2211 if (hva_start >= hva_end)
2214 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
2215 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
2221 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2223 unsigned flags = *(unsigned *)data;
2224 bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
2226 __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
2230 int kvm_unmap_hva_range(struct kvm *kvm,
2231 unsigned long start, unsigned long end, unsigned flags)
2233 if (!kvm->arch.mmu.pgd)
2236 trace_kvm_unmap_hva_range(start, end);
2237 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
2241 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2243 pte_t *pte = (pte_t *)data;
2245 WARN_ON(size != PAGE_SIZE);
2247 * We can always call stage2_set_pte with the KVM_S2_FLAG_LOGGING_ACTIVE
2248 * flag clear because MMU notifiers will have unmapped a huge PMD before
2249 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
2250 * therefore stage2_set_pte() never needs to clear out a huge PMD
2251 * through this calling path.
2253 stage2_set_pte(&kvm->arch.mmu, NULL, gpa, pte, 0);
2258 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
2260 unsigned long end = hva + PAGE_SIZE;
2261 kvm_pfn_t pfn = pte_pfn(pte);
2264 if (!kvm->arch.mmu.pgd)
2267 trace_kvm_set_spte_hva(hva);
2270 * We've moved a page around, probably through CoW, so let's treat it
2271 * just like a translation fault and clean the cache to the PoC.
2273 clean_dcache_guest_page(pfn, PAGE_SIZE);
2274 stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
2275 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
2280 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2286 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2287 if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte))
2291 return stage2_pudp_test_and_clear_young(pud);
2293 return stage2_pmdp_test_and_clear_young(pmd);
2295 return stage2_ptep_test_and_clear_young(pte);
2298 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2304 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2305 if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte))
2309 return kvm_s2pud_young(*pud);
2311 return pmd_young(*pmd);
2313 return pte_young(*pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
        if (!kvm->arch.mmu.pgd)
                return 0;
        trace_kvm_age_hva(start, end);
        return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        if (!kvm->arch.mmu.pgd)
                return 0;
        trace_kvm_test_age_hva(hva);
        return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
                                 kvm_test_age_hva_handler, NULL);
}

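/* Drop any pages still held in the vCPU's stage-2 page-table cache. */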
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

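/*
 * Return the physical address of the HYP stage-1 page tables, i.e. the value
 * loaded into TTBR0_EL2 when initialising EL2.
 */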
phys_addr_t kvm_mmu_get_httbr(void)
{
        if (__kvm_cpu_uses_extended_idmap())
                return virt_to_phys(merged_hyp_pgd);
        else
                return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
        return hyp_idmap_vector;
}

static int kvm_map_idmap_text(pgd_t *pgd)
{
        int err;

        /* Create the idmap in the boot page tables */
        err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
                                    hyp_idmap_start, hyp_idmap_end,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP_EXEC);
        if (err)
                kvm_err("Failed to idmap %lx-%lx\n",
                        hyp_idmap_start, hyp_idmap_end);

        return err;
}

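/*
 * One-time setup of the HYP memory management state: locate the idmap text
 * pages, allocate the HYP page tables and map the idmap into them.
 */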
int kvm_mmu_init(void)
{
        int err;

        hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
        hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
        hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
        hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
        hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);

        /*
         * We rely on the linker script to ensure at build time that the HYP
         * init code does not cross a page boundary.
         */
        BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);

        kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
        kvm_debug("HYP VA range: %lx:%lx\n",
                  kern_hyp_va(PAGE_OFFSET),
                  kern_hyp_va((unsigned long)high_memory - 1));

        if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
            hyp_idmap_start <  kern_hyp_va((unsigned long)high_memory - 1) &&
            hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
                /*
                 * The idmap page is intersecting with the VA space,
                 * it is not safe to continue further.
                 */
                kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
                err = -EINVAL;
                goto out;
        }

        hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
        if (!hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
                goto out;
        }

        if (__kvm_cpu_uses_extended_idmap()) {
                boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                         hyp_pgd_order);
                if (!boot_hyp_pgd) {
                        kvm_err("Hyp boot PGD not allocated\n");
                        err = -ENOMEM;
                        goto out;
                }

                err = kvm_map_idmap_text(boot_hyp_pgd);
                if (err)
                        goto out;

                merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                if (!merged_hyp_pgd) {
                        kvm_err("Failed to allocate extra HYP pgd\n");
                        err = -ENOMEM;
                        goto out;
                }
                __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
                                    hyp_idmap_start);
        } else {
                err = kvm_map_idmap_text(hyp_pgd);
                if (err)
                        goto out;
        }

        io_map_base = hyp_idmap_start;
        return 0;
out:
        free_hyp_pgds();
        return err;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   const struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
{
        /*
         * At this point the memslot has been committed and there is an
         * allocated dirty_bitmap[]; dirty pages will be tracked while the
         * memory slot is write protected.
         */
        if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * If we're with initial-all-set, we don't need to write
                 * protect any pages because they're all reported as dirty.
                 * Huge pages and normal pages will be write-protected gradually.
                 */
                if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
                        kvm_mmu_wp_memory_region(kvm, mem->slot);
                }
        }
}

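/*
 * Validate a memslot update before it is committed and, for VM_PFNMAP
 * (device) VMAs, map the covered region at stage 2 right away.
 */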
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   const struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        hva_t hva = mem->userspace_addr;
        hva_t reg_end = hva + mem->memory_size;
        bool writable = !(mem->flags & KVM_MEM_READONLY);
        int ret = 0;

        if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
            change != KVM_MR_FLAGS_ONLY)
                return 0;

        /*
         * Prevent userspace from creating a memory region outside of the IPA
         * space addressable by the KVM guest IPA space.
         */
        if (memslot->base_gfn + memslot->npages >=
            (kvm_phys_size(kvm) >> PAGE_SHIFT))
                return -EFAULT;

        mmap_read_lock(current->mm);
        /*
         * A memory region could potentially cover multiple VMAs, and any holes
         * between them, so iterate over all of them to find out if we can map
         * any of them right now.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
         * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
         * +---------------+----------------+   +----------------+
         *     |               memory region                |
         *     +--------------------------------------------+
         */
        do {
                struct vm_area_struct *vma = find_vma(current->mm, hva);
                hva_t vm_start, vm_end;

                if (!vma || vma->vm_start >= reg_end)
                        break;

                /*
                 * Take the intersection of this VMA with the memory region
                 */
                vm_start = max(hva, vma->vm_start);
                vm_end = min(reg_end, vma->vm_end);

                if (vma->vm_flags & VM_PFNMAP) {
                        gpa_t gpa = mem->guest_phys_addr +
                                    (vm_start - mem->userspace_addr);
                        phys_addr_t pa;

                        pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
                        pa += vm_start - vma->vm_start;

                        /* IO region dirty page logging not allowed */
                        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                                ret = -EINVAL;
                                goto out;
                        }

                        ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
                                                    vm_end - vm_start,
                                                    writable);
                        if (ret)
                                break;
                }
                hva = vm_end;
        } while (hva < reg_end);

        if (change == KVM_MR_FLAGS_ONLY)
                goto out;

        spin_lock(&kvm->mmu_lock);
        if (ret)
                unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
        else
                stage2_flush_memslot(kvm, memslot);
        spin_unlock(&kvm->mmu_lock);
out:
        mmap_read_unlock(current->mm);
        return ret;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
        kvm_free_stage2_pgd(&kvm->arch.mmu);
}

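/*
 * Remove the stage-2 mappings for a memslot that is being deleted or moved
 * so the guest can no longer access its old backing pages.
 */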
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = slot->npages << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        unmap_stage2_range(&kvm->arch.mmu, gpa, size);
        spin_unlock(&kvm->mmu_lock);
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 *
 * Main problems:
 * - S/W ops are local to a CPU (not broadcast)
 * - We have line migration behind our back (speculation)
 * - System caches don't support S/W at all (damn!)
 *
 * In the face of the above, the best we can do is to try and convert
 * S/W ops to VA ops. Because the guest is not allowed to infer the
 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
 * which is a rather good thing for us.
 *
 * Also, it is only used when turning caches on/off ("The expected
 * usage of the cache maintenance instructions that operate by set/way
 * is associated with the cache maintenance instructions associated
 * with the powerdown and powerup of caches, if this is required by
 * the implementation.").
 *
 * We use the following policy:
 *
 * - If we trap a S/W operation, we enable VM trapping to detect
 *   caches being turned on/off, and do a full clean.
 *
 * - We flush the caches on both caches being turned on and off.
 *
 * - Once the caches are enabled, we stop trapping VM ops.
 */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
        unsigned long hcr = *vcpu_hcr(vcpu);

        /*
         * If this is the first time we do a S/W operation
         * (i.e. HCR_TVM not set) flush the whole memory, and set the
         * VM trapping.
         *
         * Otherwise, rely on the VM trapping to wait for the MMU +
         * Caches to be turned off. At that point, we'll be able to
         * clean the caches again.
         */
        if (!(hcr & HCR_TVM)) {
                trace_kvm_set_way_flush(*vcpu_pc(vcpu),
                                        vcpu_has_cache_enabled(vcpu));
                stage2_flush_vm(vcpu->kvm);
                *vcpu_hcr(vcpu) = hcr | HCR_TVM;
        }
}

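/*
 * Called after a trapped write to a cache-related system register;
 * @was_enabled is the cache-enable state observed before the write.
 */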
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
        bool now_enabled = vcpu_has_cache_enabled(vcpu);

        /*
         * If switching the MMU+caches on, need to invalidate the caches.
         * If switching it off, need to clean the caches.
         * Clean + invalidate does the trick always.
         */
        if (now_enabled != was_enabled)
                stage2_flush_vm(vcpu->kvm);

        /* Caches are now on, stop trapping VM ops (until a S/W op) */
        if (now_enabled)
                *vcpu_hcr(vcpu) &= ~HCR_TVM;

        trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}