// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
 * No bombay mix was harmed in the writing of this file.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW	1
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 51)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_PTE_LEAF_ATTR_S2_IGNORED	GENMASK(58, 55)

#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(63, 56)
#define KVM_MAX_OWNER_ID		1

struct kvm_pgtable_walk_data {
	struct kvm_pgtable		*pgt;
	struct kvm_pgtable_walker	*walker;

	u64				addr;
	u64				end;
};
static u64 kvm_granule_shift(u32 level)
{
	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static u64 kvm_granule_size(u32 level)
{
	return BIT(kvm_granule_shift(level));
}

#define KVM_PHYS_INVALID (-1ULL)

static bool kvm_phys_is_valid(u64 phys)
{
	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
}

static bool kvm_level_supports_block_mapping(u32 level)
{
	/*
	 * Reject invalid block mappings and don't bother with 4TB mappings for
	 * 52-bit PAs.
	 */
	return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
}

static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
{
	u64 granule = kvm_granule_size(level);

	if (!kvm_level_supports_block_mapping(level))
		return false;

	if (granule > (end - addr))
		return false;

	if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
		return false;

	return IS_ALIGNED(addr, granule);
}

static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
{
	u64 shift = kvm_granule_shift(level);
	u64 mask = BIT(PAGE_SHIFT - 3) - 1;

	return (data->addr >> shift) & mask;
}

static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
	u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
	u64 mask = BIT(pgt->ia_bits) - 1;

	return (addr & mask) >> shift;
}
static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
{
	return __kvm_pgd_page_idx(data->pgt, data->addr);
}

static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
	struct kvm_pgtable pgt = {
		.ia_bits	= ia_bits,
		.start_level	= start_level,
	};

	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}

static bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static bool kvm_pte_table(kvm_pte_t pte, u32 level)
{
	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
		return false;

	if (!kvm_pte_valid(pte))
		return false;

	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

static u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa = pte & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;

	return pa;
}

static kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);

	return pte;
}

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

static void kvm_clear_pte(kvm_pte_t *ptep)
{
	WRITE_ONCE(*ptep, 0);
}

static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
			      struct kvm_pgtable_mm_ops *mm_ops)
{
	kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));

	pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
	pte |= KVM_PTE_VALID;

	WARN_ON(kvm_pte_valid(old));
	smp_store_release(ptep, pte);
}

static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
{
	kvm_pte_t pte = kvm_phys_to_pte(pa);
	u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
							   KVM_PTE_TYPE_BLOCK;

	pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
	pte |= FIELD_PREP(KVM_PTE_TYPE, type);
	pte |= KVM_PTE_VALID;

	return pte;
}

static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
{
	return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
}

static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,
				  u32 level, kvm_pte_t *ptep,
				  enum kvm_pgtable_walk_flags flag)
{
	struct kvm_pgtable_walker *walker = data->walker;

	return walker->cb(addr, data->end, level, ptep, flag, walker->arg);
}
static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      kvm_pte_t *pgtable, u32 level);

static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
				      kvm_pte_t *ptep, u32 level)
{
	int ret = 0;
	u64 addr = data->addr;
	kvm_pte_t *childp, pte = *ptep;
	bool table = kvm_pte_table(pte, level);
	enum kvm_pgtable_walk_flags flags = data->walker->flags;

	if (table && (flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_TABLE_PRE);
	}

	if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_LEAF);
		pte = *ptep;
		table = kvm_pte_table(pte, level);
	}

	if (ret)
		goto out;

	if (!table) {
		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
		data->addr += kvm_granule_size(level);
		goto out;
	}

	childp = kvm_pte_follow(pte, data->pgt->mm_ops);
	ret = __kvm_pgtable_walk(data, childp, level + 1);
	if (ret)
		goto out;

	if (flags & KVM_PGTABLE_WALK_TABLE_POST) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_TABLE_POST);
	}

out:
	return ret;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      kvm_pte_t *pgtable, u32 level)
{
	u32 idx;
	int ret = 0;

	if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS))
		return -EINVAL;

	for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
		kvm_pte_t *ptep = &pgtable[idx];

		if (data->addr >= data->end)
			break;

		ret = __kvm_pgtable_visit(data, ptep, level);
		if (ret)
			break;
	}

	return ret;
}

static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
{
	u32 idx;
	int ret = 0;
	struct kvm_pgtable *pgt = data->pgt;
	u64 limit = BIT(pgt->ia_bits);

	if (data->addr > limit || data->end > limit)
		return -ERANGE;

	if (!pgt->pgd)
		return -EINVAL;

	for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
		kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];

		ret = __kvm_pgtable_walk(data, ptep, pgt->start_level);
		if (ret)
			break;
	}

	return ret;
}

int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker)
{
	struct kvm_pgtable_walk_data walk_data = {
		.pgt	= pgt,
		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
		.end	= PAGE_ALIGN(walk_data.addr + size),
		.walker	= walker,
	};

	return _kvm_pgtable_walk(&walk_data);
}
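
/*
 * Example, for illustration only: a minimal walker that counts the valid
 * leaf entries in a range. The callback signature and the LEAF flag come
 * from the kvm_pgtable_walker API used above; 'count_leaf_walker' and
 * 'count_valid_leaves' are hypothetical helpers that exist only in this
 * comment.
 *
 *	static int count_leaf_walker(u64 addr, u64 end, u32 level,
 *				     kvm_pte_t *ptep,
 *				     enum kvm_pgtable_walk_flags flag,
 *				     void * const arg)
 *	{
 *		u64 *count = arg;
 *
 *		if (kvm_pte_valid(*ptep))
 *			(*count)++;
 *
 *		return 0;
 *	}
 *
 *	static u64 count_valid_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size)
 *	{
 *		u64 count = 0;
 *		struct kvm_pgtable_walker walker = {
 *			.cb	= count_leaf_walker,
 *			.flags	= KVM_PGTABLE_WALK_LEAF,
 *			.arg	= &count,
 *		};
 *
 *		kvm_pgtable_walk(pgt, addr, size, &walker);
 *		return count;
 *	}
 */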
struct hyp_map_data {
	u64				phys;
	kvm_pte_t			attr;
	struct kvm_pgtable_mm_ops	*mm_ops;
};

static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
	kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
	u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
					       KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;

	if (!(prot & KVM_PGTABLE_PROT_R))
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_X) {
		if (prot & KVM_PGTABLE_PROT_W)
			return -EINVAL;

		if (device)
			return -EINVAL;
	} else {
		attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
	}

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
	*ptep = attr;

	return 0;
}

static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				    kvm_pte_t *ptep, struct hyp_map_data *data)
{
	kvm_pte_t new, old = *ptep;
	u64 granule = kvm_granule_size(level), phys = data->phys;

	if (!kvm_block_mapping_supported(addr, end, phys, level))
		return false;

	/* Tolerate KVM recreating the exact same mapping */
	new = kvm_init_valid_leaf_pte(phys, data->attr, level);
	if (old != new && !WARN_ON(kvm_pte_valid(old)))
		smp_store_release(ptep, new);

	data->phys += granule;
	return true;
}

static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			  enum kvm_pgtable_walk_flags flag, void * const arg)
{
	kvm_pte_t *childp;
	struct hyp_map_data *data = arg;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg))
		return 0;

	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!childp)
		return -ENOMEM;

	kvm_set_table_pte(ptep, childp, mm_ops);
	return 0;
}

int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot)
{
	int ret;
	struct hyp_map_data map_data = {
		.phys	= ALIGN_DOWN(phys, PAGE_SIZE),
		.mm_ops	= pgt->mm_ops,
	};
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_map_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &map_data,
	};

	ret = hyp_set_prot_attr(prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	isb();
	return ret;
}

int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops)
{
	u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);

	pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits		= va_bits;
	pgt->start_level	= KVM_PGTABLE_MAX_LEVELS - levels;
	pgt->mm_ops		= mm_ops;
	pgt->mmu		= NULL;
	return 0;
}

static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			   enum kvm_pgtable_walk_flags flag, void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;

	mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops));
	return 0;
}

void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_free_walker,
		.flags	= KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pgt->mm_ops,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgt->mm_ops->put_page(pgt->pgd);
	pgt->pgd = NULL;
}
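
/*
 * Typical hyp stage-1 lifecycle, sketched for illustration; 'hyp_va_bits',
 * 'hyp_mm_ops', 'va', 'pa' and 'size' are hypothetical placeholders for
 * whatever the caller provides, and error handling is omitted:
 *
 *	struct kvm_pgtable pgt;
 *
 *	kvm_pgtable_hyp_init(&pgt, hyp_va_bits, &hyp_mm_ops);
 *	kvm_pgtable_hyp_map(&pgt, va, size, pa,
 *			    KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
 *	...
 *	kvm_pgtable_hyp_destroy(&pgt);
 *
 * Both init and map can fail (-ENOMEM, -EINVAL), so real callers must check
 * the return values before proceeding.
 */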
struct stage2_map_data {
	u64				phys;
	kvm_pte_t			attr;
	u8				owner_id;

	kvm_pte_t			*anchor;
	kvm_pte_t			*childp;

	struct kvm_s2_mmu		*mmu;
	void				*memcache;

	struct kvm_pgtable_mm_ops	*mm_ops;
};

u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
{
	u64 vtcr = VTCR_EL2_FLAGS;
	u8 lvls;

	vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
	vtcr |= VTCR_EL2_T0SZ(phys_shift);
	/*
	 * Use a minimum 2 level page table to prevent splitting
	 * host PMD huge pages at stage2.
	 */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

	/*
	 * Enable the Hardware Access Flag management, unconditionally
	 * on all CPUs. The feature is RES0 on CPUs without the support
	 * and must be ignored by the CPUs.
	 */
	vtcr |= VTCR_EL2_HA;

	/* Set the vmid bits */
	vtcr |= (get_vmid_bits(mmfr1) == 16) ?
		VTCR_EL2_VS_16BIT :
		VTCR_EL2_VS_8BIT;

	return vtcr;
}
static bool stage2_has_fwb(struct kvm_pgtable *pgt)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return false;

	return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
}

#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))

static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
				kvm_pte_t *ptep)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
			    KVM_S2_MEMATTR(pgt, NORMAL);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;

	if (!(prot & KVM_PGTABLE_PROT_X))
		attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
	else if (device)
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_R)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
	*ptep = attr;

	return 0;
}
static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
	if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
		return true;

	return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
}

static bool stage2_pte_is_counted(kvm_pte_t pte)
{
	/*
	 * The refcount tracks valid entries as well as invalid entries if they
	 * encode ownership of a page to another entity than the page-table
	 * owner, whose id is 0.
	 */
	return !!pte;
}

static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
			   u32 level, struct kvm_pgtable_mm_ops *mm_ops)
{
	/*
	 * Clear the existing PTE, and perform break-before-make with
	 * TLB maintenance if it was valid.
	 */
	if (kvm_pte_valid(*ptep)) {
		kvm_clear_pte(ptep);
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
	}

	mm_ops->put_page(ptep);
}

static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
{
	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;

	return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
}

static bool stage2_pte_executable(kvm_pte_t pte)
{
	return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}
static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      struct stage2_map_data *data)
{
	kvm_pte_t new, old = *ptep;
	u64 granule = kvm_granule_size(level), phys = data->phys;
	struct kvm_pgtable *pgt = data->mmu->pgt;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	if (!kvm_block_mapping_supported(addr, end, phys, level))
		return -E2BIG;

	if (kvm_phys_is_valid(phys))
		new = kvm_init_valid_leaf_pte(phys, data->attr, level);
	else
		new = kvm_init_invalid_leaf_owner(data->owner_id);

	if (stage2_pte_is_counted(old)) {
		/*
		 * Skip updating the PTE if we are trying to recreate the exact
		 * same mapping or only change the access permissions. Instead,
		 * the vCPU will exit one more time from guest if still needed
		 * and then go through the path of relaxing permissions.
		 */
		if (!stage2_pte_needs_update(old, new))
			return -EAGAIN;

		stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
	}

	/* Perform CMOs before installation of the guest stage-2 PTE */
	if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
						granule);

	if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
		mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);

	smp_store_release(ptep, new);
	if (stage2_pte_is_counted(new))
		mm_ops->get_page(ptep);
	if (kvm_phys_is_valid(phys))
		data->phys += granule;
	return 0;
}
static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
				     kvm_pte_t *ptep,
				     struct stage2_map_data *data)
{
	if (data->anchor)
		return 0;

	if (!kvm_block_mapping_supported(addr, end, data->phys, level))
		return 0;

	data->childp = kvm_pte_follow(*ptep, data->mm_ops);
	kvm_clear_pte(ptep);

	/*
	 * Invalidate the whole stage-2, as we may have numerous leaf
	 * entries below us which would otherwise need invalidating
	 * individually.
	 */
	kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
	data->anchor = ptep;
	return 0;
}
static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
				struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
	kvm_pte_t *childp, pte = *ptep;
	int ret;

	if (data->anchor) {
		if (stage2_pte_is_counted(pte))
			mm_ops->put_page(ptep);

		return 0;
	}

	ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data);
	if (ret != -E2BIG)
		return ret;

	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	if (!data->memcache)
		return -ENOMEM;

	childp = mm_ops->zalloc_page(data->memcache);
	if (!childp)
		return -ENOMEM;

	/*
	 * If we've run into an existing block mapping then replace it with
	 * a table. Accesses beyond 'end' that fall within the new table
	 * will be mapped lazily.
	 */
	if (stage2_pte_is_counted(pte))
		stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);

	kvm_set_table_pte(ptep, childp, mm_ops);
	mm_ops->get_page(ptep);

	return 0;
}
static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
	kvm_pte_t *childp;
	int ret = 0;

	if (!data->anchor)
		return 0;

	if (data->anchor == ptep) {
		childp = data->childp;
		data->anchor = NULL;
		data->childp = NULL;
		ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
	} else {
		childp = kvm_pte_follow(*ptep, mm_ops);
	}

	mm_ops->put_page(childp);
	mm_ops->put_page(ptep);

	return ret;
}
/*
 * This is a little fiddly, as we use all three of the walk flags. The idea
 * is that the TABLE_PRE callback runs for table entries on the way down,
 * looking for table entries which we could conceivably replace with a
 * block entry for this mapping. If it finds one, then it sets the 'anchor'
 * field in 'struct stage2_map_data' to point at the table entry, before
 * clearing the entry to zero and descending into the now detached table.
 *
 * The behaviour of the LEAF callback then depends on whether or not the
 * anchor has been set. If not, then we're not using a block mapping higher
 * up the table and we perform the mapping at the existing leaves instead.
 * If, on the other hand, the anchor _is_ set, then we drop references to
 * all valid leaves so that the pages beneath the anchor can be freed.
 *
 * Finally, the TABLE_POST callback does nothing if the anchor has not
 * been set, but otherwise frees the page-table pages while walking back up
 * the page-table, installing the block entry when it revisits the anchor
 * pointer and clearing the anchor to NULL.
 */
static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			     enum kvm_pgtable_walk_flags flag, void * const arg)
{
	struct stage2_map_data *data = arg;

	switch (flag) {
	case KVM_PGTABLE_WALK_TABLE_PRE:
		return stage2_map_walk_table_pre(addr, end, level, ptep, data);
	case KVM_PGTABLE_WALK_LEAF:
		return stage2_map_walk_leaf(addr, end, level, ptep, data);
	case KVM_PGTABLE_WALK_TABLE_POST:
		return stage2_map_walk_table_post(addr, end, level, ptep, data);
	}

	return -EINVAL;
}
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= ALIGN_DOWN(phys, PAGE_SIZE),
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.mm_ops		= pgt->mm_ops,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF |
				  KVM_PGTABLE_WALK_TABLE_POST,
		.arg		= &map_data,
	};

	if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
		return -EINVAL;

	ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	return ret;
}
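
/*
 * Example call, for illustration only: mapping a single page of guest IPA
 * space from a fault handler. 'fault_ipa', 'pfn' and 'memcache' are
 * hypothetical names standing in for the caller's state; the memcache must
 * hold enough pre-allocated pages for any intermediate tables the walker
 * may need to install.
 *
 *	ret = kvm_pgtable_stage2_map(pgt, fault_ipa, PAGE_SIZE,
 *				     pfn << PAGE_SHIFT,
 *				     KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
 *				     memcache);
 */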
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= KVM_PHYS_INVALID,
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.mm_ops		= pgt->mm_ops,
		.owner_id	= owner_id,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF |
				  KVM_PGTABLE_WALK_TABLE_POST,
		.arg		= &map_data,
	};

	if (owner_id > KVM_MAX_OWNER_ID)
		return -EINVAL;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	return ret;
}
static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			       enum kvm_pgtable_walk_flags flag,
			       void * const arg)
{
	struct kvm_pgtable *pgt = arg;
	struct kvm_s2_mmu *mmu = pgt->mmu;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
	kvm_pte_t pte = *ptep, *childp = NULL;
	bool need_flush = false;

	if (!kvm_pte_valid(pte)) {
		if (stage2_pte_is_counted(pte)) {
			kvm_clear_pte(ptep);
			mm_ops->put_page(ptep);
		}

		return 0;
	}

	if (kvm_pte_table(pte, level)) {
		childp = kvm_pte_follow(pte, mm_ops);

		if (mm_ops->page_count(childp) != 1)
			return 0;
	} else if (stage2_pte_cacheable(pgt, pte)) {
		need_flush = !stage2_has_fwb(pgt);
	}

	/*
	 * This is similar to the map() path in that we unmap the entire
	 * block entry and rely on the remaining portions being faulted
	 * back lazily.
	 */
	stage2_put_pte(ptep, mmu, addr, level, mm_ops);

	if (need_flush) {
		kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);

		dcache_clean_inval_poc((unsigned long)pte_follow,
				       (unsigned long)pte_follow +
					       kvm_granule_size(level));
	}

	if (childp)
		mm_ops->put_page(childp);

	return 0;
}

int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_unmap_walker,
		.arg	= pgt,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}
struct stage2_attr_data {
	kvm_pte_t			attr_set;
	kvm_pte_t			attr_clr;
	kvm_pte_t			pte;
	u32				level;
	struct kvm_pgtable_mm_ops	*mm_ops;
};

static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			      enum kvm_pgtable_walk_flags flag,
			      void * const arg)
{
	kvm_pte_t pte = *ptep;
	struct stage2_attr_data *data = arg;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	if (!kvm_pte_valid(pte))
		return 0;

	data->level = level;
	data->pte = pte;
	pte &= ~data->attr_clr;
	pte |= data->attr_set;

	/*
	 * We may race with the CPU trying to set the access flag here,
	 * but worst-case the access flag update gets lost and will be
	 * set on the next access instead.
	 */
	if (data->pte != pte) {
		/*
		 * Invalidate instruction cache before updating the guest
		 * stage-2 PTE if we are going to add executable permission.
		 */
		if (mm_ops->icache_inval_pou &&
		    stage2_pte_executable(pte) && !stage2_pte_executable(*ptep))
			mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
						  kvm_granule_size(level));
		WRITE_ONCE(*ptep, pte);
	}

	return 0;
}
static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
				    u64 size, kvm_pte_t attr_set,
				    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
				    u32 *level)
{
	int ret;
	kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
	struct stage2_attr_data data = {
		.attr_set	= attr_set & attr_mask,
		.attr_clr	= attr_clr & attr_mask,
		.mm_ops		= pgt->mm_ops,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_attr_walker,
		.arg		= &data,
		.flags		= KVM_PGTABLE_WALK_LEAF,
	};

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	if (ret)
		return ret;

	if (orig_pte)
		*orig_pte = data.pte;

	if (level)
		*level = data.level;

	return 0;
}
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	return stage2_update_leaf_attrs(pgt, addr, size, 0,
					KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
					NULL, NULL);
}

kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;

	stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
				 &pte, NULL);
	dsb(ishst);
	return pte;
}

kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;

	stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
				 &pte, NULL);
	/*
	 * "But where's the TLBI?!", you scream.
	 * "Over in the core code", I sigh.
	 *
	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
	 */
	return pte;
}

bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;

	stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL);
	return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
}
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot)
{
	int ret;
	u32 level;
	kvm_pte_t set = 0, clr = 0;

	if (prot & KVM_PGTABLE_PROT_R)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	if (prot & KVM_PGTABLE_PROT_X)
		clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;

	ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level);
	if (!ret)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
	return ret;
}
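
/*
 * Sketch of typical use, for illustration only: a permission fault on an
 * already-mapped page can be resolved by relaxing permissions instead of
 * recreating the mapping. 'fault_ipa' is a hypothetical variable name.
 *
 *	if (fault_is_perm)
 *		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa,
 *						     KVM_PGTABLE_PROT_R |
 *						     KVM_PGTABLE_PROT_W);
 *
 * Unlike kvm_pgtable_stage2_map(), this path never allocates memory and it
 * performs its own TLB invalidation for the affected level.
 */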
static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			       enum kvm_pgtable_walk_flags flag,
			       void * const arg)
{
	struct kvm_pgtable *pgt = arg;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
	kvm_pte_t pte = *ptep;
	kvm_pte_t *pte_follow;

	if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
		return 0;

	pte_follow = kvm_pte_follow(pte, mm_ops);
	dcache_clean_inval_poc((unsigned long)pte_follow,
			       (unsigned long)pte_follow +
				       kvm_granule_size(level));
	return 0;
}

int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_flush_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= pgt,
	};

	if (stage2_has_fwb(pgt))
		return 0;

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}
int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
				  struct kvm_pgtable_mm_ops *mm_ops,
				  enum kvm_pgtable_stage2_flags flags)
{
	size_t pgd_sz;
	u64 vtcr = arch->vtcr;
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
	pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits		= ia_bits;
	pgt->start_level	= start_level;
	pgt->mm_ops		= mm_ops;
	pgt->mmu		= &arch->mmu;
	pgt->flags		= flags;

	/* Ensure zeroed PGD pages are visible to the hardware walker */
	dsb(ishst);
	return 0;
}
static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			      enum kvm_pgtable_walk_flags flag,
			      void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	kvm_pte_t pte = *ptep;

	if (!stage2_pte_is_counted(pte))
		return 0;

	mm_ops->put_page(ptep);

	if (kvm_pte_table(pte, level))
		mm_ops->put_page(kvm_pte_follow(pte, mm_ops));

	return 0;
}

void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
{
	size_t pgd_sz;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pgt->mm_ops,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
	pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
	pgt->pgd = NULL;
}
#define KVM_PTE_LEAF_S2_COMPAT_MASK	(KVM_PTE_LEAF_ATTR_S2_PERMS | \
					 KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR | \
					 KVM_PTE_LEAF_ATTR_S2_IGNORED)

static int stage2_check_permission_walker(u64 addr, u64 end, u32 level,
					  kvm_pte_t *ptep,
					  enum kvm_pgtable_walk_flags flag,
					  void * const arg)
{
	kvm_pte_t old_attr, pte = *ptep, *new_attr = arg;

	/*
	 * Compatible mappings are either invalid and owned by the page-table
	 * owner (whose id is 0), or valid with matching permission attributes.
	 */
	if (kvm_pte_valid(pte)) {
		old_attr = pte & KVM_PTE_LEAF_S2_COMPAT_MASK;
		if (old_attr != *new_attr)
			return -EEXIST;
	} else if (pte) {
		return -EEXIST;
	}

	return 0;
}
int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
				  enum kvm_pgtable_prot prot,
				  struct kvm_mem_range *range)
{
	kvm_pte_t attr;
	struct kvm_pgtable_walker check_perm_walker = {
		.cb		= stage2_check_permission_walker,
		.flags		= KVM_PGTABLE_WALK_LEAF,
		.arg		= &attr,
	};
	u64 granule, start, end;
	u32 level;
	int ret;

	ret = stage2_set_prot_attr(pgt, prot, &attr);
	if (ret)
		return ret;

	attr &= KVM_PTE_LEAF_S2_COMPAT_MASK;

	for (level = pgt->start_level; level < KVM_PGTABLE_MAX_LEVELS; level++) {
		granule = kvm_granule_size(level);
		start = ALIGN_DOWN(addr, granule);
		end = start + granule;

		if (!kvm_level_supports_block_mapping(level))
			continue;

		if (start < range->start || range->end < end)
			continue;

		/*
		 * Check the presence of existing mappings with incompatible
		 * permissions within the current block range, and try one level
		 * deeper if one is found.
		 */
		ret = kvm_pgtable_walk(pgt, start, granule, &check_perm_walker);
		if (ret != -EEXIST)
			break;
	}

	if (!ret) {
		range->start = start;
		range->end = end;
	}

	return ret;
}