// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
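
/*
 * Carve the host-donated memory pool into the contiguous chunks needed
 * before the buddy allocator is up: the hyp vmemmap, the VM table, the
 * hyp stage-1 and host stage-2 page-tables, and the FF-A proxy buffers.
 * A failure here means the pool donated by the host was too small.
 */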
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	nr_pages = hyp_ffa_proxy_pages();
	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
	if (!ffa_proxy_pages)
		return -ENOMEM;

	return 0;
}
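
/*
 * Map each CPU's host SVE state buffer into the hypervisor so hyp can
 * save and restore the host's vector state directly.
 */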
static int pkvm_create_host_sve_mappings(void)
{
	void *start, *end;
	int ret, i;

	if (!system_supports_sve())
		return 0;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
		struct cpu_sve_state *sve_state = host_data->sve_state;

		start = kern_hyp_va(sve_state);
		end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;
	}

	return 0;
}
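
/*
 * Build the hypervisor stage-1 page-table with the early allocator and
 * populate it with everything EL2 needs: the idmap and vectors, the
 * backing for the hyp_vmemmap, the hyp text/rodata/bss sections, the
 * memory pool itself, and the per-CPU areas and stacks.
 */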
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
		if (ret)
			return ret;
	}

	ret = pkvm_create_host_sve_mappings();
	if (ret)
		return ret;

	/*
	 * Map the host sections RO in the hypervisor, but transfer the
	 * ownership from the host to the hypervisor itself to make sure they
	 * can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see fix_host_ownership()) once
	 * the hyp_vmemmap is addressable.
	 */
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(&kvm_vgic_global_state,
				   &kvm_vgic_global_state + 1, prot);
	if (ret)
		return ret;

	return 0;
}
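
/*
 * Point every CPU's init params at the new PGD, then clean+invalidate
 * them to the PoC so the values are visible to CPUs that enter EL2
 * with the MMU and caches disabled.
 */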
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	int i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				       (unsigned long)params + sizeof(*params));
	}
}

static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}
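
/*
 * Mirror the ownership state of each valid hyp stage-1 leaf into the
 * host stage-2: pages owned outright by the hypervisor lose their host
 * mapping altogether, while shared pages keep an identity mapping
 * annotated with the complementary SHARED_OWNED/SHARED_BORROWED state.
 */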
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
				     enum kvm_pgtable_walk_flags visit)
{
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	phys_addr_t phys;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	if (ctx->level != KVM_PGTABLE_LAST_LEVEL)
		return -EINVAL;

	phys = kvm_pte_to_phys(ctx->old);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}
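
/*
 * Apply the ownership fix-ups above across every memblock region, now
 * that the host stage-2 is ready to record the results.
 */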
static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb = fix_host_ownership_walker,
		.flags = KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb = fix_hyp_pgtable_refcnt_walker,
		.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg = pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}
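
/*
 * Second half of initialisation, entered on the new page-table and
 * stack: switch to the buddy allocator, prepare the host stage-2,
 * reconcile page ownership and refcounts, then return to the host.
 */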
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_pcpu_fixmap();
	if (ret)
		goto out;

	ret = hyp_ffa_init(ffa_proxy_pages);
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}
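
/*
 * Entry point of the __pkvm_init hypercall: phys/size describe the
 * page-aligned pool donated by the host. On success this does not
 * return; control continues in __pkvm_init_finalise() via the idmap
 * trampoline, which propagates the result back to the host.
 */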
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	typeof(__pkvm_init_switch_pgd) *fn;
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);

	unreachable();
}