// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

struct hyp_pool hpool;
unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
                         (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;

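/*
 * Split the memory pool donated by the host into the contiguous regions
 * needed to bootstrap the hypervisor: the hyp vmemmap, the hyp stage-1
 * page-table and the host stage-2 page-table.
 */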
static int divide_memory_pool(void *virt, unsigned long size)
{
        unsigned long vstart, vend, nr_pages;

        hyp_early_alloc_init(virt, size);

        hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
        nr_pages = (vend - vstart) >> PAGE_SHIFT;
        vmemmap_base = hyp_early_alloc_contig(nr_pages);
        if (!vmemmap_base)
                return -ENOMEM;

        nr_pages = hyp_s1_pgtable_pages();
        hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!hyp_pgt_base)
                return -ENOMEM;

        nr_pages = host_s2_pgtable_pages();
        host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!host_s2_pgt_base)
                return -ENOMEM;

        return 0;
}

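/*
 * Rebuild the hyp stage-1 page-table with the early page allocator, mapping
 * the idmap and vectors, the hyp .text/.rodata/.bss sections, the donated
 * memory pool, and each CPU's per-cpu region and stack.
 */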
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                                 unsigned long *per_cpu_base,
                                 u32 hyp_va_bits)
{
        void *start, *end, *virt = hyp_phys_to_virt(phys);
        unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
        enum kvm_pgtable_prot prot;
        int ret, i;

        /* Recreate the hyp page-table using the early page allocator */
        hyp_early_alloc_init(hyp_pgt_base, pgt_size);
        ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
                                   &hyp_early_alloc_mm_ops);
        if (ret)
                return ret;

        ret = hyp_create_idmap(hyp_va_bits);
        if (ret)
                return ret;

        ret = hyp_map_vectors();
        if (ret)
                return ret;

        ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
        if (ret)
                return ret;

        for (i = 0; i < hyp_nr_cpus; i++) {
                start = (void *)kern_hyp_va(per_cpu_base[i]);
                end = start + PAGE_ALIGN(hyp_percpu_size);
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;

                end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va;
                start = end - PAGE_SIZE;
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;
        }

        /*
         * Map the host's .bss and .rodata sections RO in the hypervisor, but
         * transfer the ownership from the host to the hypervisor itself to
         * make sure it can't be donated or shared with another entity.
         *
         * The ownership transition requires matching changes in the host
         * stage-2. This will be done later (see finalize_host_mappings()) once
         * the hyp_vmemmap is addressable.
         */
        prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
        ret = pkvm_create_mappings(__start_rodata, __end_rodata, prot);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, prot);
        if (ret)
                return ret;

        return 0;
}

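/*
 * Point every CPU's init parameters at the new hyp PGD, and clean the
 * updates to the PoC so they are visible to the MMU-off entry path.
 */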
static void update_nvhe_init_params(void)
{
        struct kvm_nvhe_init_params *params;
        unsigned long i;

        for (i = 0; i < hyp_nr_cpus; i++) {
                params = per_cpu_ptr(&kvm_init_params, i);
                params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
                dcache_clean_inval_poc((unsigned long)params,
                                       (unsigned long)params + sizeof(*params));
        }
}

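/* Adapters connecting the generic page-table mm_ops to the hyp page pool. */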
static void *hyp_zalloc_hyp_page(void *arg)
{
        return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
        hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
        hyp_put_page(&hpool, addr);
}

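/*
 * Leaf walker callback: propagate the ownership state recorded in each valid
 * hyp stage-1 PTE to the host stage-2, so that both tables agree on whether
 * a page is owned by, shared with, or borrowed from the host.
 */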
static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
                                         kvm_pte_t *ptep,
                                         enum kvm_pgtable_walk_flags flag,
                                         void * const arg)
{
        enum kvm_pgtable_prot prot;
        enum pkvm_page_state state;
        kvm_pte_t pte = *ptep;
        phys_addr_t phys;

        if (!kvm_pte_valid(pte))
                return 0;

        if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
                return -EINVAL;

        phys = kvm_pte_to_phys(pte);
        if (!addr_is_memory(phys))
                return 0;

        /*
         * Adjust the host stage-2 mappings to match the ownership attributes
         * configured in the hypervisor stage-1.
         */
        state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
        switch (state) {
        case PKVM_PAGE_OWNED:
                return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id);
        case PKVM_PAGE_SHARED_OWNED:
                prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
                break;
        case PKVM_PAGE_SHARED_BORROWED:
                prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
                break;
        default:
                return -EINVAL;
        }

        return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

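/*
 * Walk the whole hyp stage-1 range and apply the matching ownership changes
 * to the host stage-2 (see finalize_host_mappings_walker()).
 */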
static int finalize_host_mappings(void)
{
        struct kvm_pgtable_walker walker = {
                .cb     = finalize_host_mappings_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF,
        };

        return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits), &walker);
}

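/*
 * Second stage of the init sequence, entered with the new page-tables live:
 * install the full-fledged page allocator, wrap the host with a stage-2,
 * fix up the host mappings, then return to the host via the saved context.
 */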
void __noreturn __pkvm_init_finalise(void)
{
        struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
        struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
        unsigned long nr_pages, reserved_pages, pfn;
        int ret;

        /* Now that the vmemmap is backed, install the full-fledged allocator */
        pfn = hyp_virt_to_pfn(hyp_pgt_base);
        nr_pages = hyp_s1_pgtable_pages();
        reserved_pages = hyp_early_alloc_nr_used_pages();
        ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
        if (ret)
                goto out;

        ret = kvm_host_prepare_stage2(host_s2_pgt_base);
        if (ret)
                goto out;

        ret = finalize_host_mappings();
        if (ret)
                goto out;

        pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
                .zalloc_page = hyp_zalloc_hyp_page,
                .phys_to_virt = hyp_phys_to_virt,
                .virt_to_phys = hyp_virt_to_phys,
                .get_page = hpool_get_page,
                .put_page = hpool_put_page,
        };
        pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

out:
        /*
         * We tail-called to here from handle___pkvm_init() and will not return,
         * so make sure to propagate the return value to the host.
         */
        cpu_reg(host_ctxt, 1) = ret;

        __host_enter(host_ctxt);
}

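/*
 * Hypervisor entry point for pKVM initialisation: carve up the memory pool
 * donated by the host, rebuild the hyp mappings, then jump via the idmap
 * page to switch onto the new page-tables and finalise in
 * __pkvm_init_finalise(), which does not return here.
 */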
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
                unsigned long *per_cpu_base, u32 hyp_va_bits)
{
        struct kvm_nvhe_init_params *params;
        void *virt = hyp_phys_to_virt(phys);
        void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
        int ret;

        if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
                return -EINVAL;

        hyp_spin_lock_init(&pkvm_pgd_lock);
        hyp_nr_cpus = nr_cpus;

        ret = divide_memory_pool(virt, size);
        if (ret)
                return ret;

        ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
        if (ret)
                return ret;

        update_nvhe_init_params();

        /* Jump in the idmap page to switch to the new page-tables */
        params = this_cpu_ptr(&kvm_init_params);
        fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
        fn(__hyp_pa(params), __pkvm_init_finalise);

        unreachable();
}