arch/arm64/kvm/hyp/nvhe/mem_protect.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

#include <hyp/switch.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;

struct hyp_pool host_s2_mem;
struct hyp_pool host_s2_dev;

/*
 * Copies of the host's CPU feature registers holding sanitized values.
 */
u64 id_aa64mmfr0_el1_sys_val;
u64 id_aa64mmfr1_el1_sys_val;

static const u8 pkvm_hyp_id = 1;

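/*
 * Page-table page allocation callbacks for the host stage 2. Pages are
 * taken from the hyp_pool instances initialised in prepare_s2_pools().
 */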
static void *host_s2_zalloc_pages_exact(size_t size)
{
	return hyp_alloc_pages(&host_s2_mem, get_order(size));
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

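/*
 * Initialise the two page-table page pools from the ranges passed in by the
 * caller (one backing memory mappings, one backing MMIO/device mappings) and
 * install the mm_ops used to manipulate the host stage-2 page-table.
 */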
static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(mem_pgt_pool);
	nr_pages = host_s2_mem_pgtable_pages();
	ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0);
	if (ret)
		return ret;

	pfn = hyp_virt_to_pfn(dev_pgt_pool);
	nr_pages = host_s2_dev_pgtable_pages();
	ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = hyp_get_page,
		.put_page = hyp_put_page,
	};

	return 0;
}

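/* Compute the host stage-2 VTCR_EL2 value from the sanitized ID register copies. */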
static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					  id_aa64mmfr1_el1_sys_val, phys_shift);
}

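/*
 * Set up the host's stage-2 state: VTCR, page-table pools, the page-table
 * itself and the MMU context later loaded by __pkvm_prot_finalize().
 */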
int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_kvm.lock);

	ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool);
	if (ret)
		return ret;

	ret = kvm_pgtable_stage2_init_flags(&host_kvm.pgt, &host_kvm.arch,
					    &host_kvm.mm_ops, KVM_HOST_S2_FLAGS);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
	mmu->arch = &host_kvm.arch;
	mmu->pgt = &host_kvm.pgt;
	mmu->vmid.vmid_gen = 0;
	mmu->vmid.vmid = 0;

	return 0;
}

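/*
 * Enable the host stage 2 on the calling CPU: program VTTBR/VTCR, set
 * HCR_EL2.VM and invalidate any stale TLB entries.
 */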
int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = host_kvm.arch.vtcr;
	params->hcr_el2 |= HCR_VM;
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg(params->hcr_el2, hcr_el2);
	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}

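/*
 * Tear down all MMIO (non-memory) mappings, i.e. the gaps between hyp_memory
 * regions, so that their page-table pages can be recycled.
 */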
static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_kvm.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

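/*
 * Look up @addr in the sorted hyp_memory list. Returns true with @range set
 * to the enclosing memory region on a hit; returns false with @range narrowed
 * to the surrounding hole otherwise.
 */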
static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	phys_addr_t end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return true;
		}
	}

	return false;
}

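/*
 * A range only counts as memory if both of its ends fall within the same
 * hyp_memory region.
 */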
static bool range_is_memory(u64 start, u64 end)
{
	struct kvm_mem_range r1, r2;

	if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
		return false;
	if (r1.start != r2.start)
		return false;

	return true;
}

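/* Identity-map [start, end) in the host stage 2, allocating from @pool. */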
static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot,
				      struct hyp_pool *pool)
{
	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
				      prot, pool);
}

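/*
 * Lazily install an identity mapping covering @addr. RAM is mapped RWX from
 * the 'mem' pool; everything else is mapped RW (non-executable) from the
 * 'dev' pool, recycling MMIO mappings if that pool runs out of pages.
 */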
static int host_stage2_idmap(u64 addr)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
	struct kvm_mem_range range;
	bool is_memory = find_mem_range(addr, &range);
	struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev;
	int ret;

	if (is_memory)
		prot |= KVM_PGTABLE_PROT_X;

	hyp_spin_lock(&host_kvm.lock);
	ret = kvm_pgtable_stage2_find_range(&host_kvm.pgt, addr, prot, &range);
	if (ret)
		goto unlock;

	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
	if (is_memory || ret != -ENOMEM)
		goto unlock;

	/*
	 * host_s2_mem has been provided with enough pages to cover all of
	 * memory with page granularity, so we should never hit the ENOMEM case.
	 * However, it is difficult to know how much of the MMIO range we will
	 * need to cover upfront, so we may need to 'recycle' the pages if we
	 * run out.
	 */
	ret = host_stage2_unmap_dev_all();
	if (ret)
		goto unlock;

	ret = __host_stage2_idmap(range.start, range.end, prot, pool);

unlock:
	hyp_spin_unlock(&host_kvm.lock);

	return ret;
}

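/*
 * Mark a range of memory pages as owned by the hypervisor (pkvm_hyp_id) in
 * the host stage 2, so the host can no longer access it directly.
 */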
int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
{
	int ret;

	/*
	 * host_stage2_unmap_dev_all() currently relies on MMIO mappings being
	 * non-persistent, so don't allow changing page ownership in MMIO range.
	 */
	if (!range_is_memory(start, end))
		return -EINVAL;

	hyp_spin_lock(&host_kvm.lock);
	ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
					   &host_s2_mem, pkvm_hyp_id);
	hyp_spin_unlock(&host_kvm.lock);

	return ret != -EAGAIN ? ret : 0;
}

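/*
 * Handle a stage-2 fault taken by the host: decode the faulting address from
 * HPFAR_EL2 and try to install an identity mapping for it.
 */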
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	BUG_ON(!__get_fault_info(esr, &fault));

	addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
	ret = host_stage2_idmap(addr);
	BUG_ON(ret && ret != -EAGAIN);
}