// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

static u64 __io_map_base;

static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size:	The size of the VA range to reserve.
 * @haddr:	The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated above __io_map_base
 * and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
	unsigned long base, addr;
	int ret = 0;

	hyp_spin_lock(&pkvm_pgd_lock);

	/* Align the allocation based on the order of its size */
	addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));

	/* The allocated size is always a multiple of PAGE_SIZE */
	base = addr + PAGE_ALIGN(size);

	/* Are we overflowing on the vmemmap ? */
	if (!addr || base > __hyp_vmemmap)
		ret = -ENOMEM;
	else {
		__io_map_base = base;
		*haddr = addr;
	}

	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}
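
/*
 * A worked illustration of the policy documented above (hypothetical values,
 * not taken from any call site): with 4KiB pages, a request for
 * size = 3 * PAGE_SIZE has get_order(size) == 2, so the returned VA is
 * aligned to PAGE_SIZE << 2 = 16KiB, while __io_map_base only advances by
 * PAGE_ALIGN(size) = 12KiB beyond that aligned start.
 */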

int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
				  enum kvm_pgtable_prot prot,
				  unsigned long *haddr)
{
	unsigned long addr;
	int err;

	size = PAGE_ALIGN(size + offset_in_page(phys));
	err = pkvm_alloc_private_va_range(size, &addr);
	if (err)
		return err;

	err = __pkvm_create_mappings(addr, size, phys, prot);
	if (err)
		return err;

	*haddr = addr + offset_in_page(phys);
	return err;
}
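
/*
 * Hypothetical example of the offset handling above (values are illustrative
 * only): for phys = 0x80000100 and size = 0x300 with 4KiB pages, the padded
 * size becomes PAGE_ALIGN(0x300 + 0x100) = 0x1000, so one full page of
 * private VA is reserved and mapped, and *haddr returns that page's VA plus
 * the original 0x100 sub-page offset of phys.
 */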

int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}

int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
{
	unsigned long start, end;

	hyp_vmemmap_range(phys, size, &start, &end);

	return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
}

static void *__hyp_bp_vect_base;
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}

int hyp_map_vectors(void)
{
	phys_addr_t phys;
	unsigned long bp_base;
	int ret;

	if (!kvm_system_needs_idmapped_vectors()) {
		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
					    PAGE_HYP_EXEC, &bp_base);
	if (ret)
		return ret;

	__hyp_bp_vect_base = (void *)bp_base;

	return 0;
}

int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
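	/*
	 * A worked example with assumed values (hyp_va_bits == 48, quarters
	 * of BIT(46) bytes): if bit 46 of 'start' is clear, the idmap page
	 * falls in a quarter whose bit 46 is clear, so the code below picks
	 * a base with that bit set: __io_map_base = BIT(46) = 0x400000000000
	 * and __hyp_vmemmap = BIT(46) | BIT(45) = 0x600000000000, leaving IO
	 * mappings to grow from 0x400000000000 and the vmemmap to occupy the
	 * quarter's upper half.
	 */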
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}