// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
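
/* Memory management for the nVHE hypervisor's own stage-1 page-table. */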

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;
u64 __io_map_base;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;
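
/* Install a mapping in the hyp stage-1 page-table under the pgd lock. */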
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);
	return err;
}
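
/*
 * Allocate a private VA range from the IO region and map @size bytes at
 * @phys into it. Returns the hyp VA of the mapping on success, or an
 * ERR_PTR() value cast to unsigned long on failure.
 */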
unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
					    enum kvm_pgtable_prot prot)
{
	unsigned long addr;
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	size = PAGE_ALIGN(size + offset_in_page(phys));
	addr = __io_map_base;
	__io_map_base += size;
	/* Are we overflowing on the vmemmap? */
	if (__io_map_base > __hyp_vmemmap) {
		__io_map_base -= size;
		addr = (unsigned long)ERR_PTR(-ENOMEM);
		goto out;
	}
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
	if (err) {
		addr = (unsigned long)ERR_PTR(err);
		goto out;
	}
	addr = addr + offset_in_page(phys);
out:
	hyp_spin_unlock(&pkvm_pgd_lock);
	return addr;
}
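
/* Map a range of hyp VAs page by page. Caller must hold pkvm_pgd_lock. */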
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;
	int err;

	hyp_assert_lock_held(&pkvm_pgd_lock);
	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}
	return 0;
}
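
/* Same as pkvm_create_mappings_locked(), but takes the pgd lock itself. */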
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);
	return ret;
}
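
/* Map the vmemmap backing store for the [phys, phys + size[ range at @back. */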
int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
{
	unsigned long start, end;

	hyp_vmemmap_range(phys, size, &start, &end);
	return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
}
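
/* hyp VA of the Spectre-hardening vector slots, set by hyp_map_vectors(). */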
static void *__hyp_bp_vect_base;
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;
	switch (slot) {
	case HYP_VECTOR_DIRECT:
		vector = __kvm_hyp_vector;
		break;
	case HYP_VECTOR_SPECTRE_DIRECT:
		vector = __bp_harden_hyp_vecs;
		break;
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT:
		vector = (void *)__hyp_bp_vect_base;
		break;
	default:
		return -EINVAL;
	}
	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;
	return 0;
}
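
/* Map the Spectre-v3a-hardened vectors into the private VA range, if needed. */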
int hyp_map_vectors(void)
{
	phys_addr_t phys;
	void *bp_base;

	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
		return 0;
	phys = __hyp_pa(__bp_harden_hyp_vecs);
	bp_base = (void *)__pkvm_create_private_mapping(phys,
							__BP_HARDEN_HYP_VECS_SZ,
							PAGE_HYP_EXEC);
	if (IS_ERR_OR_NULL(bp_base))
		return PTR_ERR(bp_base);
	__hyp_bp_vect_base = bp_base;
	return 0;
}
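
/* Create the hyp idmap and pick the IO and vmemmap VA bases around it. */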
int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the
	 * VA space contains the trampoline page, and needs some care. Split
	 * that second half in two and find the quarter of VA space not
	 * conflicting with the idmap to place the IOs and the vmemmap. IOs
	 * use the lower half of the quarter and the vmemmap the upper half.
	 */
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);
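
	/*
	 * Illustration (assuming hyp_va_bits == 32): if the idmap lies in
	 * the quarter with bit 30 set, the XOR clears it, so IOs grow up
	 * from 0x0 and the vmemmap starts at 0x20000000; otherwise IOs
	 * start at 0x40000000 and the vmemmap at 0x60000000.
	 */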
	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}