// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 processor support code for KVM selftests
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>

#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	/* Round v up to a page boundary (no-op when already aligned). */
	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}
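
/*
 * Index helpers for the software page-table walk. Descriptors are 8
 * bytes, so each lookup level resolves (page_shift - 3) bits of the
 * virtual address; the top level (pgd) covers whatever remains of
 * va_bits above the lower levels.
 */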
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}
static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		    "Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}
static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		    "Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}
static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	return (gva >> vm->page_shift) & mask;
}
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	/* Strip the descriptor's attribute bits, leaving the output address. */
	uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;

	return entry & mask;
}
static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;

	return 1 << (vm->va_bits - shift);
}
static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}
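
/*
 * Allocate the top-level (pgd) translation table on first use. It
 * holds ptrs_per_pgd(vm) eight-byte descriptors, rounded up to whole
 * pages.
 */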
void virt_pgd_alloc(struct kvm_vm *vm)
{
	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
			page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}
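
/*
 * Map a single page: walk the translation tables for vaddr, allocating
 * intermediate tables as needed, then install the final descriptor.
 * Bits [1:0] = 0b11 mark a valid table/page descriptor, AttrIndx sits
 * in bits [4:2], and the Access Flag (bit 10) is set up front so the
 * guest never takes an access-flag fault.
 */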
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
			 uint64_t flags)
{
	uint8_t attr_idx = flags & 7;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		    "Virtual address not on page boundary,\n"
		    "  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		    (vaddr >> vm->page_shift)),
		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		    "Physical address not on page boundary,\n"
		    "  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		    "Physical address beyond maximum supported,\n"
		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep)
		*ptep = vm_alloc_page_table(vm) | 3;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = vm_alloc_page_table(vm) | 3;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = vm_alloc_page_table(vm) | 3;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	*ptep = paddr | 3;
	*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */

	_virt_pg_map(vm, vaddr, paddr, attr_idx);
}
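
/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest's translation tables in software, mirroring the
 * walk the hardware MMU performs.
 */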
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!*ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(1); /* not reached; silences missing-return warnings */
}
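
/*
 * Recursively dump the tables below the pgd. virt_dump() chooses the
 * starting level so that, regardless of how many translation levels
 * the mode uses, level 3 always names the final (pte) level.
 */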
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
}
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}
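
/*
 * Initialize a vCPU for running guest code: perform KVM_ARM_VCPU_INIT
 * against the preferred target, enable FP/ASIMD so the vararg helpers
 * can touch the Q registers, and program SCTLR_EL1, TCR_EL1, MAIR_EL1
 * and TTBR0_EL1 so the guest runs with the MMU on using the VM's page
 * size and address widths.
 */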
void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	uint64_t sctlr_el1, tcr_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;
		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);

	get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
	get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);

	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with 52-bit physical address ranges");
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		break;
	case VM_MODE_P48V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P48V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P40V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;

	set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
}
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	uint64_t pstate, pc;

	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}
void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
			      struct kvm_vcpu_init *init, void *guest_code)
{
	size_t stack_size = vm->page_size == 4096 ?
					DEFAULT_STACK_PGS * vm->page_size :
					vm->page_size;
	uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
					      DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);

	vm_vcpu_add(vm, vcpuid);
	aarch64_vcpu_setup(vm, vcpuid, init);

	/* The stack grows down; start sp at the top of the allocation. */
	set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}
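
/*
 * Convenience wrapper used by most tests; a NULL init means "use the
 * preferred target". A typical call, with a hypothetical guest_main()
 * entry point, is simply:
 *
 *	vm_vcpu_add_default(vm, 0, guest_main);
 */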
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	aarch64_vcpu_add_default(vm, vcpuid, NULL, guest_code);
}
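
/*
 * Pass up to eight uint64_t arguments to the guest by loading x0-x7,
 * the AAPCS64 integer argument registers, so guest_code receives them
 * as ordinary C function parameters.
 */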
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u\n", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
			va_arg(ap, uint64_t));
	}

	va_end(ap);
}
void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
	/* No AArch64 exception reporting is wired up yet; nothing to assert. */
}