1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */
#include <stdarg.h>

#include "../kvm_util_internal.h"
12 #define PAGES_PER_REGION 4
14 void virt_pgd_alloc(struct kvm_vm *vm)
18 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
24 paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
25 KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
26 memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
29 vm->pgd_created = true;
33 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
34 * a page table (ri == 4). Returns a suitable region/segment table entry
35 * which points to the freshly allocated pages.
37 static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
41 taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
42 KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
43 memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size);
45 return (taddr & REGION_ENTRY_ORIGIN)
46 | (((4 - ri) << 2) & REGION_ENTRY_TYPE)
47 | ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
50 void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
55 TEST_ASSERT((gva % vm->page_size) == 0,
56 "Virtual address not on page boundary,\n"
57 " vaddr: 0x%lx vm->page_size: 0x%x",
59 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
60 (gva >> vm->page_shift)),
61 "Invalid virtual address, vaddr: 0x%lx",
63 TEST_ASSERT((gpa % vm->page_size) == 0,
64 "Physical address not on page boundary,\n"
65 " paddr: 0x%lx vm->page_size: 0x%x",
67 TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
68 "Physical address beyond beyond maximum supported,\n"
69 " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
70 gva, vm->max_gfn, vm->page_size);
72 /* Walk through region and segment tables */
73 entry = addr_gpa2hva(vm, vm->pgd);
74 for (ri = 1; ri <= 4; ri++) {
75 idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
76 if (entry[idx] & REGION_ENTRY_INVALID)
77 entry[idx] = virt_alloc_region(vm, ri);
78 entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
81 /* Fill in page table entry */
82 idx = (gva >> 12) & 0x0ffu; /* page index */
83 if (!(entry[idx] & PAGE_INVALID))
85 "WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
89 vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
94 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
97 entry = addr_gpa2hva(vm, vm->pgd);
98 for (ri = 1; ri <= 4; ri++) {
99 idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
100 TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
101 "No region mapping for vm virtual address 0x%lx",
103 entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
106 idx = (gva >> 12) & 0x0ffu; /* page index */
108 TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
109 "No page mapping for vm virtual address 0x%lx", gva);
111 return (entry[idx] & ~0xffful) + (gva & 0xffful);
114 static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
119 for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
120 pte = addr_gpa2hva(vm, ptea);
121 if (*pte & PAGE_INVALID)
123 fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
124 indent, "", ptea, *pte);
128 static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
129 uint64_t reg_tab_addr)
131 uint64_t addr, *entry;
133 for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
134 entry = addr_gpa2hva(vm, addr);
135 if (*entry & REGION_ENTRY_INVALID)
137 fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
138 indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
140 if (*entry & REGION_ENTRY_TYPE) {
141 virt_dump_region(stream, vm, indent + 2,
142 *entry & REGION_ENTRY_ORIGIN);
144 virt_dump_ptes(stream, vm, indent + 2,
145 *entry & REGION_ENTRY_ORIGIN);
150 void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
152 if (!vm->pgd_created)
155 virt_dump_region(stream, vm, indent, vm->pgd);
158 void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
160 size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
161 uint64_t stack_vaddr;
162 struct kvm_regs regs;
163 struct kvm_sregs sregs;
166 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
169 stack_vaddr = vm_vaddr_alloc(vm, stack_size,
170 DEFAULT_GUEST_STACK_VADDR_MIN);
172 vm_vcpu_add(vm, vcpuid);
174 /* Setup guest registers */
175 vcpu_regs_get(vm, vcpuid, ®s);
176 regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
177 vcpu_regs_set(vm, vcpuid, ®s);
179 vcpu_sregs_get(vm, vcpuid, &sregs);
180 sregs.crs[0] |= 0x00040000; /* Enable floating point regs */
181 sregs.crs[1] = vm->pgd | 0xf; /* Primary region table */
182 vcpu_sregs_set(vm, vcpuid, &sregs);
184 run = vcpu_state(vm, vcpuid);
185 run->psw_mask = 0x0400000180000000ULL; /* DAT enabled + 64 bit mode */
186 run->psw_addr = (uintptr_t)guest_code;
189 void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
192 struct kvm_regs regs;
195 TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
200 vcpu_regs_get(vm, vcpuid, ®s);
202 for (i = 0; i < num; i++)
203 regs.gprs[i + 2] = va_arg(ap, uint64_t);
205 vcpu_regs_set(vm, vcpuid, ®s);
209 void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
211 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
216 fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
217 indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
220 void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)