// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>

#define KVM_UTIL_MIN_PFN	2

static int vcpu_mmap_sz(void);
int open_path_or_exit(const char *path, int flags)
{
	int fd;

	fd = open(path, flags);
	if (fd < 0) {
		print_skip("%s not available (errno: %d)", path, errno);
		exit(KSFT_SKIP);
	}

	return fd;
}
/*
 * Open KVM_DEV_PATH if available, otherwise exit the entire program.
 *
 * Input Args:
 *   flags - The flags to pass when opening KVM_DEV_PATH.
 *
 * Return:
 *   The opened file descriptor of /dev/kvm.
 */
static int _open_kvm_dev_path_or_exit(int flags)
{
	return open_path_or_exit(KVM_DEV_PATH, flags);
}

int open_kvm_dev_path_or_exit(void)
{
	return _open_kvm_dev_path_or_exit(O_RDONLY);
}
/*
 * Capability
 *
 * Input Args:
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return:
 *   On success, the value corresponding to the capability (KVM_CAP_*)
 *   specified by the value of cap.  On failure a TEST_ASSERT failure
 *   is produced.
 *
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
int kvm_check_cap(long cap)
{
	int ret;
	int kvm_fd;

	kvm_fd = open_kvm_dev_path_or_exit();
	ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));

	close(kvm_fd);

	return ret;
}
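/*
 * Example usage (illustrative sketch, not part of this file): tests
 * typically gate their setup on a capability before relying on it, e.g.:
 *
 *	if (!kvm_check_cap(KVM_CAP_DIRTY_LOG_RING)) {
 *		print_skip("KVM_CAP_DIRTY_LOG_RING not available");
 *		exit(KSFT_SKIP);
 *	}
 */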
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
	vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
	vm->dirty_ring_size = ring_size;
}
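/*
 * Example usage (illustrative sketch; the ring size is an arbitrary
 * power-of-2 byte count, and vm_create_barebones() is assumed from the
 * selftests headers): the dirty ring must be enabled before any vCPU is
 * created, after which each vCPU's ring can be mapped:
 *
 *	struct kvm_vm *vm = vm_create_barebones();
 *	struct kvm_vcpu *vcpu;
 *
 *	vm_enable_dirty_ring(vm, 0x10000);
 *	vcpu = __vm_vcpu_add(vm, 0);
 *	vcpu_map_dirty_ring(vcpu);
 */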
static void vm_open(struct kvm_vm *vm)
{
	vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);

	if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
		print_skip("immediate_exit not available");
		exit(KSFT_SKIP);
	}

	vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
	TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
}
const char *vm_guest_mode_string(uint32_t i)
{
	static const char * const strings[] = {
		[VM_MODE_P52V48_4K]	= "PA-bits:52, VA-bits:48, 4K pages",
		[VM_MODE_P52V48_64K]	= "PA-bits:52, VA-bits:48, 64K pages",
		[VM_MODE_P48V48_4K]	= "PA-bits:48, VA-bits:48, 4K pages",
		[VM_MODE_P48V48_16K]	= "PA-bits:48, VA-bits:48, 16K pages",
		[VM_MODE_P48V48_64K]	= "PA-bits:48, VA-bits:48, 64K pages",
		[VM_MODE_P40V48_4K]	= "PA-bits:40, VA-bits:48, 4K pages",
		[VM_MODE_P40V48_16K]	= "PA-bits:40, VA-bits:48, 16K pages",
		[VM_MODE_P40V48_64K]	= "PA-bits:40, VA-bits:48, 64K pages",
		[VM_MODE_PXXV48_4K]	= "PA-bits:ANY, VA-bits:48, 4K pages",
		[VM_MODE_P47V64_4K]	= "PA-bits:47, VA-bits:64, 4K pages",
		[VM_MODE_P44V64_4K]	= "PA-bits:44, VA-bits:64, 4K pages",
		[VM_MODE_P36V48_4K]	= "PA-bits:36, VA-bits:48, 4K pages",
		[VM_MODE_P36V48_16K]	= "PA-bits:36, VA-bits:48, 16K pages",
		[VM_MODE_P36V48_64K]	= "PA-bits:36, VA-bits:48, 64K pages",
		[VM_MODE_P36V47_16K]	= "PA-bits:36, VA-bits:47, 16K pages",
	};
	_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
		       "Missing new mode strings?");

	TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);

	return strings[i];
}
const struct vm_guest_mode_params vm_guest_mode_params[] = {
	[VM_MODE_P52V48_4K]	= { 52, 48, 0x1000, 12 },
	[VM_MODE_P52V48_64K]	= { 52, 48, 0x10000, 16 },
	[VM_MODE_P48V48_4K]	= { 48, 48, 0x1000, 12 },
	[VM_MODE_P48V48_16K]	= { 48, 48, 0x4000, 14 },
	[VM_MODE_P48V48_64K]	= { 48, 48, 0x10000, 16 },
	[VM_MODE_P40V48_4K]	= { 40, 48, 0x1000, 12 },
	[VM_MODE_P40V48_16K]	= { 40, 48, 0x4000, 14 },
	[VM_MODE_P40V48_64K]	= { 40, 48, 0x10000, 16 },
	[VM_MODE_PXXV48_4K]	= { 0, 0, 0x1000, 12 },
	[VM_MODE_P47V64_4K]	= { 47, 64, 0x1000, 12 },
	[VM_MODE_P44V64_4K]	= { 44, 64, 0x1000, 12 },
	[VM_MODE_P36V48_4K]	= { 36, 48, 0x1000, 12 },
	[VM_MODE_P36V48_16K]	= { 36, 48, 0x4000, 14 },
	[VM_MODE_P36V48_64K]	= { 36, 48, 0x10000, 16 },
	[VM_MODE_P36V47_16K]	= { 36, 47, 0x4000, 14 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");
struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
{
	struct kvm_vm *vm;

	pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
		 vm_guest_mode_string(mode), nr_pages);

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	INIT_LIST_HEAD(&vm->vcpus);
	vm->regions.gpa_tree = RB_ROOT;
	vm->regions.hva_tree = RB_ROOT;
	hash_init(vm->regions.slot_hash);

	vm->mode = mode;
	vm->type = 0;

	vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[mode].va_bits;
	vm->page_size = vm_guest_mode_params[mode].page_size;
	vm->page_shift = vm_guest_mode_params[mode].page_shift;

	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P36V47_16K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		/*
		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
		 * it doesn't take effect unless CR4.LA57 is set, which it
		 * isn't for this VM_MODE.
		 */
		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
			    "Linear address width (%d bits) not supported",
			    vm->va_bits);
		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		vm->pgtable_levels = 4;
		vm->va_bits = 48;
#else
		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
		break;
	case VM_MODE_P47V64_4K:
		vm->pgtable_levels = 5;
		break;
	case VM_MODE_P44V64_4K:
		vm->pgtable_levels = 5;
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
	}

#ifdef __aarch64__
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm);

	/* Limit to VA-bit canonical virtual addresses. */
	vm->vpages_valid = sparsebit_alloc();
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = vm_compute_max_gfn(vm);

	/* Allocate and setup memory for guest. */
	vm->vpages_mapped = sparsebit_alloc();
	if (nr_pages != 0)
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    0, 0, nr_pages, 0);

	return vm;
}
static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
				     uint32_t nr_runnable_vcpus,
				     uint64_t extra_mem_pages)
{
	uint64_t nr_pages;

	TEST_ASSERT(nr_runnable_vcpus,
		    "Use vm_create_barebones() for VMs that _never_ have vCPUs\n");

	TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
		    "nr_vcpus = %d too large for host, max-vcpus = %d",
		    nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));

	nr_pages = DEFAULT_GUEST_PHY_PAGES;
	nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;

	/*
	 * Account for the number of pages needed for the page tables.  The
	 * maximum page table size for a memory region will be when the
	 * smallest page size is used.  Considering each page contains x page
	 * table descriptors, the total extra size for page tables (for extra
	 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
	 * than N/x*2.
	 */
	nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;

	return vm_adjust_num_guest_pages(mode, nr_pages);
}
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages)
{
	uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
						 nr_extra_pages);
	struct kvm_vm *vm;

	vm = ____vm_create(mode, nr_pages);

	kvm_vm_elf_load(vm, program_invocation_name);

#ifdef __x86_64__
	vm_create_irqchip(vm);
#endif
	return vm;
}
/*
 * VM Create with customized parameters
 *
 * Input Args:
 *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
 *   nr_vcpus - VCPU count
 *   extra_mem_pages - Non-slot0 physical memory total size
 *   guest_code - Guest entry point
 *   vcpus - Returned vCPU array, one entry per created vCPU
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
 * extra_mem_pages is only used to calculate the maximum page table size;
 * no real memory allocation is done for non-slot0 memory in this function.
 */
struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[])
{
	struct kvm_vm *vm;
	int i;

	TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");

	vm = __vm_create(mode, nr_vcpus, extra_mem_pages);

	for (i = 0; i < nr_vcpus; ++i)
		vcpus[i] = vm_vcpu_add(vm, i, guest_code);

	return vm;
}
struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
					 uint64_t extra_mem_pages,
					 void *guest_code)
{
	struct kvm_vcpu *vcpus[1];
	struct kvm_vm *vm;

	vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages,
				    guest_code, vcpus);

	*vcpu = vcpus[0];
	return vm;
}
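/*
 * Example usage (illustrative sketch, not part of this file): most tests
 * only need the one-vCPU form; guest_main is a placeholder for the test's
 * guest entry point.
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = __vm_create_with_one_vcpu(&vcpu, 0, guest_main);
 *
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */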
/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated with the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp)
{
	int ctr;
	struct userspace_mem_region *region;

	vm_open(vmp);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
			    "  rc: %i errno: %i\n"
			    "  slot: %u flags: 0x%x\n"
			    "  guest_phys_addr: 0x%llx size: 0x%llx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
{
	kvm_vm_restart(vm);

	return __vm_vcpu_add(vm, 0);
}
/*
 * Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Searches for a region with any physical memory that overlaps with
 * any portion of the guest physical addresses from start to end
 * inclusive.  If multiple overlapping regions exist, a pointer to any
 * of the regions is returned.  NULL is returned only when no overlapping
 * region exists.
 */
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
	struct rb_node *node;

	for (node = vm->regions.gpa_tree.rb_node; node; ) {
		struct userspace_mem_region *region =
			container_of(node, struct userspace_mem_region, gpa_node);
		uint64_t existing_start = region->region.guest_phys_addr;
		uint64_t existing_end = region->region.guest_phys_addr
			+ region->region.memory_size - 1;
		if (start <= existing_end && end >= existing_start)
			return region;

		if (start < existing_start)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}
/*
 * KVM Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Public interface to userspace_mem_region_find.  Allows tests to look up
 * the memslot data structure for a given range of guest physical memory.
 */
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end)
{
	struct userspace_mem_region *region;

	region = userspace_mem_region_find(vm, start, end);
	if (!region)
		return NULL;

	return &region->region;
}
/*
 * VM VCPU Remove
 *
 * Input Args:
 *   vcpu - VCPU to remove
 *
 * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
 * Removes a vCPU from a VM and frees its resources.
 */
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	int ret;

	if (vcpu->dirty_gfns) {
		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
		vcpu->dirty_gfns = NULL;
	}

	ret = munmap(vcpu->run, vcpu_mmap_sz());
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));

	ret = close(vcpu->fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));

	list_del(&vcpu->list);

	free(vcpu);
}
void kvm_vm_release(struct kvm_vm *vmp)
{
	struct kvm_vcpu *vcpu, *tmp;
	int ret;

	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
		vm_vcpu_rm(vmp, vcpu);

	ret = close(vmp->fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));

	ret = close(vmp->kvm_fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
}
static void __vm_mem_region_delete(struct kvm_vm *vm,
				   struct userspace_mem_region *region,
				   bool unlink)
{
	int ret;

	if (unlink) {
		rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
		rb_erase(&region->hva_node, &vm->regions.hva_tree);
		hash_del(&region->slot_node);
	}

	region->region.memory_size = 0;
	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);

	sparsebit_free(&region->unused_phy_pages);
	ret = munmap(region->mmap_start, region->mmap_size);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));

	free(region);
}
/*
 * Destroys and frees the VM pointed to by vmp.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
	int ctr;
	struct hlist_node *node;
	struct userspace_mem_region *region;

	if (vmp == NULL)
		return;

	/* Free userspace_mem_regions. */
	hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
		__vm_mem_region_delete(vmp, region, false);

	/* Free sparsebit arrays. */
	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	/* Free the structure describing the VM. */
	free(vmp);
}
int kvm_memfd_alloc(size_t size, bool hugepages)
{
	int memfd_flags = MFD_CLOEXEC;
	int fd, r;

	if (hugepages)
		memfd_flags |= MFD_HUGETLB;

	fd = memfd_create("kvm_selftest", memfd_flags);
	TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));

	r = ftruncate(fd, size);
	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));

	r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));

	return fd;
}
/*
 * Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal to the guest virtual bytes starting at gva.  Returns
 *   a value < 0, if bytes at hva are less than those at gva.
 *   Otherwise a value > 0 is returned.
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a batch of bytes until either a match is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Determine host address for guest virtual address
		 * at offset.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Determine amount to compare on this pass.
		 * Don't allow the comparison to cross a page boundary.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/*
		 * Perform the comparison.  If there is a difference
		 * return that result to the caller, otherwise need
		 * to continue on looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/*
	 * No mismatch found.  Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}
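/*
 * Example usage (illustrative sketch): compare a host pattern buffer
 * against bytes previously written into the guest.
 *
 *	uint8_t pattern[64];
 *	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
 *
 *	memset(pattern, 0xaa, sizeof(pattern));
 *	memset(addr_gva2hva(vm, gva), 0xaa, sizeof(pattern));
 *	TEST_ASSERT(!kvm_memcmp_hva_gva(pattern, vm, gva, sizeof(pattern)),
 *		    "Guest bytes should match the host pattern");
 */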
static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
					       struct userspace_mem_region *region)
{
	struct rb_node **cur, *parent;

	for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
		struct userspace_mem_region *cregion;

		cregion = container_of(*cur, typeof(*cregion), gpa_node);
		parent = *cur;
		if (region->region.guest_phys_addr <
		    cregion->region.guest_phys_addr)
			cur = &(*cur)->rb_left;
		else {
			TEST_ASSERT(region->region.guest_phys_addr !=
				    cregion->region.guest_phys_addr,
				    "Duplicate GPA in region tree");

			cur = &(*cur)->rb_right;
		}
	}

	rb_link_node(&region->gpa_node, parent, cur);
	rb_insert_color(&region->gpa_node, gpa_tree);
}
static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
					       struct userspace_mem_region *region)
{
	struct rb_node **cur, *parent;

	for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
		struct userspace_mem_region *cregion;

		cregion = container_of(*cur, typeof(*cregion), hva_node);
		parent = *cur;
		if (region->host_mem < cregion->host_mem)
			cur = &(*cur)->rb_left;
		else {
			TEST_ASSERT(region->host_mem !=
				    cregion->host_mem,
				    "Duplicate HVA in region tree");

			cur = &(*cur)->rb_right;
		}
	}

	rb_link_node(&region->hva_node, parent, cur);
	rb_insert_color(&region->hva_node, hva_tree);
}
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva)
{
	struct kvm_userspace_memory_region region = {
		.slot = slot,
		.flags = flags,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uintptr_t)hva,
	};

	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
}
void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva)
{
	int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)",
		    errno, strerror(errno));
}
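/*
 * Example usage (illustrative sketch; slot, GPA and the expected errno are
 * assumptions for the sake of the example): the double-underscore variant
 * returns the raw ioctl() result, letting tests probe error paths such as
 * KVM rejecting a size that is not page-aligned:
 *
 *	int r = __vm_set_user_memory_region(vm, 1, 0, 1ULL << 30,
 *					    getpagesize() + 1, hva);
 *	TEST_ASSERT(r == -1 && errno == EINVAL,
 *		    "Unaligned size should be rejected");
 */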
/*
 * VM Userspace Memory Region Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   src_type - Storage source for this region.
 *              Use VM_MEM_SRC_ANONYMOUS for ordinary anonymous memory.
 *   guest_paddr - Starting guest physical address
 *   slot - KVM region slot
 *   npages - Number of physical pages
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Allocates a memory area of the number of pages specified by npages
 * and maps it to the VM specified by vm, at a starting physical address
 * given by guest_paddr.  The region is created with a KVM region slot
 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM.  The
 * region is created with the flags given by flags.
 */
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;
	size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
	size_t alignment;

	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
		"Number of guest pages is not compatible with the host. "
		"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		"address not on a page boundary.\n"
		"  guest_paddr: 0x%lx vm->page_size: 0x%x",
		guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		<= vm->max_gfn, "Physical range beyond maximum "
		"supported physical address,\n"
		"  guest_paddr: 0x%lx npages: 0x%lx\n"
		"  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		guest_paddr, npages, vm->max_gfn, vm->page_size);

	/*
	 * Confirm a mem region with an overlapping address doesn't
	 * already exist.
	 */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_FAIL("overlapping userspace_mem_region already "
			"exists\n"
			"  requested guest_paddr: 0x%lx npages: 0x%lx "
			"page_size: 0x%x\n"
			"  existing guest_paddr: 0x%lx size: 0x%lx",
			guest_paddr, npages, vm->page_size,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	/* Confirm no region with the requested slot already exists. */
	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
			       slot) {
		if (region->region.slot != slot)
			continue;

		TEST_FAIL("A mem region with the requested slot "
			"already exists.\n"
			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			"  existing slot: %u paddr: 0x%lx size: 0x%lx",
			slot, guest_paddr, npages,
			region->region.slot,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);
	}

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = npages * vm->page_size;

#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	/*
	 * When using THP, mmap is not guaranteed to return a hugepage-aligned
	 * address, so we have to pad the mmap.  Padding is not needed for
	 * HugeTLB because mmap will always return an address aligned to the
	 * HugeTLB page size.
	 */
	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(backing_src_pagesz, alignment);

	ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));

	/* Add enough memory to align up if necessary */
	if (alignment > 1)
		region->mmap_size += alignment;

	region->fd = -1;
	if (backing_src_is_shared(src_type))
		region->fd = kvm_memfd_alloc(region->mmap_size,
					     src_type == VM_MEM_SRC_SHARED_HUGETLB);

	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  vm_mem_backing_src_alias(src_type)->flag,
				  region->fd, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

	TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
		    region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
		    "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
		    region->mmap_start, backing_src_pagesz);

	/* Align host address */
	region->host_mem = align_ptr_up(region->mmap_start, alignment);

	/* As needed perform madvise */
	if ((src_type == VM_MEM_SRC_ANONYMOUS ||
	     src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
		ret = madvise(region->host_mem, npages * vm->page_size,
			      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
		TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
			    region->host_mem, npages * vm->page_size,
			    vm_mem_backing_src_alias(src_type)->name);
	}

	region->unused_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
		guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i\n"
		"  slot: %u flags: 0x%x\n"
		"  guest_phys_addr: 0x%lx size: 0x%lx",
		ret, errno, slot, flags,
		guest_paddr, (uint64_t) region->region.memory_size);

	/* Add to quick lookup data structures */
	vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
	vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
	hash_add(vm->regions.slot_hash, &region->slot_node, slot);

	/* If shared memory, create an alias. */
	if (region->fd >= 0) {
		region->mmap_alias = mmap(NULL, region->mmap_size,
					  PROT_READ | PROT_WRITE,
					  vm_mem_backing_src_alias(src_type)->flag,
					  region->fd, 0);
		TEST_ASSERT(region->mmap_alias != MAP_FAILED,
			    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

		/* Align host alias address */
		region->host_alias = align_ptr_up(region->mmap_alias, alignment);
	}
}
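/*
 * Example usage (illustrative sketch; slot number, GPA and page count are
 * arbitrary placeholders): add a second, dirty-log-enabled memslot backed
 * by anonymous memory and identity-map it into the guest:
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 1ULL << 30,
 *				    1, 64, KVM_MEM_LOG_DIRTY_PAGES);
 *	virt_map(vm, 1ULL << 30, 1ULL << 30, 64);
 */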
/*
 * Memslot to region
 *
 * Input Args:
 *   vm - Virtual Machine
 *   memslot - KVM memory slot ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to memory region structure that describes the memory region
 *   using kvm memory slot ID given by memslot.  TEST_ASSERT failure
 *   on error (e.g. currently no memory region using memslot as a KVM
 *   memory slot ID).
 */
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
	struct userspace_mem_region *region;

	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
			       memslot)
		if (region->region.slot == memslot)
			return region;

	fprintf(stderr, "No mem region with the requested slot found,\n"
		"  requested slot: %u\n", memslot);
	fputs("---- vm dump ----\n", stderr);
	vm_dump(stderr, vm, 2);
	TEST_FAIL("Mem region not found");
	return NULL;
}
/*
 * VM Memory Region Flags Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to modify
 *   flags - Flags to set for the memslot (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the flags of the memory region specified by the value of slot,
 * to the values given by flags.
 */
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;

	region = memslot2region(vm, slot);

	region->region.flags = flags;

	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i slot: %u flags: 0x%x",
		ret, errno, slot, flags);
}
/*
 * VM Memory Region Move
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to move
 *   new_gpa - Starting guest physical address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Change the gpa of a memory region.
 */
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
	struct userspace_mem_region *region;
	int ret;

	region = memslot2region(vm, slot);

	region->region.guest_phys_addr = new_gpa;

	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
		    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
		    ret, errno, slot, new_gpa);
}
/*
 * VM Memory Region Delete
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to delete
 *
 * Output Args: None
 *
 * Return: None
 *
 * Delete a memory region.
 */
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
	__vm_mem_region_delete(vm, memslot2region(vm, slot), true);
}
/* Returns the size of a vCPU's kvm_run structure. */
static int vcpu_mmap_sz(void)
{
	int dev_fd, ret;

	dev_fd = open_kvm_dev_path_or_exit();

	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	TEST_ASSERT(ret >= sizeof(struct kvm_run),
		    KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));

	close(dev_fd);

	return ret;
}
static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu;

	list_for_each_entry(vcpu, &vm->vcpus, list) {
		if (vcpu->id == vcpu_id)
			return true;
	}

	return false;
}
/*
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
 * No additional vCPU setup is done.  Returns the vCPU.
 */
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");

	vcpu->vm = vm;
	vcpu->id = vcpu_id;
	vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
	TEST_ASSERT(vcpu->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu->fd));

	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
		"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		vcpu_mmap_sz(), sizeof(*vcpu->run));
	vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->run != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

	/* Add to linked-list of VCPUs. */
	list_add(&vcpu->list, &vm->vcpus);

	return vcpu;
}
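/*
 * Example usage (illustrative sketch; vm_create_barebones() is assumed from
 * the selftests headers): tests that want neither memslot 0 nor guest code
 * can pair a barebones VM with a raw vCPU and drive vcpu->fd / vcpu->run
 * directly:
 *
 *	struct kvm_vm *vm = vm_create_barebones();
 *	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, 0);
 */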
/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min, with at least
 *   sz unused bytes.  TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes.  A
 * TEST_ASSERT failure occurs for invalid input or if no area of at least
 * sz unallocated bytes >= vaddr_min is available.
 */
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
				      vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
			pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index?
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
			pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
			pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
			pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages),
		"Unexpected, invalid virtual page index range,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
		pgidx_start, pages),
		"Unexpected, pages already mapped,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);

	return pgidx_start * vm->page_size;
}
/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the vm
 * given by vm.  The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min.  Note that each allocation uses a
 * unique set of pages, with the minimum real allocation being at least
 * a page.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm);
	vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
					      KVM_UTIL_MIN_PFN * vm->page_size, 0);

	/*
	 * Find an unused range of virtual page addresses of at least
	 * pages in length.
	 */
	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	/* Map the virtual pages. */
	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
		pages--, vaddr += vm->page_size, paddr += vm->page_size) {

		virt_pg_map(vm, vaddr, paddr);

		sparsebit_set(vm->vpages_mapped,
			vaddr >> vm->page_shift);
	}

	return vaddr_start;
}
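/*
 * Example usage (illustrative sketch): carve out a two-page guest buffer
 * and zero it from the host via the HVA of the same pages.
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc(vm, 0x2000, KVM_UTIL_MIN_VADDR);
 *
 *	memset(addr_gva2hva(vm, gva), 0, 0x2000);
 */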
/*
 * VM Virtual Address Allocate Pages
 *
 * Input Args:
 *   vm - Virtual Machine
 *   nr_pages - Number of pages to allocate
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least N system pages worth of bytes within the virtual address
 * space of the vm.
 */
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
{
	return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
}

/*
 * VM Virtual Address Allocate Page
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least one system page worth of bytes within the virtual address
 * space of the vm.
 */
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
{
	return vm_vaddr_alloc_pages(vm, 1);
}
/*
 * Map a range of VM virtual address to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - Virtual address to map
 *   paddr - VM Physical Address
 *   npages - The number of pages to map
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by @vm, creates a virtual translation for
 * @npages starting at @vaddr to the page range starting at @paddr.
 */
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages)
{
	size_t page_size = vm->page_size;
	size_t size = npages * page_size;

	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

	while (npages--) {
		virt_pg_map(vm, vaddr, paddr);
		vaddr += page_size;
		paddr += page_size;
	}
}
/*
 * Address VM Physical to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Locates the memory region containing the VM physical address given
 * by gpa, within the VM given by vm.  When found, the host virtual
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;

	region = userspace_mem_region_find(vm, gpa, gpa);
	if (!region) {
		TEST_FAIL("No vm physical memory at 0x%lx", gpa);
		return NULL;
	}

	return (void *)((uintptr_t)region->host_mem
		+ (gpa - region->region.guest_phys_addr));
}
/*
 * Address Host Virtual to VM Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   hva - Host virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Locates the memory region containing the host virtual address given
 * by hva, within the VM given by vm.  When found, the equivalent
 * VM physical address is returned.  A TEST_ASSERT failure occurs if no
 * region containing hva exists.
 */
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
	struct rb_node *node;

	for (node = vm->regions.hva_tree.rb_node; node; ) {
		struct userspace_mem_region *region =
			container_of(node, struct userspace_mem_region, hva_node);

		if (hva >= region->host_mem) {
			if (hva <= (region->host_mem
				+ region->region.memory_size - 1))
				return (vm_paddr_t)((uintptr_t)
					region->region.guest_phys_addr
					+ (hva - (uintptr_t)region->host_mem));

			node = node->rb_right;
		} else
			node = node->rb_left;
	}

	TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
	return -1;
}
/*
 * Address VM physical to Host Virtual *alias*.
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent address within the host virtual *alias* area, or NULL
 *   (without failing the test) if the guest memory is not shared (so
 *   no alias exists).
 *
 * Create a writable, shared virtual=>physical alias for the specific GPA.
 * The primary use case is to allow the host selftest to manipulate guest
 * memory without mapping said memory in the guest's address space.  And, for
 * userfaultfd-based demand paging, to do so without triggering userfaults.
 */
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;
	uintptr_t offset;

	region = userspace_mem_region_find(vm, gpa, gpa);
	if (!region)
		return NULL;

	if (!region->host_alias)
		return NULL;

	offset = gpa - region->region.guest_phys_addr;
	return (void *) ((uintptr_t) region->host_alias + offset);
}
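/*
 * Example usage (illustrative sketch; assumes the region was created with a
 * shared backing source so an alias exists): write guest memory through the
 * alias so the guest-visible mapping is left untouched, e.g. to avoid
 * triggering userfaults in demand-paging tests.
 *
 *	uint64_t *p = addr_gpa2alias(vm, gpa);
 *
 *	if (p)
 *		*p = 0xdeadbeefull;
 */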
/* Create an interrupt controller chip for the specified VM. */
void vm_create_irqchip(struct kvm_vm *vm)
{
	vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);

	vm->has_irqchip = true;
}
int _vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	do {
		rc = __vcpu_run(vcpu);
	} while (rc == -1 && errno == EINTR);

	assert_on_unhandled_exception(vcpu);

	return rc;
}
/*
 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
 * Assert if KVM returns an error (other than -EINTR).
 */
void vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret = _vcpu_run(vcpu);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
}
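/*
 * Example usage (illustrative sketch): a minimal run loop that re-enters
 * the guest until it halts, reporting unexpected exits via
 * exit_reason_str() (defined later in this file):
 *
 *	for (;;) {
 *		vcpu_run(vcpu);
 *		if (vcpu->run->exit_reason == KVM_EXIT_HLT)
 *			break;
 *		TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_IO,
 *			    "Unexpected exit: %s",
 *			    exit_reason_str(vcpu->run->exit_reason));
 *	}
 */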
void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->run->immediate_exit = 1;
	ret = __vcpu_run(vcpu);
	vcpu->run->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}
/*
 * Get the list of guest registers which are supported for
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls.  Returns a kvm_reg_list pointer,
 * it is the caller's responsibility to free the list.
 */
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
{
	struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
	int ret;

	ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
	TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");

	reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
	reg_list->n = reg_list_n.n;
	vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);

	return reg_list;
}
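/*
 * Example usage (illustrative sketch): walk the returned list, remembering
 * that ownership passes to the caller.
 *
 *	struct kvm_reg_list *list = vcpu_get_reg_list(vcpu);
 *	__u64 i;
 *
 *	for (i = 0; i < list->n; i++)
 *		pr_debug("reg[%llu] = 0x%llx\n", i, list->reg[i]);
 *	free(list);
 */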
int __vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd, void *arg)
{
	return ioctl(vcpu->fd, cmd, arg);
}

void _vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd, const char *name,
		 void *arg)
{
	int ret = __vcpu_ioctl(vcpu, cmd, arg);

	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));
}
void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
	uint32_t page_size = vcpu->vm->page_size;
	uint32_t size = vcpu->vm->dirty_ring_size;

	TEST_ASSERT(size > 0, "Should enable dirty ring first");

	if (!vcpu->dirty_gfns) {
		void *addr;

		addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");

		addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");

		addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");

		vcpu->dirty_gfns = addr;
		vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
	}

	return vcpu->dirty_gfns;
}
int __vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	return ioctl(vm->fd, cmd, arg);
}

void _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, const char *name, void *arg)
{
	int ret = __vm_ioctl(vm, cmd, arg);

	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));
}
/*
 * Device Ioctl
 */

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	struct kvm_device_attr attribute = {
		.group = group,
		.attr = attr,
		.flags = 0,
	};

	return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}
int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
{
	struct kvm_create_device create_dev = {
		.type = type,
		.flags = KVM_CREATE_DEVICE_TEST,
	};

	return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
}
int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	struct kvm_create_device create_dev = {
		.type = type,
		.fd = -1,
		.flags = 0,
	};
	int err;

	err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
	TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value");
	return err ? : create_dev.fd;
}
int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
	struct kvm_device_attr kvmattr = {
		.group = group,
		.attr = attr,
		.flags = 0,
		.addr = (uintptr_t)val,
	};

	return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
	struct kvm_device_attr kvmattr = {
		.group = group,
		.attr = attr,
		.flags = 0,
		.addr = (uintptr_t)val,
	};

	return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
}
/*
 * IRQ related functions.
 */

int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
	struct kvm_irq_level irq_level = {
		.irq    = irq,
		.level  = level,
	};

	return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
	int ret = _kvm_irq_line(vm, irq, level);

	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}
struct kvm_irq_routing *kvm_gsi_routing_create(void)
{
	struct kvm_irq_routing *routing;
	size_t size;

	size = sizeof(struct kvm_irq_routing);
	/* Allocate space for the max number of entries: this wastes 196 KBs. */
	size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
	routing = calloc(1, size);
	assert(routing);

	return routing;
}
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
		uint32_t gsi, uint32_t pin)
{
	int i;

	assert(routing);
	assert(routing->nr < KVM_MAX_IRQ_ROUTES);

	i = routing->nr;
	routing->entries[i].gsi = gsi;
	routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
	routing->entries[i].flags = 0;
	routing->entries[i].u.irqchip.irqchip = 0;
	routing->entries[i].u.irqchip.pin = pin;
	routing->nr++;
}
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
	int ret;

	assert(routing);
	ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
	free(routing);

	return ret;
}

void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
	int ret;

	ret = _kvm_gsi_routing_write(vm, routing);
	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret));
}
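/*
 * Example usage (illustrative sketch; GSI and pin numbers are placeholders):
 * build a routing table mapping GSIs to irqchip pins, then commit it.  Note
 * that the write consumes (frees) the routing table.
 *
 *	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
 *
 *	kvm_gsi_routing_irqchip_add(routing, 32, 0);
 *	kvm_gsi_routing_irqchip_add(routing, 33, 1);
 *	kvm_gsi_routing_write(vm, routing);
 */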
/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int ctr;
	struct userspace_mem_region *region;
	struct kvm_vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");

	list_for_each_entry(vcpu, &vm->vcpus, list)
		vcpu_dump(stream, vcpu, indent + 2);
}
/* Known KVM exit reasons */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
	{KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
	{KVM_EXIT_X86_RDMSR, "RDMSR"},
	{KVM_EXIT_X86_WRMSR, "WRMSR"},
	{KVM_EXIT_XEN, "XEN"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};
/*
 * Exit Reason String
 *
 * Input Args:
 *   exit_reason - Exit reason
 *
 * Output Args: None
 *
 * Return:
 *   Constant string pointer describing the exit reason.
 *
 * Locates and returns a constant string that describes the KVM exit
 * reason given by exit_reason.  If no such string is found, a constant
 * string of "Unknown" is returned.
 */
const char *exit_reason_str(unsigned int exit_reason)
{
	unsigned int n1;

	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
		if (exit_reason == exit_reasons_known[n1].reason)
			return exit_reasons_known[n1].name;
	}

	return "Unknown";
}
/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate pages from
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above paddr_min.  If found, the pages are marked as in use
 * and their base address is returned.  A TEST_ASSERT failure occurs if
 * not enough pages are available at or above paddr_min.
 */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		"  paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot)
{
	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}
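/*
 * Example usage (illustrative sketch; the minimum address is arbitrary):
 * grab four physically contiguous pages from memslot 0:
 *
 *	vm_paddr_t gpa = vm_phy_pages_alloc(vm, 4, 0x10000, 0);
 */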
/* Arbitrary minimum physical address used for virtual translation tables. */
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000

vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
{
	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
}
/*
 * Address Guest Virtual to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 */
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm)
{
	return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
}
static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n;

	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	/* Compute the divisor only now, so the shift amount is never negative. */
	n = 1 << (new_page_shift - page_shift);
	return num_pages / n + !!(ceil && num_pages % n);
}
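/*
 * Worked example: converting 3 guest pages of 64K (page_shift 16) to 4K
 * host pages (page_shift 12) takes the first branch:
 * 3 * (1 << (16 - 12)) = 48 host pages.  Going the other way, 48 4K pages
 * become 48 / 16 = 3 64K pages, with "ceil" deciding whether a partial
 * page rounds up.
 */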
static inline int getpageshift(void)
{
	return __builtin_ffs(getpagesize()) - 1;
}
unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	return vm_calc_num_pages(num_guest_pages,
				 vm_guest_mode_params[mode].page_shift,
				 getpageshift(), true);
}

unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
	return vm_calc_num_pages(num_host_pages, getpageshift(),
				 vm_guest_mode_params[mode].page_shift, false);
}
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
	unsigned int n;

	n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
	return vm_adjust_num_guest_pages(mode, n);
}