/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util.h
 *
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H

#include "test_util.h"

#include "asm/kvm.h"
#include "linux/kvm.h"
#include <sys/ioctl.h>

#include "sparsebit.h"

/*
 * Callers of kvm_util only have an incomplete/opaque description of the
 * structure kvm_util is using to maintain the state of a VM.
 */
struct kvm_vm;

typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000

#define DEFAULT_GUEST_PHY_PAGES		512
#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_64K,
	NUM_VM_MODES,
};

#ifdef __aarch64__
#define VM_MODE_DEFAULT VM_MODE_P40V48_4K
#else
#define VM_MODE_DEFAULT VM_MODE_P52V48_4K
#endif

#define vm_guest_mode_string(m) vm_guest_mode_string[m]
extern const char * const vm_guest_mode_string[];

enum vm_mem_backing_src_type {
	VM_MEM_SRC_ANONYMOUS,
	VM_MEM_SRC_ANONYMOUS_THP,
	VM_MEM_SRC_ANONYMOUS_HUGETLB,
};

int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);

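/*
 * Example (illustrative, not part of the original header): probe an
 * optional capability before relying on it. The KVM_CAP_* constant comes
 * from <linux/kvm.h>; KSFT_SKIP is assumed to come from the kselftest
 * harness headers.
 *
 *	if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
 *		fprintf(stderr, "KVM_CAP_IMMEDIATE_EXIT unavailable, skipping\n");
 *		exit(KSFT_SKIP);
 *	}
 */
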
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp, int perm);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
			    uint64_t first_page, uint32_t num_pages);

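/*
 * Example (illustrative sketch): minimal VM lifecycle. vm_create() returns
 * an opaque handle to a VM with 'phy_pages' pages of guest physical memory;
 * kvm_vm_free() tears it down. The bitmap sizing assumes one bit per guest
 * page, and fetching dirty bits assumes the slot was created with
 * KVM_MEM_LOG_DIRTY_PAGES.
 *
 *	struct kvm_vm *vm = vm_create(VM_MODE_DEFAULT,
 *				      DEFAULT_GUEST_PHY_PAGES, O_RDWR);
 *	void *bitmap = calloc(1, DEFAULT_GUEST_PHY_PAGES / 8);
 *
 *	kvm_vm_get_dirty_log(vm, 0, bitmap);	// dirty bits for slot 0
 *	kvm_vm_free(vm);
 */
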
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
		       size_t len);

void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
		     uint32_t data_memslot, uint32_t pgd_memslot);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
	       uint8_t indent);

void vm_create_irqchip(struct kvm_vm *vm);

void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);

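/*
 * Example (illustrative): back a guest physical range with anonymous
 * memory in a fresh memslot. guest_paddr must be page-aligned, npages is
 * a page count, and the slot number must not already be in use.
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    1ULL << 30,	// guest_paddr: 1 GiB
 *				    1,		// slot
 *				    512,	// npages
 *				    KVM_MEM_LOG_DIRTY_PAGES);
 */
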
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
		void *arg);
int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
		void *arg);
void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			  uint32_t data_memslot, uint32_t pgd_memslot);
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      size_t size, uint32_t pgd_memslot);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

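/*
 * Example (illustrative): the addr_*2* helpers convert between guest
 * virtual (gva), guest physical (gpa), and host virtual (hva) views of
 * the same memory, so host code can read or write guest data directly.
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc(vm, getpagesize(),
 *					KVM_UTIL_MIN_VADDR, 0, 0);
 *	uint64_t *hva = addr_gva2hva(vm, gva);
 *
 *	*hva = 0xdeadbeef;		// the guest sees this value at 'gva'
 */
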
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
		       struct kvm_mp_state *mp_state);
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_sregs *sregs);
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_sregs *sregs);
int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_sregs *sregs);
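/*
 * Example (illustrative): vcpu_args_set() stores up to 'num' values in the
 * registers the architecture uses for the first function arguments, so the
 * guest entry point can be written as an ordinary C function.
 *
 *	vcpu_args_set(vm, 0, 2, 42UL, 99UL);	// guest sees guest_code(42, 99)
 *
 *	struct kvm_regs regs;
 *	vcpu_regs_get(vm, 0, &regs);		// read, tweak, write back
 *	vcpu_regs_set(vm, 0, &regs);
 */
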
#ifdef __KVM_HAVE_VCPU_EVENTS
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events);
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events);
#endif
#ifdef __x86_64__
void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
			   struct kvm_nested_state *state);
int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_nested_state *state, bool ignore_error);
#endif

const char *exit_reason_str(unsigned int exit_reason);

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		 uint32_t pgd_memslot);
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot);

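/*
 * Example (illustrative): hand-building a mapping instead of using
 * vm_vaddr_alloc(): allocate a guest physical page from memslot 0, then
 * map a chosen guest virtual address to it in the guest's page tables.
 * The addresses here are arbitrary.
 *
 *	vm_paddr_t gpa = vm_phy_page_alloc(vm, 0x100000, 0);
 *
 *	virt_pg_map(vm, 0x40000000, gpa, 0);
 */
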
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
				 void *guest_code);
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);

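/*
 * Example (illustrative): vm_create_default() bundles the common setup
 * (create the VM, load the test binary, add vCPU 'vcpuid' with a stack,
 * point it at guest_code), so most tests need only this plus a run loop.
 *
 *	static void guest_code(void)
 *	{
 *		GUEST_DONE();
 *	}
 *
 *	struct kvm_vm *vm = vm_create_default(0, 0, guest_code);
 *
 *	vcpu_run(vm, 0);
 *	kvm_vm_free(vm);
 */
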
bool vm_is_unrestricted_guest(struct kvm_vm *vm);

struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end);

struct kvm_dirty_log *
allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);

int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})

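/*
 * Example (illustrative): the test binary is mapped into the guest at the
 * same virtual addresses it occupies in the host, so a global's address is
 * valid in both worlds and these macros can simply memcpy() through
 * addr_gva2hva(). 'token' is a hypothetical global shared with guest code.
 *
 *	static uint64_t token;
 *
 *	token = 0x1234;
 *	sync_global_to_guest(vm, token);	// guest now reads 0x1234
 *	...
 *	sync_global_from_guest(vm, token);	// pick up guest-side updates
 */
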
/* Common ucalls */
enum {
	UCALL_NONE,
	UCALL_SYNC,
	UCALL_ABORT,
	UCALL_DONE,
};

#define UCALL_MAX_ARGS 6

struct ucall {
	uint64_t cmd;
	uint64_t args[UCALL_MAX_ARGS];
};

void ucall_init(struct kvm_vm *vm, void *arg);
void ucall_uninit(struct kvm_vm *vm);
void ucall(uint64_t cmd, int nargs, ...);
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);

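/*
 * Example (illustrative): a typical host-side run loop. get_ucall() decodes
 * the guest's most recent ucall() into 'uc' and returns its command;
 * TEST_ASSERT() comes from test_util.h.
 *
 *	struct ucall uc;
 *
 *	while (1) {
 *		vcpu_run(vm, 0);
 *		if (get_ucall(vm, 0, &uc) == UCALL_DONE)
 *			break;
 *		TEST_ASSERT(uc.cmd != UCALL_ABORT, "%s at line %lu",
 *			    (const char *)uc.args[0], uc.args[1]);
 *	}
 */
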
#define GUEST_SYNC(stage)	ucall(UCALL_SYNC, 2, "hello", stage)
#define GUEST_DONE()		ucall(UCALL_DONE, 0)
#define GUEST_ASSERT(_condition) do {			\
	if (!(_condition))				\
		ucall(UCALL_ABORT, 2,			\
			"Failed guest assert: "		\
			#_condition, __LINE__);		\
} while (0)

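/*
 * Example (illustrative): guest-side use of the macros above. GUEST_SYNC()
 * reports a stage number to the host, GUEST_ASSERT() aborts the test with
 * the failed expression and line number, and GUEST_DONE() ends the test.
 * 'token' is the hypothetical global from the sync_global example.
 *
 *	static void guest_code(uint64_t expected)
 *	{
 *		GUEST_SYNC(1);
 *		GUEST_ASSERT(token == expected);
 *		GUEST_DONE();
 *	}
 */
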
#endif /* SELFTEST_KVM_UTIL_H */