// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/processor.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#ifndef NUM_INTERRUPTS
#define NUM_INTERRUPTS 256
#endif

#define DEFAULT_CODE_SELECTOR 0x8
#define DEFAULT_DATA_SELECTOR 0x10

/* Minimum physical address used for virtual translation tables. */
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000

vm_vaddr_t exception_handlers;

/* Virtual translation table structure declarations */
struct pageMapL4Entry {
	uint64_t present:1;
	uint64_t writable:1;
	uint64_t user:1;
	uint64_t write_through:1;
	uint64_t cache_disable:1;
	uint64_t accessed:1;
	uint64_t ignored_06:1;
	uint64_t page_size:1;
	uint64_t ignored_11_08:4;
	uint64_t address:40;
	uint64_t ignored_62_52:11;
	uint64_t execute_disable:1;
};

struct pageDirectoryPointerEntry {
	uint64_t present:1;
	uint64_t writable:1;
	uint64_t user:1;
	uint64_t write_through:1;
	uint64_t cache_disable:1;
	uint64_t accessed:1;
	uint64_t ignored_06:1;
	uint64_t page_size:1;
	uint64_t ignored_11_08:4;
	uint64_t address:40;
	uint64_t ignored_62_52:11;
	uint64_t execute_disable:1;
};

struct pageDirectoryEntry {
	uint64_t present:1;
	uint64_t writable:1;
	uint64_t user:1;
	uint64_t write_through:1;
	uint64_t cache_disable:1;
	uint64_t accessed:1;
	uint64_t ignored_06:1;
	uint64_t page_size:1;
	uint64_t ignored_11_08:4;
	uint64_t address:40;
	uint64_t ignored_62_52:11;
	uint64_t execute_disable:1;
};

struct pageTableEntry {
	uint64_t present:1;
	uint64_t writable:1;
	uint64_t user:1;
	uint64_t write_through:1;
	uint64_t cache_disable:1;
	uint64_t accessed:1;
	uint64_t dirty:1;
	uint64_t reserved_07:1;
	uint64_t global:1;
	uint64_t ignored_11_09:3;
	uint64_t address:40;
	uint64_t ignored_62_52:11;
	uint64_t execute_disable:1;
};

void regs_dump(FILE *stream, struct kvm_regs *regs,
	       uint8_t indent)
{
	fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
		"rcx: 0x%.16llx rdx: 0x%.16llx\n",
		indent, "",
		regs->rax, regs->rbx, regs->rcx, regs->rdx);
	fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
		"rsp: 0x%.16llx rbp: 0x%.16llx\n",
		indent, "",
		regs->rsi, regs->rdi, regs->rsp, regs->rbp);
	fprintf(stream, "%*sr8:  0x%.16llx r9:  0x%.16llx "
		"r10: 0x%.16llx r11: 0x%.16llx\n",
		indent, "",
		regs->r8, regs->r9, regs->r10, regs->r11);
	fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
		"r14: 0x%.16llx r15: 0x%.16llx\n",
		indent, "",
		regs->r12, regs->r13, regs->r14, regs->r15);
	fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
		indent, "",
		regs->rip, regs->rflags);
}

/*
 * Segment Dump
 *
 * Input Args:
 *   stream  - Output FILE stream
 *   segment - KVM segment
 *   indent  - Left margin indent amount
 *
 * Dumps the state of the KVM segment given by @segment, to the FILE stream
 * given by @stream.
 */
static void segment_dump(FILE *stream, struct kvm_segment *segment,
			 uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
		"selector: 0x%.4x type: 0x%.2x\n",
		indent, "", segment->base, segment->limit,
		segment->selector, segment->type);
	fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
		"db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
		indent, "", segment->present, segment->dpl,
		segment->db, segment->s, segment->l);
	fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
		"unusable: 0x%.2x padding: 0x%.2x\n",
		indent, "", segment->g, segment->avl,
		segment->unusable, segment->padding);
}

/*
 * dtable Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   dtable - KVM dtable
 *   indent - Left margin indent amount
 *
 * Dumps the state of the KVM dtable given by @dtable, to the FILE stream
 * given by @stream.
 */
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
			uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
		"padding: 0x%.4x 0x%.4x 0x%.4x\n",
		indent, "", dtable->base, dtable->limit,
		dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}

void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
		uint8_t indent)
{
	unsigned int i;

	fprintf(stream, "%*scs:\n", indent, "");
	segment_dump(stream, &sregs->cs, indent + 2);
	fprintf(stream, "%*sds:\n", indent, "");
	segment_dump(stream, &sregs->ds, indent + 2);
	fprintf(stream, "%*ses:\n", indent, "");
	segment_dump(stream, &sregs->es, indent + 2);
	fprintf(stream, "%*sfs:\n", indent, "");
	segment_dump(stream, &sregs->fs, indent + 2);
	fprintf(stream, "%*sgs:\n", indent, "");
	segment_dump(stream, &sregs->gs, indent + 2);
	fprintf(stream, "%*sss:\n", indent, "");
	segment_dump(stream, &sregs->ss, indent + 2);
	fprintf(stream, "%*str:\n", indent, "");
	segment_dump(stream, &sregs->tr, indent + 2);
	fprintf(stream, "%*sldt:\n", indent, "");
	segment_dump(stream, &sregs->ldt, indent + 2);

	fprintf(stream, "%*sgdt:\n", indent, "");
	dtable_dump(stream, &sregs->gdt, indent + 2);
	fprintf(stream, "%*sidt:\n", indent, "");
	dtable_dump(stream, &sregs->idt, indent + 2);

	fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
		"cr3: 0x%.16llx cr4: 0x%.16llx\n",
		indent, "",
		sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
	fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
		"apic_base: 0x%.16llx\n",
		indent, "",
		sregs->cr8, sregs->efer, sregs->apic_base);

	fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
	for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
		fprintf(stream, "%*s%.16llx\n", indent + 2, "",
			sregs->interrupt_bitmap[i]);
	}
}

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	/* If needed, create page map l4 table. */
	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_page_alloc(vm,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}

void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	uint32_t pgd_memslot)
{
	uint16_t index[4];
	struct pageMapL4Entry *pml4e;

	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x",
		vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x",
		paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	index[0] = (vaddr >> 12) & 0x1ffu;
	index[1] = (vaddr >> 21) & 0x1ffu;
	index[2] = (vaddr >> 30) & 0x1ffu;
	index[3] = (vaddr >> 39) & 0x1ffu;

	/* Allocate page directory pointer table if not present. */
	pml4e = addr_gpa2hva(vm, vm->pgd);
	if (!pml4e[index[3]].present) {
		pml4e[index[3]].address = vm_phy_page_alloc(vm,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
			>> vm->page_shift;
		pml4e[index[3]].writable = true;
		pml4e[index[3]].present = true;
	}

	/* Allocate page directory table if not present. */
	struct pageDirectoryPointerEntry *pdpe;
	pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
	if (!pdpe[index[2]].present) {
		pdpe[index[2]].address = vm_phy_page_alloc(vm,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
			>> vm->page_shift;
		pdpe[index[2]].writable = true;
		pdpe[index[2]].present = true;
	}

	/* Allocate page table if not present. */
	struct pageDirectoryEntry *pde;
	pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
	if (!pde[index[1]].present) {
		pde[index[1]].address = vm_phy_page_alloc(vm,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
			>> vm->page_shift;
		pde[index[1]].writable = true;
		pde[index[1]].present = true;
	}

	/* Fill in page table entry. */
	struct pageTableEntry *pte;
	pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
	pte[index[0]].address = paddr >> vm->page_shift;
	pte[index[0]].writable = true;
	pte[index[0]].present = 1;
}
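
/*
 * Example (illustrative sketch, not used by this file): a test that wants
 * a guest page identity-mapped at a hypothetical guest-physical address
 * could do:
 *
 *	virt_pgd_alloc(vm, 0);
 *	virt_pg_map(vm, 0xc0000, 0xc0000, 0);
 *
 * after which guest loads/stores through virtual address 0xc0000 hit
 * physical address 0xc0000.
 */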

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct pageMapL4Entry *pml4e, *pml4e_start;
	struct pageDirectoryPointerEntry *pdpe, *pdpe_start;
	struct pageDirectoryEntry *pde, *pde_start;
	struct pageTableEntry *pte, *pte_start;

	if (!vm->pgd_created)
		return;

	fprintf(stream, "%*s                                          "
		"                no\n", indent, "");
	fprintf(stream, "%*s      index hvaddr         gpaddr         "
		"addr         w exec dirty\n",
		indent, "");
	pml4e_start = (struct pageMapL4Entry *) addr_gpa2hva(vm,
		vm->pgd);
	for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
		pml4e = &pml4e_start[n1];
		if (!pml4e->present)
			continue;
		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
			" %u\n",
			indent, "",
			pml4e - pml4e_start, pml4e,
			addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->address,
			pml4e->writable, pml4e->execute_disable);

		pdpe_start = addr_gpa2hva(vm, pml4e->address
			* vm->page_size);
		for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
			pdpe = &pdpe_start[n2];
			if (!pdpe->present)
				continue;
			fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10lx "
				"%u  %u\n",
				indent, "",
				pdpe - pdpe_start, pdpe,
				addr_hva2gpa(vm, pdpe),
				(uint64_t) pdpe->address, pdpe->writable,
				pdpe->execute_disable);

			pde_start = addr_gpa2hva(vm,
				pdpe->address * vm->page_size);
			for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
				pde = &pde_start[n3];
				if (!pde->present)
					continue;
				fprintf(stream, "%*spde   0x%-3zx %p "
					"0x%-12lx 0x%-10lx %u  %u\n",
					indent, "", pde - pde_start, pde,
					addr_hva2gpa(vm, pde),
					(uint64_t) pde->address, pde->writable,
					pde->execute_disable);

				pte_start = addr_gpa2hva(vm,
					pde->address * vm->page_size);
				for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
					pte = &pte_start[n4];
					if (!pte->present)
						continue;
					fprintf(stream, "%*spte   0x%-3zx %p "
						"0x%-12lx 0x%-10lx %u  %u "
						"    %u    0x%-10lx\n",
						indent, "",
						pte - pte_start, pte,
						addr_hva2gpa(vm, pte),
						(uint64_t) pte->address,
						pte->writable,
						pte->execute_disable,
						pte->dirty,
						((uint64_t) n1 << 27)
						| ((uint64_t) n2 << 18)
						| ((uint64_t) n3 << 9)
						| ((uint64_t) n4));
				}
			}
		}
	}
}

/*
 * Set Unusable Segment
 *
 * Output Args:
 *   segp - Pointer to segment register
 *
 * Sets the segment register pointed to by @segp to an unusable state.
 */
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->unusable = true;
}

static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
{
	void *gdt = addr_gva2hva(vm, vm->gdt);
	struct desc64 *desc = gdt + (segp->selector >> 3) * 8;

	desc->limit0 = segp->limit & 0xFFFF;
	desc->base0 = segp->base & 0xFFFF;
	desc->base1 = segp->base >> 16;
	desc->type = segp->type;
	desc->s = segp->s;
	desc->dpl = segp->dpl;
	desc->p = segp->present;
	desc->limit1 = segp->limit >> 16;
	desc->avl = segp->avl;
	desc->l = segp->l;
	desc->db = segp->db;
	desc->g = segp->g;
	desc->base2 = segp->base >> 24;
	if (!segp->s)
		desc->base3 = segp->base >> 32;
}

/*
 * Set Long Mode Flat Kernel Code Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Sets up the KVM segment pointed to by @segp, to be a code segment
 * with the selector value given by @selector.
 */
static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
	struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = selector;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
					  * | kFlagCodeReadable
					  */
	segp->g = true;
	segp->l = true;
	segp->present = 1;
	if (vm)
		kvm_seg_fill_gdt_64bit(vm, segp);
}

/*
 * Set Long Mode Flat Kernel Data Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Sets up the KVM segment pointed to by @segp, to be a data segment
 * with the selector value given by @selector.
 */
static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
	struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = selector;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
					  * | kFlagDataWritable
					  */
	segp->g = true;
	segp->present = true;
	if (vm)
		kvm_seg_fill_gdt_64bit(vm, segp);
}

vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint16_t index[4];
	struct pageMapL4Entry *pml4e;
	struct pageDirectoryPointerEntry *pdpe;
	struct pageDirectoryEntry *pde;
	struct pageTableEntry *pte;

	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	index[0] = (gva >> 12) & 0x1ffu;
	index[1] = (gva >> 21) & 0x1ffu;
	index[2] = (gva >> 30) & 0x1ffu;
	index[3] = (gva >> 39) & 0x1ffu;

	if (!vm->pgd_created)
		goto unmapped_gva;

	pml4e = addr_gpa2hva(vm, vm->pgd);
	if (!pml4e[index[3]].present)
		goto unmapped_gva;

	pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
	if (!pdpe[index[2]].present)
		goto unmapped_gva;

	pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
	if (!pde[index[1]].present)
		goto unmapped_gva;

	pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
	if (!pte[index[0]].present)
		goto unmapped_gva;

	return (pte[index[0]].address * vm->page_size) + (gva & 0xfffu);

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}
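
/*
 * Example (illustrative sketch): host code can chain addr_gva2gpa() with
 * addr_gpa2hva() to read guest memory through a guest-virtual pointer:
 *
 *	vm_paddr_t gpa = addr_gva2gpa(vm, gva);
 *	uint64_t *hva = addr_gpa2hva(vm, gpa);
 *	uint64_t guest_val = *hva;
 */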

static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt, int gdt_memslot,
			  int pgd_memslot)
{
	if (!vm->gdt)
		vm->gdt = vm_vaddr_alloc(vm, getpagesize(),
			KVM_UTIL_MIN_VADDR, gdt_memslot, pgd_memslot);

	dt->base = vm->gdt;
	dt->limit = getpagesize();
}

static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
				int selector, int gdt_memslot,
				int pgd_memslot)
{
	if (!vm->tss)
		vm->tss = vm_vaddr_alloc(vm, getpagesize(),
			KVM_UTIL_MIN_VADDR, gdt_memslot, pgd_memslot);

	memset(segp, 0, sizeof(*segp));
	segp->base = vm->tss;
	segp->limit = 0x67;
	segp->selector = selector;
	segp->type = 0xb;
	segp->present = 1;
	kvm_seg_fill_gdt_64bit(vm, segp);
}

static void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
{
	struct kvm_sregs sregs;

	/* Set mode specific system register values. */
	vcpu_sregs_get(vm, vcpuid, &sregs);

	sregs.idt.limit = 0;

	kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot);

	switch (vm->mode) {
	case VM_MODE_PXXV48_4K:
		sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
		sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
		sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);

		kvm_seg_set_unusable(&sregs.ldt);
		kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs);
		kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds);
		kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es);
		kvm_setup_tss_64bit(vm, &sregs.tr, 0x18, gdt_memslot, pgd_memslot);
		break;

	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sregs.cr3 = vm->pgd;
	vcpu_sregs_set(vm, vcpuid, &sregs);
}

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	vm_vaddr_t stack_vaddr;
	stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
				     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);

	/* Create VCPU */
	vm_vcpu_add(vm, vcpuid);
	vcpu_setup(vm, vcpuid, 0, 0);

	/* Setup guest general purpose registers */
	vcpu_regs_get(vm, vcpuid, &regs);
	regs.rflags = regs.rflags | 0x2;
	regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
	regs.rip = (unsigned long) guest_code;
	vcpu_regs_set(vm, vcpuid, &regs);

	/* Setup the MP state */
	mp_state.mp_state = 0;
	vcpu_set_mp_state(vm, vcpuid, &mp_state);
}
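
/*
 * Example (illustrative sketch): most tests reach this via
 * vm_create_default(), but direct use looks like the following, where
 * VCPU_ID and guest_code() are the caller's own:
 *
 *	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
 *	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
 *	vm_vcpu_add_default(vm, VCPU_ID, guest_code);
 */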

/*
 * Allocate an instance of struct kvm_cpuid2
 *
 * Return: A pointer to the allocated struct. The caller is responsible
 * for freeing this struct.
 *
 * Since kvm_cpuid2 uses a 0-length array to allow the size of the
 * array to be decided at allocation time, allocation is slightly
 * complicated. This function uses a reasonable default length for
 * the array and performs the appropriate allocation.
 */
static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
{
	struct kvm_cpuid2 *cpuid;
	int nent = 100;
	size_t size;

	size = sizeof(*cpuid);
	size += nent * sizeof(struct kvm_cpuid_entry2);
	cpuid = malloc(size);
	if (!cpuid) {
		perror("malloc");
		abort();
	}

	cpuid->nent = nent;

	return cpuid;
}

/*
 * KVM Supported CPUID Get
 *
 * Return: The supported KVM CPUID
 *
 * Get the guest CPUID supported by KVM.
 */
struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
{
	static struct kvm_cpuid2 *cpuid;
	int ret;
	int kvm_fd;

	if (cpuid)
		return cpuid;

	cpuid = allocate_kvm_cpuid2();
	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (kvm_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
	TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
		    ret, errno);

	close(kvm_fd);
	return cpuid;
}

/*
 * KVM Get Feature MSR
 *
 * Input Args:
 *   msr_index - Index of MSR
 *
 * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
 *
 * Get the value of a feature MSR from the KVM module itself.
 */
uint64_t kvm_get_feature_msr(uint64_t msr_index)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r, kvm_fd;

	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (kvm_fd < 0)
		exit(KSFT_SKIP);

	r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
	TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
		"  rc: %i errno: %i", r, errno);

	close(kvm_fd);
	return buffer.entry.data;
}

/*
 * VM VCPU CPUID Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU id
 *
 * Return: KVM CPUID (KVM_GET_CPUID2)
 *
 * Return the VCPU's CPUID.
 */
struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct kvm_cpuid2 *cpuid;
	int max_ent;
	int rc = -1;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	cpuid = allocate_kvm_cpuid2();
	max_ent = cpuid->nent;

	for (cpuid->nent = 1; cpuid->nent <= max_ent; cpuid->nent++) {
		rc = ioctl(vcpu->fd, KVM_GET_CPUID2, cpuid);
		if (!rc)
			break;

		TEST_ASSERT(rc == -1 && errno == E2BIG,
			    "KVM_GET_CPUID2 should either succeed or give E2BIG: %d %d",
			    rc, errno);
	}

	TEST_ASSERT(rc == 0, "KVM_GET_CPUID2 failed, rc: %i errno: %i",
		    rc, errno);

	return cpuid;
}

/*
 * Locate a cpuid entry.
 *
 * Input Args:
 *   function: The function of the cpuid entry to find.
 *   index: The index of the cpuid entry.
 *
 * Return: A pointer to the cpuid entry. Never returns NULL.
 */
struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
{
	struct kvm_cpuid2 *cpuid;
	struct kvm_cpuid_entry2 *entry = NULL;
	int i;

	cpuid = kvm_get_supported_cpuid();
	for (i = 0; i < cpuid->nent; i++) {
		if (cpuid->entries[i].function == function &&
		    cpuid->entries[i].index == index) {
			entry = &cpuid->entries[i];
			break;
		}
	}

	TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
		    function, index);
	return entry;
}
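
/*
 * Example (illustrative sketch): checking whether KVM advertises a CPUID
 * feature bit, here XSAVE (CPUID.01H:ECX bit 26):
 *
 *	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_index(1, 0);
 *
 *	if (entry->ecx & (1u << 26))
 *		... XSAVE is supported ...
 */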

/*
 * VM VCPU CPUID Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU id
 *   cpuid - The CPUID values to set.
 *
 * Set the VCPU's CPUID.
 */
void vcpu_set_cpuid(struct kvm_vm *vm,
		uint32_t vcpuid, struct kvm_cpuid2 *cpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	rc = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
	TEST_ASSERT(rc == 0, "KVM_SET_CPUID2 failed, rc: %i errno: %i",
		    rc, errno);
}

/*
 * VCPU Get MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *
 * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
 *
 * Get value of MSR for VCPU.
 */
uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
	TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
		"  rc: %i errno: %i", r, errno);

	return buffer.entry.data;
}

/*
 * _VCPU Set MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *   msr_value - New value of MSR
 *
 * Return: The result of KVM_SET_MSRS.
 *
 * Sets the value of an MSR for the given VCPU.
 */
int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	memset(&buffer, 0, sizeof(buffer));
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	buffer.entry.data = msr_value;
	r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
	return r;
}

/*
 * VCPU Set MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *   msr_value - New value of MSR
 *
 * Return: On success, nothing. On failure a TEST_ASSERT is produced.
 *
 * Set value of MSR for VCPU.
 */
void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
	uint64_t msr_value)
{
	int r;

	r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value);
	TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
		"  rc: %i errno: %i", r, errno);
}
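
/*
 * Example (illustrative sketch): a set/get round trip through these
 * helpers; MSR_IA32_TSC is only an example index and VCPU_ID is the
 * caller's own:
 *
 *	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_TSC, 0);
 *	uint64_t val = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_TSC);
 */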

void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
		    "  num: %u\n",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vm, vcpuid, &regs);

	if (num >= 1)
		regs.rdi = va_arg(ap, uint64_t);
	if (num >= 2)
		regs.rsi = va_arg(ap, uint64_t);
	if (num >= 3)
		regs.rdx = va_arg(ap, uint64_t);
	if (num >= 4)
		regs.rcx = va_arg(ap, uint64_t);
	if (num >= 5)
		regs.r8 = va_arg(ap, uint64_t);
	if (num >= 6)
		regs.r9 = va_arg(ap, uint64_t);

	vcpu_regs_set(vm, vcpuid, &regs);
	va_end(ap);
}
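
/*
 * Example (illustrative sketch): handing two arguments to a guest entered
 * via vm_vcpu_add_default(vm, VCPU_ID, guest_code), where guest_code has
 * the signature void guest_code(uint64_t a, uint64_t b):
 *
 *	vcpu_args_set(vm, VCPU_ID, 2, (uint64_t)1234, (uint64_t)5678);
 */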

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	struct kvm_regs regs;
	struct kvm_sregs sregs;

	fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);

	fprintf(stream, "%*sregs:\n", indent + 2, "");
	vcpu_regs_get(vm, vcpuid, &regs);
	regs_dump(stream, &regs, indent + 4);

	fprintf(stream, "%*ssregs:\n", indent + 2, "");
	vcpu_sregs_get(vm, vcpuid, &sregs);
	sregs_dump(stream, &sregs, indent + 4);
}

struct kvm_x86_state {
	struct kvm_vcpu_events events;
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	struct kvm_xsave xsave;
	struct kvm_xcrs xcrs;
	struct kvm_sregs sregs;
	struct kvm_debugregs debugregs;
	union {
		struct kvm_nested_state nested;
		char nested_[16384];
	};
	struct kvm_msrs msrs;
};

static int kvm_get_num_msrs_fd(int kvm_fd)
{
	struct kvm_msr_list nmsrs;
	int r;

	nmsrs.nmsrs = 0;
	r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
	TEST_ASSERT(r == -1 && errno == E2BIG, "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i",
		r);

	return nmsrs.nmsrs;
}

static int kvm_get_num_msrs(struct kvm_vm *vm)
{
	return kvm_get_num_msrs_fd(vm->kvm_fd);
}

struct kvm_msr_list *kvm_get_msr_index_list(void)
{
	struct kvm_msr_list *list;
	int nmsrs, r, kvm_fd;

	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (kvm_fd < 0)
		exit(KSFT_SKIP);

	nmsrs = kvm_get_num_msrs_fd(kvm_fd);
	list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
	list->nmsrs = nmsrs;
	r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
	close(kvm_fd);

	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
		r);

	return list;
}

struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct kvm_msr_list *list;
	struct kvm_x86_state *state;
	int nmsrs, r, i;
	static int nested_size = -1;

	if (nested_size == -1) {
		nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
		TEST_ASSERT(nested_size <= sizeof(state->nested_),
			"Nested state size too big, %i > %zi",
			nested_size, sizeof(state->nested_));
	}

	/*
	 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
	 * guest state is consistent only after userspace re-enters the
	 * kernel with KVM_RUN.  Complete IO prior to migrating state
	 * to a new VM.
	 */
	vcpu_run_complete_io(vm, vcpuid);

	nmsrs = kvm_get_num_msrs(vm);
	list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
	list->nmsrs = nmsrs;
	r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
		r);

	state = malloc(sizeof(*state) + nmsrs * sizeof(state->msrs.entries[0]));
	r = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, &state->events);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_VCPU_EVENTS, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_GET_MP_STATE, &state->mp_state);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MP_STATE, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_GET_REGS, &state->regs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_GET_XSAVE, &state->xsave);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
		r);

	if (kvm_check_cap(KVM_CAP_XCRS)) {
		r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
			r);
	}

	r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
		r);

	if (nested_size) {
		state->nested.size = sizeof(state->nested_);
		r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_NESTED_STATE, r: %i",
			r);
		TEST_ASSERT(state->nested.size <= nested_size,
			"Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
			state->nested.size, nested_size);
	} else
		state->nested.size = 0;

	state->msrs.nmsrs = nmsrs;
	for (i = 0; i < nmsrs; i++)
		state->msrs.entries[i].index = list->indices[i];
	r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
	TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)",
		r, r == nmsrs ? -1 : list->indices[r]);

	r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_DEBUGREGS, r: %i",
		r);

	free(list);
	return state;
}

void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int r;

	r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
		r);

	if (kvm_check_cap(KVM_CAP_XCRS)) {
		r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
			r);
	}

	r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs);
	TEST_ASSERT(r == state->msrs.nmsrs, "Unexpected result from KVM_SET_MSRS, r: %i (failed at %x)",
		r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index);

	r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_VCPU_EVENTS, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_SET_MP_STATE, &state->mp_state);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_MP_STATE, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_SET_DEBUGREGS, &state->debugregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_DEBUGREGS, r: %i",
		r);

	r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
		r);

	if (state->nested.size) {
		r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
			r);
	}
}
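
/*
 * Example (illustrative sketch): the save/restore pattern used by state
 * migration tests:
 *
 *	struct kvm_x86_state *state;
 *
 *	state = vcpu_save_state(vm, VCPU_ID);
 *	... tear down and recreate the VM/VCPU, or migrate to a new one ...
 *	vcpu_load_state(vm, VCPU_ID, state);
 *	free(state);
 */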

bool is_intel_cpu(void)
{
	int eax, ebx, ecx, edx;
	const uint32_t *chunk;
	const int leaf = 0;

	__asm__ __volatile__(
		"cpuid"
		: /* output */ "=a"(eax), "=b"(ebx),
		  "=c"(ecx), "=d"(edx)
		: /* input */ "0"(leaf), "2"(0));

	chunk = (const uint32_t *)("GenuineIntel");
	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

uint32_t kvm_get_cpuid_max_basic(void)
{
	return kvm_get_supported_cpuid_entry(0)->eax;
}

uint32_t kvm_get_cpuid_max_extended(void)
{
	return kvm_get_supported_cpuid_entry(0x80000000)->eax;
}

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
	struct kvm_cpuid_entry2 *entry;
	bool pae;

	/* SDM 4.1.4 */
	if (kvm_get_cpuid_max_extended() < 0x80000008) {
		pae = kvm_get_supported_cpuid_entry(1)->edx & (1 << 6);
		*pa_bits = pae ? 36 : 32;
		*va_bits = 32;
	} else {
		entry = kvm_get_supported_cpuid_entry(0x80000008);
		*pa_bits = entry->eax & 0xff;
		*va_bits = (entry->eax >> 8) & 0xff;
	}
}

struct idt_entry {
	uint16_t offset0;
	uint16_t selector;
	uint16_t ist : 3;
	uint16_t : 5;
	uint16_t type : 4;
	uint16_t : 1;
	uint16_t dpl : 2;
	uint16_t p : 1;
	uint16_t offset1;
	uint32_t offset2; uint32_t reserved;
};

static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
			  int dpl, unsigned short selector)
{
	struct idt_entry *base =
		(struct idt_entry *)addr_gva2hva(vm, vm->idt);
	struct idt_entry *e = &base[vector];

	memset(e, 0, sizeof(*e));
	e->offset0 = addr;
	e->selector = selector;
	e->ist = 0;
	e->type = 14;
	e->dpl = dpl;
	e->p = 1;
	e->offset1 = addr >> 16;
	e->offset2 = addr >> 32;
}

void kvm_exit_unexpected_vector(uint32_t value)
{
	outl(UNEXPECTED_VECTOR_PORT, value);
}

void route_exception(struct ex_regs *regs)
{
	typedef void(*handler)(struct ex_regs *);
	handler *handlers = (handler *)exception_handlers;

	if (handlers && handlers[regs->vector]) {
		handlers[regs->vector](regs);
		return;
	}

	kvm_exit_unexpected_vector(regs->vector);
}

void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	extern void *idt_handlers;
	int i;

	vm->idt = vm_vaddr_alloc(vm, getpagesize(), 0x2000, 0, 0);
	vm->handlers = vm_vaddr_alloc(vm, 256 * sizeof(void *), 0x2000, 0, 0);
	/* Handlers have the same address in both address spaces. */
	for (i = 0; i < NUM_INTERRUPTS; i++)
		set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
			DEFAULT_CODE_SELECTOR);
}

void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_sregs sregs;

	vcpu_sregs_get(vm, vcpuid, &sregs);
	sregs.idt.base = vm->idt;
	sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
	sregs.gdt.base = vm->gdt;
	sregs.gdt.limit = getpagesize() - 1;
	kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs);
	vcpu_sregs_set(vm, vcpuid, &sregs);
	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

void vm_handle_exception(struct kvm_vm *vm, int vector,
			 void (*handler)(struct ex_regs *))
{
	vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);

	handlers[vector] = (vm_vaddr_t)handler;
}
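
/*
 * Example (illustrative sketch): installing a guest #GP handler; the
 * handler name is the caller's own:
 *
 *	static void guest_gp_handler(struct ex_regs *regs)
 *	{
 *		... skip or record the faulting instruction ...
 *	}
 *
 *	vm_init_descriptor_tables(vm);
 *	vcpu_init_descriptor_tables(vm, VCPU_ID);
 *	vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
 */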

void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
	if (vcpu_state(vm, vcpuid)->exit_reason == KVM_EXIT_IO
		&& vcpu_state(vm, vcpuid)->io.port == UNEXPECTED_VECTOR_PORT
		&& vcpu_state(vm, vcpuid)->io.size == 4) {
		/* Grab pointer to io data */
		uint32_t *data = (void *)vcpu_state(vm, vcpuid)
			+ vcpu_state(vm, vcpuid)->io.data_offset;

		TEST_ASSERT(false,
			"Unexpected vectored event in guest (vector:0x%x)",
			*data);
	}
}

bool set_cpuid(struct kvm_cpuid2 *cpuid,
	       struct kvm_cpuid_entry2 *ent)
{
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		struct kvm_cpuid_entry2 *cur = &cpuid->entries[i];

		if (cur->function != ent->function || cur->index != ent->index)
			continue;

		memcpy(cur, ent, sizeof(struct kvm_cpuid_entry2));
		return true;
	}

	return false;
}
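
/*
 * Example (illustrative sketch): tweaking one leaf in the supported CPUID
 * before handing it to a VCPU; note kvm_get_supported_cpuid() returns a
 * cached struct, so such edits are visible to later callers:
 *
 *	struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
 *	struct kvm_cpuid_entry2 ent = *kvm_get_supported_cpuid_index(1, 0);
 *
 *	ent.ecx &= ~(1u << 26);		// e.g. hide XSAVE
 *	TEST_ASSERT(set_cpuid(cpuid, &ent), "CPUID.1 entry should exist");
 *	vcpu_set_cpuid(vm, VCPU_ID, cpuid);
 */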

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3)
{
	uint64_t r;

	/* Per the KVM hypercall ABI, nr goes in RAX and the result returns in RAX. */
	asm volatile("vmcall"
		     : "=a"(r)
		     : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
	return r;
}
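
/*
 * Example (illustrative sketch): guest code yielding to another vCPU via
 * the sched_yield hypercall; dest_apic_id is a placeholder:
 *
 *	uint64_t ret = kvm_hypercall(KVM_HC_SCHED_YIELD, dest_apic_id, 0, 0, 0);
 */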

struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
{
	static struct kvm_cpuid2 *cpuid;
	int ret;
	int kvm_fd;

	if (cpuid)
		return cpuid;

	cpuid = allocate_kvm_cpuid2();
	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (kvm_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
	TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n",
		    ret, errno);

	close(kvm_fd);
	return cpuid;
}

void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
{
	static struct kvm_cpuid2 *cpuid_full;
	struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
	int i, nent = 0;

	if (!cpuid_full) {
		cpuid_sys = kvm_get_supported_cpuid();
		cpuid_hv = kvm_get_supported_hv_cpuid();

		cpuid_full = malloc(sizeof(*cpuid_full) +
				    (cpuid_sys->nent + cpuid_hv->nent) *
				    sizeof(struct kvm_cpuid_entry2));
		if (!cpuid_full) {
			perror("malloc");
			abort();
		}

		/* Need to skip KVM CPUID leaves 0x400000xx */
		for (i = 0; i < cpuid_sys->nent; i++) {
			if (cpuid_sys->entries[i].function >= 0x40000000 &&
			    cpuid_sys->entries[i].function < 0x40000100)
				continue;
			cpuid_full->entries[nent] = cpuid_sys->entries[i];
			nent++;
		}

		memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
		       cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
		cpuid_full->nent = nent + cpuid_hv->nent;
	}

	vcpu_set_cpuid(vm, vcpuid, cpuid_full);
}

struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
{
	static struct kvm_cpuid2 *cpuid;

	cpuid = allocate_kvm_cpuid2();

	vcpu_ioctl(vm, vcpuid, KVM_GET_SUPPORTED_HV_CPUID, cpuid);

	return cpuid;
}