// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/processor.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#ifndef NUM_INTERRUPTS
#define NUM_INTERRUPTS 256
#endif

#define DEFAULT_CODE_SELECTOR 0x8
#define DEFAULT_DATA_SELECTOR 0x10

vm_vaddr_t exception_handlers;

/* Virtual translation table structure declarations */
struct pageUpperEntry {
	uint64_t present:1;
	uint64_t writable:1;
	uint64_t user:1;
	uint64_t write_through:1;
	uint64_t cache_disable:1;
	uint64_t accessed:1;
	uint64_t ignored_06:1;
	uint64_t page_size:1;
	uint64_t ignored_11_08:4;
	uint64_t pfn:40;
	uint64_t ignored_62_52:11;
	uint64_t execute_disable:1;
};

struct pageTableEntry {
	uint64_t present:1;
	uint64_t writable:1;
	uint64_t user:1;
	uint64_t write_through:1;
	uint64_t cache_disable:1;
	uint64_t accessed:1;
	uint64_t dirty:1;
	uint64_t reserved_07:1;
	uint64_t global:1;
	uint64_t ignored_11_09:3;
	uint64_t pfn:40;
	uint64_t ignored_62_52:11;
	uint64_t execute_disable:1;
};

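/*
 * Illustrative sketch (not part of the original library): with 4-level
 * paging, a 48-bit guest virtual address decomposes into four 9-bit table
 * indices plus a 12-bit page offset, matching the bitfield layouts above.
 * The helper name is hypothetical.
 */
static inline uint16_t example_pt_index(uint64_t vaddr, int level)
{
	/* level 3 = PML4, 2 = PDPT, 1 = PD, 0 = PT */
	return (vaddr >> (12 + level * 9)) & 0x1ffu;
}
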
void regs_dump(FILE *stream, struct kvm_regs *regs,
	       uint8_t indent)
{
	fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
		"rcx: 0x%.16llx rdx: 0x%.16llx\n",
		indent, "",
		regs->rax, regs->rbx, regs->rcx, regs->rdx);
	fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
		"rsp: 0x%.16llx rbp: 0x%.16llx\n",
		indent, "",
		regs->rsi, regs->rdi, regs->rsp, regs->rbp);
	fprintf(stream, "%*sr8:  0x%.16llx r9:  0x%.16llx "
		"r10: 0x%.16llx r11: 0x%.16llx\n",
		indent, "",
		regs->r8, regs->r9, regs->r10, regs->r11);
	fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
		"r14: 0x%.16llx r15: 0x%.16llx\n",
		indent, "",
		regs->r12, regs->r13, regs->r14, regs->r15);
	fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
		indent, "",
		regs->rip, regs->rflags);
}

/*
 * Segment Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   segment - KVM segment
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps the state of the KVM segment given by @segment, to the FILE stream
 * given by @stream.
 */
static void segment_dump(FILE *stream, struct kvm_segment *segment,
			 uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
		"selector: 0x%.4x type: 0x%.2x\n",
		indent, "", segment->base, segment->limit,
		segment->selector, segment->type);
	fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
		"db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
		indent, "", segment->present, segment->dpl,
		segment->db, segment->s, segment->l);
	fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
		"unusable: 0x%.2x padding: 0x%.2x\n",
		indent, "", segment->g, segment->avl,
		segment->unusable, segment->padding);
}

/*
 * dtable Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   dtable - KVM dtable
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps the state of the KVM dtable given by @dtable, to the FILE stream
 * given by @stream.
 */
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
			uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
		"padding: 0x%.4x 0x%.4x 0x%.4x\n",
		indent, "", dtable->base, dtable->limit,
		dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}

void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
		uint8_t indent)
{
	unsigned int i;

	fprintf(stream, "%*scs:\n", indent, "");
	segment_dump(stream, &sregs->cs, indent + 2);
	fprintf(stream, "%*sds:\n", indent, "");
	segment_dump(stream, &sregs->ds, indent + 2);
	fprintf(stream, "%*ses:\n", indent, "");
	segment_dump(stream, &sregs->es, indent + 2);
	fprintf(stream, "%*sfs:\n", indent, "");
	segment_dump(stream, &sregs->fs, indent + 2);
	fprintf(stream, "%*sgs:\n", indent, "");
	segment_dump(stream, &sregs->gs, indent + 2);
	fprintf(stream, "%*sss:\n", indent, "");
	segment_dump(stream, &sregs->ss, indent + 2);
	fprintf(stream, "%*str:\n", indent, "");
	segment_dump(stream, &sregs->tr, indent + 2);
	fprintf(stream, "%*sldt:\n", indent, "");
	segment_dump(stream, &sregs->ldt, indent + 2);

	fprintf(stream, "%*sgdt:\n", indent, "");
	dtable_dump(stream, &sregs->gdt, indent + 2);
	fprintf(stream, "%*sidt:\n", indent, "");
	dtable_dump(stream, &sregs->idt, indent + 2);

	fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
		"cr3: 0x%.16llx cr4: 0x%.16llx\n",
		indent, "",
		sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
	fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
		"apic_base: 0x%.16llx\n",
		indent, "",
		sregs->cr8, sregs->efer, sregs->apic_base);

	fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
	for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
		fprintf(stream, "%*s%.16llx\n", indent + 2, "",
			sregs->interrupt_bitmap[i]);
	}
}

void virt_pgd_alloc(struct kvm_vm *vm)
{
	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	/* If needed, create page map l4 table. */
	if (!vm->pgd_created) {
		vm->pgd = vm_alloc_page_table(vm);
		vm->pgd_created = true;
	}
}

static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
			  int level)
{
	uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift);
	int index = vaddr >> (vm->page_shift + level * 9) & 0x1ffu;

	return &page_table[index];
}

static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
						    uint64_t pt_pfn,
						    uint64_t vaddr,
						    uint64_t paddr,
						    int level,
						    enum x86_page_size page_size)
{
	struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level);

	if (!pte->present) {
		pte->writable = true;
		pte->present = true;
		pte->page_size = (level == page_size);
		if (pte->page_size)
			pte->pfn = paddr >> vm->page_shift;
		else
			pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
	} else {
		/*
		 * Entry already present. Assert that the caller doesn't want
		 * a hugepage at this level, and that there isn't a hugepage at
		 * this level.
		 */
		TEST_ASSERT(level != page_size,
			    "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
			    page_size, vaddr);
		TEST_ASSERT(!pte->page_size,
			    "Cannot create page table at level: %u, vaddr: 0x%lx\n",
			    level, vaddr);
	}
	return pte;
}

void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		   enum x86_page_size page_size)
{
	const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
	struct pageUpperEntry *pml4e, *pdpe, *pde;
	struct pageTableEntry *pte;

	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
		    "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	TEST_ASSERT((vaddr % pg_size) == 0,
		    "Virtual address not aligned,\n"
		    "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % pg_size) == 0,
		    "Physical address not aligned,\n"
		    "  paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		    "Physical address beyond maximum supported,\n"
		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    paddr, vm->max_gfn, vm->page_size);

	/*
	 * Allocate upper level page tables, if not already present. Return
	 * early if a hugepage was created.
	 */
	pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
				      vaddr, paddr, 3, page_size);
	if (pml4e->page_size)
		return;

	pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size);
	if (pdpe->page_size)
		return;

	pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size);
	if (pde->page_size)
		return;

	/* Fill in page table entry. */
	pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
	TEST_ASSERT(!pte->present,
		    "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
	pte->pfn = paddr >> vm->page_shift;
	pte->writable = true;
	pte->present = 1;
}

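/*
 * Illustrative sketch: mapping a single 2 MiB hugepage with __virt_pg_map().
 * Per the asserts above, both addresses must be 2 MiB aligned.  The constant
 * X86_PAGE_SIZE_2M is assumed to come from processor.h; the wrapper name is
 * hypothetical.
 */
static inline void example_map_2m(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
	__virt_pg_map(vm, gva, gpa, X86_PAGE_SIZE_2M);
}
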
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	__virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
}

static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
						       uint64_t vaddr)
{
	uint16_t index[4];
	struct pageUpperEntry *pml4e, *pdpe, *pde;
	struct pageTableEntry *pte;
	struct kvm_cpuid_entry2 *entry;
	struct kvm_sregs sregs;
	int max_phy_addr;
	/* Set the bottom 52 bits. */
	uint64_t rsvd_mask = 0x000fffffffffffff;

	entry = kvm_get_supported_cpuid_index(0x80000008, 0);
	max_phy_addr = entry->eax & 0x000000ff;
	/* Clear the bottom bits of the reserved mask. */
	rsvd_mask = (rsvd_mask >> max_phy_addr) << max_phy_addr;

	/*
	 * SDM vol 3, fig 4-11 "Formats of CR3 and Paging-Structure Entries
	 * with 4-Level Paging and 5-Level Paging".
	 * If IA32_EFER.NXE = 0 and the P flag of a paging-structure entry is 1,
	 * the XD flag (bit 63) is reserved.
	 */
	vcpu_sregs_get(vm, vcpuid, &sregs);
	if ((sregs.efer & EFER_NX) == 0) {
		rsvd_mask |= (1ull << 63);
	}

	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		vaddr);
	/*
	 * Based on the mode check above there are 48 bits in the vaddr, so
	 * shift 16 to sign extend the last bit (bit 47).
	 */
	TEST_ASSERT(vaddr == (((int64_t)vaddr << 16) >> 16),
		"Canonical check failed. The virtual address is invalid.");

	index[0] = (vaddr >> 12) & 0x1ffu;
	index[1] = (vaddr >> 21) & 0x1ffu;
	index[2] = (vaddr >> 30) & 0x1ffu;
	index[3] = (vaddr >> 39) & 0x1ffu;

	pml4e = addr_gpa2hva(vm, vm->pgd);
	TEST_ASSERT(pml4e[index[3]].present,
		"Expected pml4e to be present for gva: 0x%08lx", vaddr);
	TEST_ASSERT((*(uint64_t*)(&pml4e[index[3]]) &
		(rsvd_mask | (1ull << 7))) == 0,
		"Unexpected reserved bits set.");

	pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
	TEST_ASSERT(pdpe[index[2]].present,
		"Expected pdpe to be present for gva: 0x%08lx", vaddr);
	TEST_ASSERT(pdpe[index[2]].page_size == 0,
		"Expected pdpe to map a pde not a 1-GByte page.");
	TEST_ASSERT((*(uint64_t*)(&pdpe[index[2]]) & rsvd_mask) == 0,
		"Unexpected reserved bits set.");

	pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
	TEST_ASSERT(pde[index[1]].present,
		"Expected pde to be present for gva: 0x%08lx", vaddr);
	TEST_ASSERT(pde[index[1]].page_size == 0,
		"Expected pde to map a pte not a 2-MByte page.");
	TEST_ASSERT((*(uint64_t*)(&pde[index[1]]) & rsvd_mask) == 0,
		"Unexpected reserved bits set.");

	pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
	TEST_ASSERT(pte[index[0]].present,
		"Expected pte to be present for gva: 0x%08lx", vaddr);

	return &pte[index[0]];
}

uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
{
	struct pageTableEntry *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);

	return *(uint64_t *)pte;
}

void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
			     uint64_t pte)
{
	struct pageTableEntry *new_pte = _vm_get_page_table_entry(vm, vcpuid,
								  vaddr);

	*(uint64_t *)new_pte = pte;
}

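/*
 * Illustrative sketch: using the two accessors above to flip a permission
 * bit in a 4K PTE, e.g. to provoke a write fault in a test.  Bit 1 is the
 * writable bit per the pageTableEntry layout; the helper name is
 * hypothetical.
 */
static inline void example_clear_writable(struct kvm_vm *vm, int vcpuid,
					  uint64_t vaddr)
{
	uint64_t pte = vm_get_page_table_entry(vm, vcpuid, vaddr);

	vm_set_page_table_entry(vm, vcpuid, vaddr, pte & ~(1ull << 1));
}
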
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct pageUpperEntry *pml4e, *pml4e_start;
	struct pageUpperEntry *pdpe, *pdpe_start;
	struct pageUpperEntry *pde, *pde_start;
	struct pageTableEntry *pte, *pte_start;

	if (!vm->pgd_created)
		return;

	fprintf(stream, "%*s                                          "
		"                no\n", indent, "");
	fprintf(stream, "%*s      index hvaddr         gpaddr         "
		"addr         w exec dirty\n",
		indent, "");
	pml4e_start = (struct pageUpperEntry *) addr_gpa2hva(vm, vm->pgd);
	for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
		pml4e = &pml4e_start[n1];
		if (!pml4e->present)
			continue;
		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
			" %u\n",
			indent, "",
			pml4e - pml4e_start, pml4e,
			addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->pfn,
			pml4e->writable, pml4e->execute_disable);

		pdpe_start = addr_gpa2hva(vm, pml4e->pfn * vm->page_size);
		for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
			pdpe = &pdpe_start[n2];
			if (!pdpe->present)
				continue;
			fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10lx "
				"%u  %u\n",
				indent, "",
				pdpe - pdpe_start, pdpe,
				addr_hva2gpa(vm, pdpe),
				(uint64_t) pdpe->pfn, pdpe->writable,
				pdpe->execute_disable);

			pde_start = addr_gpa2hva(vm, pdpe->pfn * vm->page_size);
			for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
				pde = &pde_start[n3];
				if (!pde->present)
					continue;
				fprintf(stream, "%*spde   0x%-3zx %p "
					"0x%-12lx 0x%-10lx %u  %u\n",
					indent, "", pde - pde_start, pde,
					addr_hva2gpa(vm, pde),
					(uint64_t) pde->pfn, pde->writable,
					pde->execute_disable);

				pte_start = addr_gpa2hva(vm, pde->pfn * vm->page_size);
				for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
					pte = &pte_start[n4];
					if (!pte->present)
						continue;
					fprintf(stream, "%*spte   0x%-3zx %p "
						"0x%-12lx 0x%-10lx %u  %u "
						"    %u    0x%-10lx\n",
						indent, "",
						pte - pte_start, pte,
						addr_hva2gpa(vm, pte),
						(uint64_t) pte->pfn,
						pte->writable,
						pte->execute_disable,
						pte->dirty,
						((uint64_t) n1 << 27)
						| ((uint64_t) n2 << 18)
						| ((uint64_t) n3 << 9)
						| ((uint64_t) n4));
				}
			}
		}
	}
}

/*
 * Set Unusable Segment
 *
 * Input Args: None
 *
 * Output Args:
 *   segp - Pointer to segment register
 *
 * Return: None
 *
 * Sets the segment register pointed to by @segp to an unusable state.
 */
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->unusable = true;
}

static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
{
	void *gdt = addr_gva2hva(vm, vm->gdt);
	struct desc64 *desc = gdt + (segp->selector >> 3) * 8;

	desc->limit0 = segp->limit & 0xFFFF;
	desc->base0 = segp->base & 0xFFFF;
	desc->base1 = segp->base >> 16;
	desc->type = segp->type;
	desc->s = segp->s;
	desc->dpl = segp->dpl;
	desc->p = segp->present;
	desc->limit1 = segp->limit >> 16;
	desc->avl = segp->avl;
	desc->l = segp->l;
	desc->db = segp->db;
	desc->g = segp->g;
	desc->base2 = segp->base >> 24;
	if (!segp->s)
		desc->base3 = segp->base >> 32;
}

/*
 * Set Long Mode Flat Kernel Code Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by @segp, to be a code segment
 * with the selector value given by @selector.
 */
static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
	struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = selector;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
					  * | kFlagCodeReadable
					  */
	segp->g = true;
	segp->l = true;
	segp->present = 1;
	if (vm)
		kvm_seg_fill_gdt_64bit(vm, segp);
}

/*
 * Set Long Mode Flat Kernel Data Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by @segp, to be a data segment
 * with the selector value given by @selector.
 */
static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
	struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = selector;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
					  * | kFlagDataWritable
					  */
	segp->g = true;
	segp->present = true;
	if (vm)
		kvm_seg_fill_gdt_64bit(vm, segp);
}

vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint16_t index[4];
	struct pageUpperEntry *pml4e, *pdpe, *pde;
	struct pageTableEntry *pte;

	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	index[0] = (gva >> 12) & 0x1ffu;
	index[1] = (gva >> 21) & 0x1ffu;
	index[2] = (gva >> 30) & 0x1ffu;
	index[3] = (gva >> 39) & 0x1ffu;

	if (!vm->pgd_created)
		goto unmapped_gva;

	pml4e = addr_gpa2hva(vm, vm->pgd);
	if (!pml4e[index[3]].present)
		goto unmapped_gva;

	pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
	if (!pdpe[index[2]].present)
		goto unmapped_gva;

	pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
	if (!pde[index[1]].present)
		goto unmapped_gva;

	pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
	if (!pte[index[0]].present)
		goto unmapped_gva;

	return (pte[index[0]].pfn * vm->page_size) + (gva & 0xfffu);

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

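/*
 * Illustrative sketch: translating a guest virtual address so host code can
 * inspect guest memory directly.  This mirrors what the library's
 * addr_gva2hva() helper does; the wrapper name here is hypothetical.
 */
static inline void *example_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
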
static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
{
	if (!vm->gdt)
		vm->gdt = vm_vaddr_alloc_page(vm);

	dt->base = vm->gdt;
	dt->limit = getpagesize();
}

static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
				int selector)
{
	if (!vm->tss)
		vm->tss = vm_vaddr_alloc_page(vm);

	memset(segp, 0, sizeof(*segp));
	segp->base = vm->tss;
	segp->limit = 0x67;
	segp->selector = selector;
	segp->type = 0xb;
	segp->present = 1;
	kvm_seg_fill_gdt_64bit(vm, segp);
}

static void vcpu_setup(struct kvm_vm *vm, int vcpuid)
{
	struct kvm_sregs sregs;

	/* Set mode specific system register values. */
	vcpu_sregs_get(vm, vcpuid, &sregs);

	sregs.idt.limit = 0;

	kvm_setup_gdt(vm, &sregs.gdt);

	switch (vm->mode) {
	case VM_MODE_PXXV48_4K:
		sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
		sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
		sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);

		kvm_seg_set_unusable(&sregs.ldt);
		kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs);
		kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds);
		kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es);
		kvm_setup_tss_64bit(vm, &sregs.tr, 0x18);
		break;

	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sregs.cr3 = vm->pgd;
	vcpu_sregs_set(vm, vcpuid, &sregs);
}

#define CPUID_XFD_BIT (1 << 4)
static bool is_xfd_supported(void)
{
	int eax, ebx, ecx, edx;
	const int leaf = 0xd, subleaf = 0x1;

	__asm__ __volatile__(
		"cpuid"
		: /* output */ "=a"(eax), "=b"(ebx),
		  "=c"(ecx), "=d"(edx)
		: /* input */ "0"(leaf), "2"(subleaf));

	return !!(eax & CPUID_XFD_BIT);
}

void vm_xsave_req_perm(void)
{
	unsigned long bitmask;
	long rc;

	if (!is_xfd_supported())
		return;

	rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
		     XSTATE_XTILE_DATA_BIT);
	/*
	 * Kernels older than v5.15 do not support
	 * ARCH_REQ_XCOMP_GUEST_PERM; return early if the prctl fails.
	 */
	if (rc)
		return;

	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
	TEST_ASSERT(bitmask & XFEATURE_XTILE_MASK,
		    "prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure bitmask=0x%lx",
		    bitmask);
}

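/*
 * Illustrative sketch: a test that exercises AMX state would request XTILE
 * permission before creating the VM, so the guest is allowed to use the
 * dynamically enabled state.  vm_create_default() is assumed to come from
 * kvm_util.h; the wrapper name is hypothetical.
 */
static inline struct kvm_vm *example_create_amx_vm(uint32_t vcpuid,
						   void *guest_code)
{
	/* Must precede vCPU creation; a no-op without XFD support. */
	vm_xsave_req_perm();
	return vm_create_default(vcpuid, 0, guest_code);
}
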
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	vm_vaddr_t stack_vaddr;
	stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
				     DEFAULT_GUEST_STACK_VADDR_MIN);

	/* Create VCPU */
	vm_vcpu_add(vm, vcpuid);
	vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
	vcpu_setup(vm, vcpuid);

	/* Setup guest general purpose registers */
	vcpu_regs_get(vm, vcpuid, &regs);
	regs.rflags = regs.rflags | 0x2;
	regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
	regs.rip = (unsigned long) guest_code;
	vcpu_regs_set(vm, vcpuid, &regs);

	/* Setup the MP state */
	mp_state.mp_state = 0;
	vcpu_set_mp_state(vm, vcpuid, &mp_state);
}

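/*
 * Illustrative sketch: adding a default vCPU to an existing VM and running
 * it once.  vcpu_run() is assumed to come from kvm_util.h; the wrapper and
 * the guest entry point are hypothetical.
 */
static inline void example_run_vcpu(struct kvm_vm *vm, void *guest_main)
{
	vm_vcpu_add_default(vm, 0, guest_main);
	vcpu_run(vm, 0);
}
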
/*
 * Allocate an instance of struct kvm_cpuid2
 *
 * Input Args: None
 *
 * Output Args: None
 *
 * Return: A pointer to the allocated struct. The caller is responsible
 * for freeing this struct.
 *
 * Since kvm_cpuid2 uses a 0-length array to allow the size of the
 * array to be decided at allocation time, allocation is slightly
 * complicated. This function uses a reasonable default length for
 * the array and performs the appropriate allocation.
 */
static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
{
	struct kvm_cpuid2 *cpuid;
	int nent = 100;
	size_t size;

	size = sizeof(*cpuid);
	size += nent * sizeof(struct kvm_cpuid_entry2);
	cpuid = malloc(size);
	if (!cpuid) {
		perror("malloc");
		abort();
	}

	cpuid->nent = nent;

	return cpuid;
}

/*
 * KVM Supported CPUID Get
 *
 * Input Args: None
 *
 * Output Args: None
 *
 * Return: The supported KVM CPUID
 *
 * Get the guest CPUID supported by KVM.
 */
struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
{
	static struct kvm_cpuid2 *cpuid;
	int kvm_fd;
	int ret;

	if (cpuid)
		return cpuid;

	cpuid = allocate_kvm_cpuid2();
	kvm_fd = open_kvm_dev_path_or_exit();

	ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
	TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
		    ret, errno);

	close(kvm_fd);
	return cpuid;
}

/*
 * KVM Get MSR
 *
 * Input Args:
 *   msr_index - Index of MSR
 *
 * Output Args: None
 *
 * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
 *
 * Get the value of the KVM feature MSR given by @msr_index.
 */
uint64_t kvm_get_feature_msr(uint64_t msr_index)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r, kvm_fd;

	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	kvm_fd = open_kvm_dev_path_or_exit();

	r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
	TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
		    "  rc: %i errno: %i", r, errno);

	close(kvm_fd);
	return buffer.entry.data;
}

/*
 * VM VCPU CPUID Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU id
 *
 * Output Args: None
 *
 * Return: KVM CPUID (KVM_GET_CPUID2)
 *
 * Gets the VCPU's CPUID.
 */
struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct kvm_cpuid2 *cpuid;
	int max_ent;
	int rc = -1;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	cpuid = allocate_kvm_cpuid2();
	max_ent = cpuid->nent;

	for (cpuid->nent = 1; cpuid->nent <= max_ent; cpuid->nent++) {
		rc = ioctl(vcpu->fd, KVM_GET_CPUID2, cpuid);
		if (!rc)
			break;

		TEST_ASSERT(rc == -1 && errno == E2BIG,
			    "KVM_GET_CPUID2 should either succeed or give E2BIG: %d %d",
			    rc, errno);
	}

	TEST_ASSERT(rc == 0, "KVM_GET_CPUID2 failed, rc: %i errno: %i",
		    rc, errno);

	return cpuid;
}

/*
 * Locate a cpuid entry.
 *
 * Input Args:
 *   function: The function of the cpuid entry to find.
 *   index: The index of the cpuid entry.
 *
 * Output Args: None
 *
 * Return: A pointer to the cpuid entry. Never returns NULL.
 */
struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
{
	struct kvm_cpuid2 *cpuid;
	struct kvm_cpuid_entry2 *entry = NULL;
	int i;

	cpuid = kvm_get_supported_cpuid();
	for (i = 0; i < cpuid->nent; i++) {
		if (cpuid->entries[i].function == function &&
		    cpuid->entries[i].index == index) {
			entry = &cpuid->entries[i];
			break;
		}
	}

	TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
		    function, index);
	return entry;
}

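/*
 * Illustrative sketch: checking a CPUID feature bit via the lookup helper
 * above, here XSAVE (CPUID.01H:ECX bit 26).  The bit choice is just an
 * example and the helper name is hypothetical.
 */
static inline bool example_cpu_has_xsave(void)
{
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_index(1, 0);

	return entry->ecx & (1u << 26);
}
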
int __vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_cpuid2 *cpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
}

/*
 * VM VCPU CPUID Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU id
 *   cpuid - The CPUID values to set.
 *
 * Output Args: None
 *
 * Return: void
 *
 * Set the VCPU's CPUID.
 */
void vcpu_set_cpuid(struct kvm_vm *vm,
		uint32_t vcpuid, struct kvm_cpuid2 *cpuid)
{
	int rc;

	rc = __vcpu_set_cpuid(vm, vcpuid, cpuid);
	TEST_ASSERT(rc == 0, "KVM_SET_CPUID2 failed, rc: %i errno: %i",
		    rc, errno);
}

/*
 * VM VCPU Get MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *
 * Output Args: None
 *
 * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
 *
 * Get value of MSR for VCPU.
 */
uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
	TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
		    "  rc: %i errno: %i", r, errno);

	return buffer.entry.data;
}

/*
 * _VM VCPU Set MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *   msr_value - New value of MSR
 *
 * Output Args: None
 *
 * Return: The result of KVM_SET_MSRS.
 *
 * Sets the value of an MSR for the given VCPU.
 */
int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	memset(&buffer, 0, sizeof(buffer));
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	buffer.entry.data = msr_value;
	r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);

	return r;
}

/*
 * VM VCPU Set MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *   msr_value - New value of MSR
 *
 * Output Args: None
 *
 * Return: On success, nothing. On failure a TEST_ASSERT is produced.
 *
 * Set value of MSR for VCPU.
 */
void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value)
{
	int r;

	r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value);
	TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
		    "  rc: %i errno: %i", r, errno);
}

void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
		    "  num: %u\n",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vm, vcpuid, &regs);

	if (num >= 1)
		regs.rdi = va_arg(ap, uint64_t);

	if (num >= 2)
		regs.rsi = va_arg(ap, uint64_t);

	if (num >= 3)
		regs.rdx = va_arg(ap, uint64_t);

	if (num >= 4)
		regs.rcx = va_arg(ap, uint64_t);

	if (num >= 5)
		regs.r8 = va_arg(ap, uint64_t);

	if (num >= 6)
		regs.r9 = va_arg(ap, uint64_t);

	vcpu_regs_set(vm, vcpuid, &regs);
	va_end(ap);
}

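/*
 * Illustrative sketch: passing two arguments to a guest function using the
 * System V ABI register order loaded above (rdi, rsi, rdx, rcx, r8, r9).
 * The values are arbitrary and the helper name is hypothetical.
 */
static inline void example_set_guest_args(struct kvm_vm *vm, uint32_t vcpuid)
{
	/* The guest entry point would receive 40 in rdi and 2 in rsi. */
	vcpu_args_set(vm, vcpuid, 2, 40ul, 2ul);
}
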
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	struct kvm_regs regs;
	struct kvm_sregs sregs;

	fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);

	fprintf(stream, "%*sregs:\n", indent + 2, "");
	vcpu_regs_get(vm, vcpuid, &regs);
	regs_dump(stream, &regs, indent + 4);

	fprintf(stream, "%*ssregs:\n", indent + 2, "");
	vcpu_sregs_get(vm, vcpuid, &sregs);
	sregs_dump(stream, &sregs, indent + 4);
}

static int kvm_get_num_msrs_fd(int kvm_fd)
{
	struct kvm_msr_list nmsrs;
	int r;

	nmsrs.nmsrs = 0;
	r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
	TEST_ASSERT(r == -1 && errno == E2BIG, "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i",
		    r);

	return nmsrs.nmsrs;
}

static int kvm_get_num_msrs(struct kvm_vm *vm)
{
	return kvm_get_num_msrs_fd(vm->kvm_fd);
}

struct kvm_msr_list *kvm_get_msr_index_list(void)
{
	struct kvm_msr_list *list;
	int nmsrs, r, kvm_fd;

	kvm_fd = open_kvm_dev_path_or_exit();

	nmsrs = kvm_get_num_msrs_fd(kvm_fd);
	list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
	list->nmsrs = nmsrs;
	r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
	close(kvm_fd);

	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
		    r);

	return list;
}

static int vcpu_save_xsave_state(struct kvm_vm *vm, struct vcpu *vcpu,
				 struct kvm_x86_state *state)
{
	int size;

	size = vm_check_cap(vm, KVM_CAP_XSAVE2);
	if (!size)
		size = sizeof(struct kvm_xsave);

	state->xsave = malloc(size);
	if (size == sizeof(struct kvm_xsave))
		return ioctl(vcpu->fd, KVM_GET_XSAVE, state->xsave);
	else
		return ioctl(vcpu->fd, KVM_GET_XSAVE2, state->xsave);
}

struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct kvm_msr_list *list;
	struct kvm_x86_state *state;
	int nmsrs, r, i;
	static int nested_size = -1;

	if (nested_size == -1) {
		nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
		TEST_ASSERT(nested_size <= sizeof(state->nested_),
			    "Nested state size too big, %i > %zi",
			    nested_size, sizeof(state->nested_));
	}

	/*
	 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
	 * guest state is consistent only after userspace re-enters the
	 * kernel with KVM_RUN.  Complete IO prior to migrating state
	 * to a new VM.
	 */
	vcpu_run_complete_io(vm, vcpuid);

	nmsrs = kvm_get_num_msrs(vm);
	list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
	list->nmsrs = nmsrs;
	r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
		    r);

	state = malloc(sizeof(*state) + nmsrs * sizeof(state->msrs.entries[0]));
	r = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, &state->events);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_VCPU_EVENTS, r: %i",
		    r);

	r = ioctl(vcpu->fd, KVM_GET_MP_STATE, &state->mp_state);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MP_STATE, r: %i",
		    r);

	r = ioctl(vcpu->fd, KVM_GET_REGS, &state->regs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i",
		    r);

	r = vcpu_save_xsave_state(vm, vcpu, state);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
		    r);

	if (kvm_check_cap(KVM_CAP_XCRS)) {
		r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
			    r);
	}

	r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
		    r);

	if (nested_size) {
		state->nested.size = sizeof(state->nested_);
		r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_NESTED_STATE, r: %i",
			    r);
		TEST_ASSERT(state->nested.size <= nested_size,
			    "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
			    state->nested.size, nested_size);
	} else
		state->nested.size = 0;

	state->msrs.nmsrs = nmsrs;
	for (i = 0; i < nmsrs; i++)
		state->msrs.entries[i].index = list->indices[i];
	r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
	TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)",
		    r, r == nmsrs ? -1 : list->indices[r]);

	r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_DEBUGREGS, r: %i",
		    r);

	free(list);
	return state;
}

void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int r;

	r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
		    r);

	r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs);
	TEST_ASSERT(r == state->msrs.nmsrs,
		    "Unexpected result from KVM_SET_MSRS, r: %i (failed at %x)",
		    r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index);

	if (kvm_check_cap(KVM_CAP_XCRS)) {
		r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
			    r);
	}

	r = ioctl(vcpu->fd, KVM_SET_XSAVE, state->xsave);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
		    r);

	r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_VCPU_EVENTS, r: %i",
		    r);

	r = ioctl(vcpu->fd, KVM_SET_MP_STATE, &state->mp_state);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_MP_STATE, r: %i",
		    r);

	r = ioctl(vcpu->fd, KVM_SET_DEBUGREGS, &state->debugregs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_DEBUGREGS, r: %i",
		    r);

	r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
		    r);

	if (state->nested.size) {
		r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
			    r);
	}
}

void kvm_x86_state_cleanup(struct kvm_x86_state *state)
{
	free(state->xsave);
	free(state);
}

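/*
 * Illustrative sketch: the intended save/restore flow for the helpers
 * above, roughly following what state_test.c does.  The VM teardown and
 * rebuild in the middle is elided; the helper name is hypothetical.
 */
static inline void example_migrate_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_x86_state *state = vcpu_save_state(vm, vcpuid);

	/*
	 * ... release and restart the VM, re-add the vCPU, and restore its
	 * CPUID here ...
	 */
	vcpu_load_state(vm, vcpuid, state);
	kvm_x86_state_cleanup(state);
}
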
static bool cpu_vendor_string_is(const char *vendor)
{
	const uint32_t *chunk = (const uint32_t *)vendor;
	int eax, ebx, ecx, edx;
	const int leaf = 0;

	__asm__ __volatile__(
		"cpuid"
		: /* output */ "=a"(eax), "=b"(ebx),
		  "=c"(ecx), "=d"(edx)
		: /* input */ "0"(leaf), "2"(0));

	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

bool is_intel_cpu(void)
{
	return cpu_vendor_string_is("GenuineIntel");
}

/*
 * Exclude early K5 samples with a vendor string of "AMDisbetter!"
 */
bool is_amd_cpu(void)
{
	return cpu_vendor_string_is("AuthenticAMD");
}

uint32_t kvm_get_cpuid_max_basic(void)
{
	return kvm_get_supported_cpuid_entry(0)->eax;
}

uint32_t kvm_get_cpuid_max_extended(void)
{
	return kvm_get_supported_cpuid_entry(0x80000000)->eax;
}

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
	struct kvm_cpuid_entry2 *entry;
	bool pae;

	/* SDM 4.1.4 */
	if (kvm_get_cpuid_max_extended() < 0x80000008) {
		pae = kvm_get_supported_cpuid_entry(1)->edx & (1 << 6);
		*pa_bits = pae ? 36 : 32;
		*va_bits = 32;
	} else {
		entry = kvm_get_supported_cpuid_entry(0x80000008);
		*pa_bits = entry->eax & 0xff;
		*va_bits = (entry->eax >> 8) & 0xff;
	}
}

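/*
 * Illustrative sketch: querying the supported address widths, e.g. to size
 * guest physical memory in a test.  Hypothetical usage, not library code.
 */
static inline void example_print_address_width(void)
{
	unsigned int pa_bits, va_bits;

	kvm_get_cpu_address_width(&pa_bits, &va_bits);
	printf("Supported widths: physical %u bits, virtual %u bits\n",
	       pa_bits, va_bits);
}
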
struct idt_entry {
	uint16_t offset0;
	uint16_t selector;
	uint16_t ist : 3;
	uint16_t : 5;
	uint16_t type : 4;
	uint16_t : 1;
	uint16_t dpl : 2;
	uint16_t p : 1;
	uint16_t offset1;
	uint32_t offset2; uint32_t reserved;
};

static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
			  int dpl, unsigned short selector)
{
	struct idt_entry *base =
		(struct idt_entry *)addr_gva2hva(vm, vm->idt);
	struct idt_entry *e = &base[vector];

	memset(e, 0, sizeof(*e));
	e->offset0 = addr;
	e->selector = selector;
	e->ist = 0;
	e->type = 14;
	e->dpl = dpl;
	e->p = 1;
	e->offset1 = addr >> 16;
	e->offset2 = addr >> 32;
}

void kvm_exit_unexpected_vector(uint32_t value)
{
	ucall(UCALL_UNHANDLED, 1, value);
}

void route_exception(struct ex_regs *regs)
{
	typedef void(*handler)(struct ex_regs *);
	handler *handlers = (handler *)exception_handlers;

	if (handlers && handlers[regs->vector]) {
		handlers[regs->vector](regs);
		return;
	}

	kvm_exit_unexpected_vector(regs->vector);
}

void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	extern void *idt_handlers;
	int i;

	vm->idt = vm_vaddr_alloc_page(vm);
	vm->handlers = vm_vaddr_alloc_page(vm);
	/* Handlers have the same address in both address spaces. */
	for (i = 0; i < NUM_INTERRUPTS; i++)
		set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
			DEFAULT_CODE_SELECTOR);
}

void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_sregs sregs;

	vcpu_sregs_get(vm, vcpuid, &sregs);
	sregs.idt.base = vm->idt;
	sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
	sregs.gdt.base = vm->gdt;
	sregs.gdt.limit = getpagesize() - 1;
	kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs);
	vcpu_sregs_set(vm, vcpuid, &sregs);
	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
			       void (*handler)(struct ex_regs *))
{
	vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);

	handlers[vector] = (vm_vaddr_t)handler;
}

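/*
 * Illustrative sketch: the usual pairing of the descriptor table setup with
 * handler registration, here for #GP.  GP_VECTOR is assumed to come from
 * processor.h; both function names are hypothetical.
 */
static void example_guest_gp_handler(struct ex_regs *regs)
{
	/* Skip the faulting instruction, e.g. a 2-byte rdmsr/wrmsr. */
	regs->rip += 2;
}

static inline void example_install_gp_handler(struct kvm_vm *vm, uint32_t vcpuid)
{
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, vcpuid);
	vm_install_exception_handler(vm, GP_VECTOR, example_guest_gp_handler);
}
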
void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct ucall uc;

	if (get_ucall(vm, vcpuid, &uc) == UCALL_UNHANDLED) {
		uint64_t vector = uc.args[0];

		TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)",
			  vector);
	}
}

struct kvm_cpuid_entry2 *get_cpuid(struct kvm_cpuid2 *cpuid, uint32_t function,
				   uint32_t index)
{
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		struct kvm_cpuid_entry2 *cur = &cpuid->entries[i];

		if (cur->function == function && cur->index == index)
			return cur;
	}

	TEST_FAIL("CPUID function 0x%x index 0x%x not found", function, index);

	return NULL;
}

bool set_cpuid(struct kvm_cpuid2 *cpuid,
	       struct kvm_cpuid_entry2 *ent)
{
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		struct kvm_cpuid_entry2 *cur = &cpuid->entries[i];

		if (cur->function != ent->function || cur->index != ent->index)
			continue;

		memcpy(cur, ent, sizeof(struct kvm_cpuid_entry2));
		return true;
	}

	return false;
}

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3)
{
	uint64_t r;

	asm volatile("vmcall"
		     : "=a"(r)
		     : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
	return r;
}

struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
{
	static struct kvm_cpuid2 *cpuid;
	int kvm_fd;
	int ret;

	if (cpuid)
		return cpuid;

	cpuid = allocate_kvm_cpuid2();
	kvm_fd = open_kvm_dev_path_or_exit();

	ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
	TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n",
		    ret, errno);

	close(kvm_fd);
	return cpuid;
}

1473 void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
1475 static struct kvm_cpuid2 *cpuid_full;
1476 struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
1480 cpuid_sys = kvm_get_supported_cpuid();
1481 cpuid_hv = kvm_get_supported_hv_cpuid();
1483 cpuid_full = malloc(sizeof(*cpuid_full) +
1484 (cpuid_sys->nent + cpuid_hv->nent) *
1485 sizeof(struct kvm_cpuid_entry2));
1491 /* Need to skip KVM CPUID leaves 0x400000xx */
1492 for (i = 0; i < cpuid_sys->nent; i++) {
1493 if (cpuid_sys->entries[i].function >= 0x40000000 &&
1494 cpuid_sys->entries[i].function < 0x40000100)
1496 cpuid_full->entries[nent] = cpuid_sys->entries[i];
1500 memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
1501 cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
1502 cpuid_full->nent = nent + cpuid_hv->nent;
1505 vcpu_set_cpuid(vm, vcpuid, cpuid_full);
struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
{
	static struct kvm_cpuid2 *cpuid;

	cpuid = allocate_kvm_cpuid2();

	vcpu_ioctl(vm, vcpuid, KVM_GET_SUPPORTED_HV_CPUID, cpuid);

	return cpuid;
}

unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
	const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
	unsigned long ht_gfn, max_gfn, max_pfn;
	uint32_t eax, ebx, ecx, edx, max_ext_leaf;

	max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;

	/* Avoid reserved HyperTransport region on AMD processors. */
	if (!is_amd_cpu())
		return max_gfn;

	/* On parts with <40 physical address bits, the area is fully hidden */
	if (vm->pa_bits < 40)
		return max_gfn;

	/* Before family 17h, the HyperTransport area is just below 1T. */
	ht_gfn = (1 << 28) - num_ht_pages;
	eax = 1;
	cpuid(&eax, &ebx, &ecx, &edx);
	if (x86_family(eax) < 0x17)
		goto done;

	/*
	 * Otherwise it's at the top of the physical address space, possibly
	 * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
	 * the old conservative value if MAXPHYADDR is not enumerated.
	 */
	eax = 0x80000000;
	cpuid(&eax, &ebx, &ecx, &edx);
	max_ext_leaf = eax;
	if (max_ext_leaf < 0x80000008)
		goto done;

	eax = 0x80000008;
	cpuid(&eax, &ebx, &ecx, &edx);
	max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
	if (max_ext_leaf >= 0x8000001f) {
		eax = 0x8000001f;
		cpuid(&eax, &ebx, &ecx, &edx);
		max_pfn >>= (ebx >> 6) & 0x3f;
	}

	ht_gfn = max_pfn - num_ht_pages;
done:
	return min(max_gfn, ht_gfn - 1);
}