// SPDX-License-Identifier: GPL-2.0

#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID		1

#define MMIO_GPA	0x100000000ull

static void guest_code(void)
{
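	/*
	 * Read the MMIO GPA twice: the first access should exit to userspace
	 * as MMIO, the second access (after the host changes CPUID) should
	 * take a reserved #PF that is eaten by guest_pf_handler(), which
	 * ends the test.  Falling through to the assert means no #PF came.
	 */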
	(void)READ_ONCE(*((uint64_t *)MMIO_GPA));
	(void)READ_ONCE(*((uint64_t *)MMIO_GPA));

	GUEST_ASSERT(0);
}

static void guest_pf_handler(struct ex_regs *regs)
{
	/* PFEC == RSVD | PRESENT (read, kernel). */
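	/* 0x9 == bit 0 (PRESENT) | bit 3 (RSVD) in the #PF error code. */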
	GUEST_ASSERT(regs->error_code == 0x9);
	GUEST_DONE();
}

static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
{
	u32 good_cpuid_val = *cpuid_reg;
	struct kvm_run *run;
	struct kvm_vm *vm;
	uint64_t cmd;
	int r;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	run = vcpu_state(vm, VCPU_ID);

	/* Map 1gb page without a backing memslot. */
	__virt_pg_map(vm, MMIO_GPA, MMIO_GPA, X86_PAGE_SIZE_1G);

	r = _vcpu_run(vm, VCPU_ID);

	/* Guest access to the 1gb page should trigger MMIO. */
	TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO,
		    "Unexpected exit reason: %u (%s), expected MMIO exit (1gb page w/o memslot)\n",
		    run->exit_reason, exit_reason_str(run->exit_reason));

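	/* The guest did an 8-byte read of MMIO_GPA; the exit should match. */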
	TEST_ASSERT(run->mmio.len == 8, "Unexpected exit mmio size = %u", run->mmio.len);

	TEST_ASSERT(run->mmio.phys_addr == MMIO_GPA,
		    "Unexpected exit mmio address = 0x%llx", run->mmio.phys_addr);

	/*
	 * Effect the CPUID change for the guest and re-enter the guest.  Its
	 * access should now #PF due to the PAGE_SIZE bit being reserved or
	 * the resulting GPA being invalid.  Note, kvm_get_supported_cpuid()
	 * returns the struct that contains the entry being modified.  Eww.
	 */
	*cpuid_reg = evil_cpuid_val;
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	/*
	 * Add a dummy memslot to coerce KVM into bumping the MMIO generation.
	 * KVM does not "officially" support mucking with CPUID after KVM_RUN,
	 * and will incorrectly reuse MMIO SPTEs.  Don't delete the memslot!
	 * KVM x86 zaps all shadow pages on memslot deletion.
	 */
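	/* The gpa, slot (10), and size (1 page) are arbitrary; any new memslot works. */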
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    MMIO_GPA << 1, 10, 1, 0);

	/* Set up a #PF handler to eat the RSVD #PF and signal all done! */
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);
	vm_handle_exception(vm, PF_VECTOR, guest_pf_handler);

	r = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);

	cmd = get_ucall(vm, VCPU_ID, NULL);
	TEST_ASSERT(cmd == UCALL_DONE,
		    "Unexpected guest exit, exit_reason=%s, ucall.cmd = %lu\n",
		    exit_reason_str(run->exit_reason), cmd);

	/*
	 * Restore the happy CPUID value for the next test.  Yes, changes are
	 * indeed persistent across VM destruction.
	 */
	*cpuid_reg = good_cpuid_val;

	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	struct kvm_cpuid_entry2 *entry;
	int opt;

	/*
	 * All tests are opt-in because TDP doesn't play nice with reserved #PF
	 * in the GVA->GPA translation.  The hardware page walker doesn't let
	 * software change GBPAGES or MAXPHYADDR, and KVM doesn't manually walk
	 * the GVA on fault for performance reasons.
	 */
	bool do_gbpages = false;
	bool do_maxphyaddr = false;

	setbuf(stdout, NULL);

	while ((opt = getopt(argc, argv, "gm")) != -1) {
		switch (opt) {
		case 'g':
			do_gbpages = true;
			break;
		case 'm':
			do_maxphyaddr = true;
			break;
		case 'h':
		default:
			printf("usage: %s [-g (GBPAGES)] [-m (MAXPHYADDR)]\n", argv[0]);
			break;
		}
	}

	if (!do_gbpages && !do_maxphyaddr) {
		print_skip("No sub-tests selected");
		exit(KSFT_SKIP);
	}

	entry = kvm_get_supported_cpuid_entry(0x80000001);
	if (!(entry->edx & CPUID_GBPAGES)) {
		print_skip("1gb hugepages not supported");
		exit(KSFT_SKIP);
	}

	if (do_gbpages) {
		pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n");
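		/*
		 * Clearing GBPAGES makes the PS bit in the guest's 1gb PDPTE
		 * reserved, so the second access should take a reserved #PF.
		 */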
		mmu_role_test(&entry->edx, entry->edx & ~CPUID_GBPAGES);
	}

	if (do_maxphyaddr) {
		pr_info("Test MMIO after changing CPUID.MAXPHYADDR\n\n");
		entry = kvm_get_supported_cpuid_entry(0x80000008);
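		/*
		 * Force guest MAXPHYADDR to 32 (CPUID.80000008H:EAX[7:0]);
		 * MMIO_GPA (1ull << 32) then has a reserved address bit set.
		 */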
		mmu_role_test(&entry->eax, (entry->eax & ~0xff) | 0x20);
	}

	return 0;
}