// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#define _GNU_SOURCE /* for program_invocation_name */

#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX		1
#define TEST_MEM_PAGES			3

/* L1 guest test virtual memory offset */
#define GUEST_TEST_MEM			0xc0000000

/* L2 guest test virtual memory offset */
#define NESTED_TEST_MEM1		0xc0001000
#define NESTED_TEST_MEM2		0xc0002000
31 static void l2_guest_code(void)
33 *(volatile uint64_t *)NESTED_TEST_MEM1;
34 *(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
38 *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
40 *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
44 /* Exit to L1 and never come back. */
48 void l1_guest_code(struct vmx_pages *vmx)
50 #define L2_GUEST_STACK_SIZE 64
51 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
53 GUEST_ASSERT(vmx->vmcs_gpa);
54 GUEST_ASSERT(prepare_for_vmx_operation(vmx));
55 GUEST_ASSERT(load_vmcs(vmx));
57 prepare_vmcs(vmx, l2_guest_code,
58 &l2_guest_stack[L2_GUEST_STACK_SIZE]);
61 GUEST_ASSERT(!vmlaunch());
63 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
67 int main(int argc, char *argv[])
69 vm_vaddr_t vmx_pages_gva = 0;
70 struct vmx_pages *vmx;
72 uint64_t *host_test_mem;
74 struct kvm_vcpu *vcpu;
79 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
80 TEST_REQUIRE(kvm_cpu_has_ept());
83 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
84 vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
85 vcpu_args_set(vcpu, 1, vmx_pages_gva);
87 /* Add an extra memory slot for testing dirty logging */
88 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
92 KVM_MEM_LOG_DIRTY_PAGES);
95 * Add an identity map for GVA range [0xc0000000, 0xc0002000). This
96 * affects both L1 and L2. However...
98 virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
101 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
104 * Note that prepare_eptp should be called only L1's GPA map is done,
105 * meaning after the last call to virt_map.
107 prepare_eptp(vmx, vm, 0);
108 nested_map_memslot(vmx, vm, 0);
109 nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
110 nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
112 bmap = bitmap_zalloc(TEST_MEM_PAGES);
113 host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
116 memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
118 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
120 switch (get_ucall(vcpu, &uc)) {
122 REPORT_GUEST_ASSERT(uc);
126 * The nested guest wrote at offset 0x1000 in the memslot, but the
127 * dirty bitmap must be filled in according to L1 GPA, not L2.
129 kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
131 TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean");
132 TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest");
134 TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty");
135 TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest");
138 TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty");
139 TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest");
140 TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty");
141 TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest");
147 TEST_FAIL("Unknown ucall %lu", uc.cmd);