Linux 6.9-rc1
[linux-2.6-microblaze.git] / tools / testing / selftests / kvm / x86_64 / vmx_dirty_log_test.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KVM dirty page logging test
4  *
5  * Copyright (C) 2018, Red Hat, Inc.
6  */
7
8 #define _GNU_SOURCE /* for program_invocation_name */
9
10 #include <stdio.h>
11 #include <stdlib.h>
12 #include <linux/bitmap.h>
13 #include <linux/bitops.h>
14
15 #include "test_util.h"
16 #include "kvm_util.h"
17 #include "processor.h"
18 #include "vmx.h"
19
20 /* The memory slot index to track dirty pages */
21 #define TEST_MEM_SLOT_INDEX             1
22 #define TEST_MEM_PAGES                  3
23
24 /* L1 guest test virtual memory offset */
25 #define GUEST_TEST_MEM                  0xc0000000
26
27 /* L2 guest test virtual memory offset */
28 #define NESTED_TEST_MEM1                0xc0001000
29 #define NESTED_TEST_MEM2                0xc0002000
30
/*
 * L2 (nested) guest body: a scripted sequence of accesses to the nested
 * test pages.  Each GUEST_SYNC's argument tells the host (seen as
 * uc.args[1] in main()) whether page 0 of the test memslot should be
 * dirty at that point.  The exact access/sync order is the test
 * protocol -- do not reorder.
 */
static void l2_guest_code(void)
{
	/*
	 * Read, then write, NESTED_TEST_MEM1 (L2 GPA 0xc0001000, mapped by
	 * main() to L1 GPA 0xc0000000).  The write must dirty page 0.
	 */
	*(volatile uint64_t *)NESTED_TEST_MEM1;
	*(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
	GUEST_SYNC(true);
	/* No access since the last sync: page 0 must now read as clean. */
	GUEST_SYNC(false);

	/*
	 * NESTED_TEST_MEM2 (L2 GPA 0xc0002000) also aliases L1 GPA
	 * 0xc0000000, so these writes must dirty page 0 too -- the dirty
	 * log is keyed by L1 GPA, not L2 GPA.
	 */
	*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
	GUEST_SYNC(true);
	/*
	 * Write again to verify the page is re-marked dirty after the host
	 * harvested the log on the previous sync (main() asserts the bit
	 * was cleared in between).
	 */
	*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
	GUEST_SYNC(true);
	GUEST_SYNC(false);

	/* Exit to L1 and never come back.  */
	vmcall();
}
47
48 void l1_guest_code(struct vmx_pages *vmx)
49 {
50 #define L2_GUEST_STACK_SIZE 64
51         unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
52
53         GUEST_ASSERT(vmx->vmcs_gpa);
54         GUEST_ASSERT(prepare_for_vmx_operation(vmx));
55         GUEST_ASSERT(load_vmcs(vmx));
56
57         prepare_vmcs(vmx, l2_guest_code,
58                      &l2_guest_stack[L2_GUEST_STACK_SIZE]);
59
60         GUEST_SYNC(false);
61         GUEST_ASSERT(!vmlaunch());
62         GUEST_SYNC(false);
63         GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
64         GUEST_DONE();
65 }
66
67 int main(int argc, char *argv[])
68 {
69         vm_vaddr_t vmx_pages_gva = 0;
70         struct vmx_pages *vmx;
71         unsigned long *bmap;
72         uint64_t *host_test_mem;
73
74         struct kvm_vcpu *vcpu;
75         struct kvm_vm *vm;
76         struct ucall uc;
77         bool done = false;
78
79         TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
80         TEST_REQUIRE(kvm_cpu_has_ept());
81
82         /* Create VM */
83         vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
84         vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
85         vcpu_args_set(vcpu, 1, vmx_pages_gva);
86
87         /* Add an extra memory slot for testing dirty logging */
88         vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
89                                     GUEST_TEST_MEM,
90                                     TEST_MEM_SLOT_INDEX,
91                                     TEST_MEM_PAGES,
92                                     KVM_MEM_LOG_DIRTY_PAGES);
93
94         /*
95          * Add an identity map for GVA range [0xc0000000, 0xc0002000).  This
96          * affects both L1 and L2.  However...
97          */
98         virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
99
100         /*
101          * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
102          * 0xc0000000.
103          *
104          * Note that prepare_eptp should be called only L1's GPA map is done,
105          * meaning after the last call to virt_map.
106          */
107         prepare_eptp(vmx, vm, 0);
108         nested_map_memslot(vmx, vm, 0);
109         nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
110         nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
111
112         bmap = bitmap_zalloc(TEST_MEM_PAGES);
113         host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
114
115         while (!done) {
116                 memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
117                 vcpu_run(vcpu);
118                 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
119
120                 switch (get_ucall(vcpu, &uc)) {
121                 case UCALL_ABORT:
122                         REPORT_GUEST_ASSERT(uc);
123                         /* NOT REACHED */
124                 case UCALL_SYNC:
125                         /*
126                          * The nested guest wrote at offset 0x1000 in the memslot, but the
127                          * dirty bitmap must be filled in according to L1 GPA, not L2.
128                          */
129                         kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
130                         if (uc.args[1]) {
131                                 TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean");
132                                 TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest");
133                         } else {
134                                 TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty");
135                                 TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest");
136                         }
137
138                         TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty");
139                         TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest");
140                         TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty");
141                         TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest");
142                         break;
143                 case UCALL_DONE:
144                         done = true;
145                         break;
146                 default:
147                         TEST_FAIL("Unknown ucall %lu", uc.cmd);
148                 }
149         }
150 }