selftests: kvm: Test MSR exiting to userspace
tools/testing/selftests/kvm/x86_64/user_msr_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * tests for KVM_CAP_X86_USER_SPACE_MSR and KVM_X86_SET_MSR_FILTER
 *
 * Copyright (C) 2020, Amazon Inc.
 *
 * This is a functional test to verify that we can deflect MSR events
 * into user space.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID                  5

static u32 msr_reads, msr_writes;

static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_deadbeef[1] = { 0x1 };

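/*
 * Clear the bit for 'msr' in an otherwise all-ones (allow-all) filter
 * bitmap.  A cleared bit denies the access covered by the range's flags,
 * so the guest's access bounces to user space with exit reason
 * KVM_MSR_EXIT_REASON_FILTER.
 */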
static void deny_msr(uint8_t *bitmap, u32 msr)
{
        u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);

        bitmap[idx / 8] &= ~(1 << (idx % 8));
}

static void prepare_bitmaps(void)
{
        memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
        memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
        memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
        memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
        memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));

        deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
        deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
        deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}

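/*
 * Default-deny filter: MSRs outside the ranges below are refused outright,
 * and within the covered ranges the bits cleared by prepare_bitmaps()
 * (MSR_IA32_POWER_CTL writes, MSR_SYSCALL_MASK and MSR_GS_BASE reads) are
 * deflected to user space with reason KVM_MSR_EXIT_REASON_FILTER.
 */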
struct kvm_msr_filter filter = {
        .flags = KVM_MSR_FILTER_DEFAULT_DENY,
        .ranges = {
                {
                        .flags = KVM_MSR_FILTER_READ,
                        .base = 0x00000000,
                        .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
                        .bitmap = bitmap_00000000,
                }, {
                        .flags = KVM_MSR_FILTER_WRITE,
                        .base = 0x00000000,
                        .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
                        .bitmap = bitmap_00000000_write,
                }, {
                        .flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
                        .base = 0x40000000,
                        .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
                        .bitmap = bitmap_40000000,
                }, {
                        .flags = KVM_MSR_FILTER_READ,
                        .base = 0xc0000000,
                        .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
                        .bitmap = bitmap_c0000000_read,
                }, {
                        .flags = KVM_MSR_FILTER_WRITE,
                        .base = 0xc0000000,
                        .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
                        .bitmap = bitmap_c0000000,
                }, {
                        .flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
                        .base = 0xdeadbeef,
                        .nmsrs = 1,
                        .bitmap = bitmap_deadbeef,
                },
        },
};

struct kvm_msr_filter no_filter = {
        .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};

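/*
 * Exercise a mix of MSR accesses.  With the deny filter installed
 * (trapped == true) the filtered MSRs and the invalid MSR 0xdeadbeef exit
 * to user space; without the filter only the invalid MSR does.
 */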
static void guest_msr_calls(bool trapped)
{
        /* This goes into the in-kernel emulation */
        wrmsr(MSR_SYSCALL_MASK, 0);

        if (trapped) {
                /* This goes into user space emulation */
                GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
                GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
        } else {
                GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
                GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
        }

        /* If trapped == true, this goes into user space emulation */
        wrmsr(MSR_IA32_POWER_CTL, 0x1234);

        /* This goes into the in-kernel emulation */
        rdmsr(MSR_IA32_POWER_CTL);

        /* Invalid MSR, should always be handled by user space exit */
        GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
        wrmsr(0xdeadbeef, 0x1234);
}

static void guest_code(void)
{
        guest_msr_calls(true);

        /*
         * Disable msr filtering, so that the kernel
         * handles everything in the next round
         */
        GUEST_SYNC(0);

        guest_msr_calls(false);

        GUEST_DONE();
}

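/*
 * The guest's GUEST_SYNC(0) arrives here as a ucall; use it to replace the
 * deny filter with an allow-all filter so that the second round of MSR
 * accesses is handled entirely in the kernel.
 */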
static int handle_ucall(struct kvm_vm *vm)
{
        struct ucall uc;

        switch (get_ucall(vm, VCPU_ID, &uc)) {
        case UCALL_ABORT:
                TEST_FAIL("Guest assertion not met");
                break;
        case UCALL_SYNC:
                vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &no_filter);
                break;
        case UCALL_DONE:
                return 1;
        default:
                TEST_FAIL("Unknown ucall %lu", uc.cmd);
        }

        return 0;
}

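/*
 * Complete a deflected RDMSR by returning the MSR index as the read value;
 * guest_msr_calls() asserts on exactly that pattern.
 */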
static void handle_rdmsr(struct kvm_run *run)
{
        run->msr.data = run->msr.index;
        msr_reads++;

        if (run->msr.index == MSR_SYSCALL_MASK ||
            run->msr.index == MSR_GS_BASE) {
                TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
                            "MSR read trap w/o access fault");
        }

        if (run->msr.index == 0xdeadbeef) {
                TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
                            "MSR deadbeef read trap w/o inval fault");
        }
}

static void handle_wrmsr(struct kvm_run *run)
{
        /* ignore */
        msr_writes++;

        if (run->msr.index == MSR_IA32_POWER_CTL) {
                TEST_ASSERT(run->msr.data == 0x1234,
                            "MSR data for MSR_IA32_POWER_CTL incorrect");
                TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
                            "MSR_IA32_POWER_CTL trap w/o access fault");
        }

        if (run->msr.index == 0xdeadbeef) {
                TEST_ASSERT(run->msr.data == 0x1234,
                            "MSR data for deadbeef incorrect");
                TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
                            "deadbeef trap w/o inval fault");
        }
}

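/*
 * Enable user space MSR exits for unknown, invalid and filtered accesses,
 * install the deny filter, then run the guest and count the RDMSR/WRMSR
 * exits handled here.
 */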
int main(int argc, char *argv[])
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_X86_USER_SPACE_MSR,
                .args[0] = KVM_MSR_EXIT_REASON_INVAL |
                           KVM_MSR_EXIT_REASON_UNKNOWN |
                           KVM_MSR_EXIT_REASON_FILTER,
        };
        struct kvm_vm *vm;
        struct kvm_run *run;
        int rc;

        /* Tell stdout not to buffer its content */
        setbuf(stdout, NULL);

        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
        run = vcpu_state(vm, VCPU_ID);

        rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
        if (!rc) {
                print_skip("KVM_CAP_X86_USER_SPACE_MSR not supported");
                exit(KSFT_SKIP);
        }

        vm_enable_cap(vm, &cap);

        rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
        TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

        prepare_bitmaps();
        vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter);

        while (1) {
                rc = _vcpu_run(vm, VCPU_ID);

                TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);

                switch (run->exit_reason) {
                case KVM_EXIT_X86_RDMSR:
                        handle_rdmsr(run);
                        break;
                case KVM_EXIT_X86_WRMSR:
                        handle_wrmsr(run);
                        break;
                case KVM_EXIT_IO:
                        if (handle_ucall(vm))
                                goto done;
                        break;
                }
        }

done:
        TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
        TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");

        kvm_vm_free(vm);

        return 0;
}