e8b6918cdea0fe74aaedc0809832cbde051b88a5
[linux-2.6-microblaze.git] / tools / testing / selftests / kvm / x86_64 / userspace_msr_exit_test.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020, Google LLC.
4  *
5  * Tests for exiting into userspace on registered MSRs
6  */
7
8 #define _GNU_SOURCE /* for program_invocation_short_name */
9 #include <sys/ioctl.h>
10
11 #include "test_util.h"
12 #include "kvm_util.h"
13 #include "vmx.h"
14
15 /* Forced emulation prefix, used to invoke the emulator unconditionally. */
16 #define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
17 #define KVM_FEP_LENGTH 5
18 static int fep_available = 1;
19
20 #define VCPU_ID       1
21 #define MSR_NON_EXISTENT 0x474f4f00
22
/*
 * All-zero bitmap shared by every filter range below.  With
 * KVM_MSR_FILTER_DEFAULT_ALLOW, a clear bitmap bit means the covered MSR is
 * not handled by KVM and the access exits to userspace instead (see
 * process_rdmsr()/process_wrmsr(), which service those exits).
 */
u64 deny_bits = 0;

/*
 * Filter that forwards three MSRs to userspace: one the kernel emulates,
 * one it knows of but doesn't emulate, and one that is pure fabrication.
 */
struct kvm_msr_filter filter = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test an MSR the kernel knows about. */
			.base = MSR_IA32_XSS,
			.bitmap = (uint8_t*)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test an MSR the kernel doesn't know about. */
			.base = MSR_IA32_FLUSH_CMD,
			.bitmap = (uint8_t*)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test a fabricated MSR that no one knows about. */
			.base = MSR_NON_EXISTENT,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};
51
/* Filter that forwards only MSR_FS_BASE accesses to userspace. */
struct kvm_msr_filter filter_fs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			.base = MSR_FS_BASE,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};
64
/* Filter that forwards only MSR_GS_BASE accesses to userspace. */
struct kvm_msr_filter filter_gs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			.base = MSR_GS_BASE,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};
77
/* Value userspace stores/returns for the fabricated MSR_NON_EXISTENT. */
uint64_t msr_non_existent_data;
/* Bumped by the guest #GP handler; reset at the top of each test_* helper. */
int guest_exception_count;
80
81 /*
82  * Note: Force test_rdmsr() to not be inlined to prevent the labels,
83  * rdmsr_start and rdmsr_end, from being defined multiple times.
84  */
85 static noinline uint64_t test_rdmsr(uint32_t msr)
86 {
87         uint32_t a, d;
88
89         guest_exception_count = 0;
90
91         __asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
92                         "=a"(a), "=d"(d) : "c"(msr) : "memory");
93
94         return a | ((uint64_t) d << 32);
95 }
96
97 /*
98  * Note: Force test_wrmsr() to not be inlined to prevent the labels,
99  * wrmsr_start and wrmsr_end, from being defined multiple times.
100  */
101 static noinline void test_wrmsr(uint32_t msr, uint64_t value)
102 {
103         uint32_t a = value;
104         uint32_t d = value >> 32;
105
106         guest_exception_count = 0;
107
108         __asm__ __volatile__("wrmsr_start: wrmsr; wrmsr_end:" ::
109                         "a"(a), "d"(d), "c"(msr) : "memory");
110 }
111
112 extern char rdmsr_start, rdmsr_end;
113 extern char wrmsr_start, wrmsr_end;
114
115 /*
116  * Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
117  * rdmsr_start and rdmsr_end, from being defined multiple times.
118  */
119 static noinline uint64_t test_em_rdmsr(uint32_t msr)
120 {
121         uint32_t a, d;
122
123         guest_exception_count = 0;
124
125         __asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
126                         "=a"(a), "=d"(d) : "c"(msr) : "memory");
127
128         return a | ((uint64_t) d << 32);
129 }
130
131 /*
132  * Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
133  * wrmsr_start and wrmsr_end, from being defined multiple times.
134  */
135 static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
136 {
137         uint32_t a = value;
138         uint32_t d = value >> 32;
139
140         guest_exception_count = 0;
141
142         __asm__ __volatile__(KVM_FEP "em_wrmsr_start: wrmsr; em_wrmsr_end:" ::
143                         "a"(a), "d"(d), "c"(msr) : "memory");
144 }
145
146 extern char em_rdmsr_start, em_rdmsr_end;
147 extern char em_wrmsr_start, em_wrmsr_end;
148
/*
 * Guest entry point for test_msr_filter(): exercises rdmsr/wrmsr on each
 * filtered MSR and checks, via guest_exception_count, whether userspace
 * accepted or rejected the access.  Each expectation below must stay in
 * lock-step with the host-side run_guest_then_process_*() sequence.
 */
static void guest_code(void)
{
	uint64_t data;

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
	 *
	 * A GP is thrown if anything other than 0 is written to
	 * MSR_IA32_XSS.
	 */
	data = test_rdmsr(MSR_IA32_XSS);
	GUEST_ASSERT(data == 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 0);
	GUEST_ASSERT(guest_exception_count == 0);

	/* Userspace flags the non-zero write as an error -> #GP in guest. */
	test_wrmsr(MSR_IA32_XSS, 1);
	GUEST_ASSERT(guest_exception_count == 1);

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_FLUSH_CMD.
	 *
	 * A GP is thrown if MSR_IA32_FLUSH_CMD is read
	 * from or if a value other than 1 is written to it.
	 */
	test_rdmsr(MSR_IA32_FLUSH_CMD);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 0);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 1);
	GUEST_ASSERT(guest_exception_count == 0);

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_NON_EXISTENT.
	 *
	 * Test that a fabricated MSR can pass through the kernel
	 * and be handled in userspace.
	 */
	test_wrmsr(MSR_NON_EXISTENT, 2);
	GUEST_ASSERT(guest_exception_count == 0);

	data = test_rdmsr(MSR_NON_EXISTENT);
	GUEST_ASSERT(data == 2);
	GUEST_ASSERT(guest_exception_count == 0);

	/*
	 * Test to see if the instruction emulator is available (ie: the module
	 * parameter 'kvm.force_emulation_prefix=1' is set).  This instruction
	 * will #UD if it isn't available; guest_ud_handler() then clears
	 * fep_available and skips past the prefix.
	 */
	__asm__ __volatile__(KVM_FEP "nop");

	if (fep_available) {
		/* Let userspace know we aren't done. */
		GUEST_SYNC(0);

		/*
		 * Now run the same tests with the instruction emulator.
		 */
		data = test_em_rdmsr(MSR_IA32_XSS);
		GUEST_ASSERT(data == 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 1);
		GUEST_ASSERT(guest_exception_count == 1);

		test_em_rdmsr(MSR_IA32_FLUSH_CMD);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 0);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 1);
		GUEST_ASSERT(guest_exception_count == 0);

		test_em_wrmsr(MSR_NON_EXISTENT, 2);
		GUEST_ASSERT(guest_exception_count == 0);
		data = test_em_rdmsr(MSR_NON_EXISTENT);
		GUEST_ASSERT(data == 2);
		GUEST_ASSERT(guest_exception_count == 0);
	}

	GUEST_DONE();
}
235
236
/*
 * Guest entry point for test_msr_permission_bitmap(): verifies that an MSR
 * filter can be replaced at runtime.  Userspace answers intercepted reads
 * with the MSR's own index (see process_rdmsr()), so data == index proves
 * the access was intercepted and data == 0 proves KVM handled it.
 */
static void guest_code_permission_bitmap(void)
{
	uint64_t data;

	/* filter_fs is installed: FS_BASE intercepted, GS_BASE passes through. */
	test_wrmsr(MSR_FS_BASE, 0);
	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data == MSR_FS_BASE);

	test_wrmsr(MSR_GS_BASE, 0);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data == 0);

	/* Let userspace know to switch the filter */
	GUEST_SYNC(0);

	/* filter_gs is installed: roles reversed. */
	test_wrmsr(MSR_FS_BASE, 0);
	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data == 0);

	test_wrmsr(MSR_GS_BASE, 0);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data == MSR_GS_BASE);

	GUEST_DONE();
}
262
263 static void __guest_gp_handler(struct ex_regs *regs,
264                                char *r_start, char *r_end,
265                                char *w_start, char *w_end)
266 {
267         if (regs->rip == (uintptr_t)r_start) {
268                 regs->rip = (uintptr_t)r_end;
269                 regs->rax = 0;
270                 regs->rdx = 0;
271         } else if (regs->rip == (uintptr_t)w_start) {
272                 regs->rip = (uintptr_t)w_end;
273         } else {
274                 GUEST_ASSERT(!"RIP is at an unknown location!");
275         }
276
277         ++guest_exception_count;
278 }
279
/* #GP handler for the plain (non-emulated) rdmsr/wrmsr stubs. */
static void guest_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &rdmsr_start, &rdmsr_end,
			   &wrmsr_start, &wrmsr_end);
}

/* #GP handler for the forced-emulation (KVM_FEP) rdmsr/wrmsr stubs. */
static void guest_fep_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &em_rdmsr_start, &em_rdmsr_end,
			   &em_wrmsr_start, &em_wrmsr_end);
}
291
/*
 * #UD handler for the forced-emulation-prefix probe in guest_code(): a #UD
 * means the prefix is not enabled, so record that and skip the KVM_FEP
 * bytes to resume at the instruction that followed them.
 */
static void guest_ud_handler(struct ex_regs *regs)
{
	fep_available = 0;
	regs->rip += KVM_FEP_LENGTH;
}
297
/* Enter the guest until its next userspace exit; an ioctl failure is fatal. */
static void run_guest(struct kvm_vm *vm)
{
	int rc;

	rc = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
}
305
306 static void check_for_guest_assert(struct kvm_vm *vm)
307 {
308         struct kvm_run *run = vcpu_state(vm, VCPU_ID);
309         struct ucall uc;
310
311         if (run->exit_reason == KVM_EXIT_IO &&
312                 get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
313                         TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
314                                 __FILE__, uc.args[1]);
315         }
316 }
317
318 static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
319 {
320         struct kvm_run *run = vcpu_state(vm, VCPU_ID);
321
322         check_for_guest_assert(vm);
323
324         TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
325                     "Unexpected exit reason: %u (%s),\n",
326                     run->exit_reason,
327                     exit_reason_str(run->exit_reason));
328         TEST_ASSERT(run->msr.index == msr_index,
329                         "Unexpected msr (0x%04x), expected 0x%04x",
330                         run->msr.index, msr_index);
331
332         switch (run->msr.index) {
333         case MSR_IA32_XSS:
334                 run->msr.data = 0;
335                 break;
336         case MSR_IA32_FLUSH_CMD:
337                 run->msr.error = 1;
338                 break;
339         case MSR_NON_EXISTENT:
340                 run->msr.data = msr_non_existent_data;
341                 break;
342         case MSR_FS_BASE:
343                 run->msr.data = MSR_FS_BASE;
344                 break;
345         case MSR_GS_BASE:
346                 run->msr.data = MSR_GS_BASE;
347                 break;
348         default:
349                 TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
350         }
351 }
352
353 static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
354 {
355         struct kvm_run *run = vcpu_state(vm, VCPU_ID);
356
357         check_for_guest_assert(vm);
358
359         TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR,
360                     "Unexpected exit reason: %u (%s),\n",
361                     run->exit_reason,
362                     exit_reason_str(run->exit_reason));
363         TEST_ASSERT(run->msr.index == msr_index,
364                         "Unexpected msr (0x%04x), expected 0x%04x",
365                         run->msr.index, msr_index);
366
367         switch (run->msr.index) {
368         case MSR_IA32_XSS:
369                 if (run->msr.data != 0)
370                         run->msr.error = 1;
371                 break;
372         case MSR_IA32_FLUSH_CMD:
373                 if (run->msr.data != 1)
374                         run->msr.error = 1;
375                 break;
376         case MSR_NON_EXISTENT:
377                 msr_non_existent_data = run->msr.data;
378                 break;
379         case MSR_FS_BASE:
380         case MSR_GS_BASE:
381                 break;
382         default:
383                 TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
384         }
385 }
386
/* Require that the current exit is the guest's UCALL_DONE notification. */
static void process_ucall_done(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	struct ucall uc;

	check_for_guest_assert(vm);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
		    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
		    uc.cmd, UCALL_DONE);
}
403
/*
 * Decode the current exit as a ucall and return its command.  SYNC is passed
 * through, ABORT fails the test, DONE is validated, anything else asserts.
 */
static uint64_t process_ucall(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	/* Zero-init so uc.cmd is defined even if no ucall data is present. */
	struct ucall uc = {};

	check_for_guest_assert(vm);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	switch (get_ucall(vm, VCPU_ID, &uc)) {
	case UCALL_SYNC:
		break;
	case UCALL_ABORT:
		/* Reports the guest assert and fails the test. */
		check_for_guest_assert(vm);
		break;
	case UCALL_DONE:
		process_ucall_done(vm);
		break;
	default:
		TEST_ASSERT(false, "Unexpected ucall");
	}

	return uc.cmd;
}
431
/* Run the guest and service the resulting MSR-read userspace exit. */
static void run_guest_then_process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
{
	run_guest(vm);
	process_rdmsr(vm, msr_index);
}

/* Run the guest and service the resulting MSR-write userspace exit. */
static void run_guest_then_process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
{
	run_guest(vm);
	process_wrmsr(vm, msr_index);
}

/* Run the guest and return the command of the ucall it issues. */
static uint64_t run_guest_then_process_ucall(struct kvm_vm *vm)
{
	run_guest(vm);
	return process_ucall(vm);
}

/* Run the guest and require that it signals UCALL_DONE. */
static void run_guest_then_process_ucall_done(struct kvm_vm *vm)
{
	run_guest(vm);
	process_ucall_done(vm);
}
455
456 static void test_msr_filter(void) {
457         struct kvm_enable_cap cap = {
458                 .cap = KVM_CAP_X86_USER_SPACE_MSR,
459                 .args[0] = KVM_MSR_EXIT_REASON_FILTER,
460         };
461         struct kvm_vm *vm;
462         int rc;
463
464         /* Create VM */
465         vm = vm_create_default(VCPU_ID, 0, guest_code);
466         vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
467
468         rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
469         TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
470         vm_enable_cap(vm, &cap);
471
472         rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
473         TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
474
475         vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter);
476
477         vm_init_descriptor_tables(vm);
478         vcpu_init_descriptor_tables(vm, VCPU_ID);
479
480         vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
481
482         /* Process guest code userspace exits. */
483         run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
484         run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
485         run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
486
487         run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
488         run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
489         run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
490
491         run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
492         run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);
493
494         vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
495         run_guest(vm);
496         vm_handle_exception(vm, UD_VECTOR, NULL);
497
498         if (process_ucall(vm) != UCALL_DONE) {
499                 vm_handle_exception(vm, GP_VECTOR, guest_fep_gp_handler);
500
501                 /* Process emulated rdmsr and wrmsr instructions. */
502                 run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
503                 run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
504                 run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
505
506                 run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
507                 run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
508                 run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
509
510                 run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
511                 run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);
512
513                 /* Confirm the guest completed without issues. */
514                 run_guest_then_process_ucall_done(vm);
515         } else {
516                 printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n");
517         }
518
519         kvm_vm_free(vm);
520 }
521
522 static void test_msr_permission_bitmap(void) {
523         struct kvm_enable_cap cap = {
524                 .cap = KVM_CAP_X86_USER_SPACE_MSR,
525                 .args[0] = KVM_MSR_EXIT_REASON_FILTER,
526         };
527         struct kvm_vm *vm;
528         int rc;
529
530         /* Create VM */
531         vm = vm_create_default(VCPU_ID, 0, guest_code_permission_bitmap);
532         vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
533
534         rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
535         TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
536         vm_enable_cap(vm, &cap);
537
538         rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
539         TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
540
541         vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
542         run_guest_then_process_wrmsr(vm, MSR_FS_BASE);
543         run_guest_then_process_rdmsr(vm, MSR_FS_BASE);
544         TEST_ASSERT(run_guest_then_process_ucall(vm) == UCALL_SYNC, "Expected ucall state to be UCALL_SYNC.");
545         vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
546         run_guest_then_process_wrmsr(vm, MSR_GS_BASE);
547         run_guest_then_process_rdmsr(vm, MSR_GS_BASE);
548         run_guest_then_process_ucall_done(vm);
549
550         kvm_vm_free(vm);
551 }
552
int main(int argc, char *argv[])
{
	/* Deny-list filter test: XSS, FLUSH_CMD and a fabricated MSR. */
	test_msr_filter();

	/* Runtime filter replacement test: FS_BASE vs GS_BASE filters. */
	test_msr_permission_bitmap();

	return 0;
}