1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2020, Google LLC.
5 * Tests for exiting into userspace on registered MSRs
8 #define _GNU_SOURCE /* for program_invocation_short_name */
11 #include "test_util.h"
/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
/* Byte length of the prefix: 2-byte ud2 + the 3 signature bytes 'k','v','m'. */
#define KVM_FEP_LENGTH 5
/* Guest-side flag; starts optimistic, presumably cleared by the #UD handler
 * when the forced-emulation probe faults -- confirm against the full file. */
static int fep_available = 1;

/* Fabricated MSR index ('G','O','O',0 in ASCII) that neither the CPU nor the
 * kernel knows about; reads/writes are serviced entirely in userspace. */
#define MSR_NON_EXISTENT 0x474f4f00
/*
 * Deny-list filter for test_msr_filter(): default-allow overall, with
 * read/write traps on three single-MSR ranges (a known MSR, a kernel-unknown
 * MSR, and the fabricated MSR_NON_EXISTENT).
 * NOTE(review): this chunk appears truncated -- the .ranges array braces,
 * the per-range .nmsrs/.base for the first entry, the WRITE-flag
 * continuations, and the 'deny_bits' bitmap definition are not visible;
 * confirm against the full file.
 */
struct kvm_msr_filter filter = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.flags = KVM_MSR_FILTER_READ |
	/* Test an MSR the kernel knows about. */
	.bitmap = (uint8_t*)&deny_bits,
	.flags = KVM_MSR_FILTER_READ |
	/* Test an MSR the kernel doesn't know about. */
	.base = MSR_IA32_FLUSH_CMD,
	.bitmap = (uint8_t*)&deny_bits,
	.flags = KVM_MSR_FILTER_READ |
	/* Test a fabricated MSR that no one knows about. */
	.base = MSR_NON_EXISTENT,
	.bitmap = (uint8_t*)&deny_bits,
/*
 * Filter for test_msr_permission_bitmap() phase 1: default-allow with a
 * read/write trap that, based on its usage (FS_BASE accesses exit to
 * userspace), presumably covers only MSR_FS_BASE -- the .base line is not
 * visible in this chunk.
 */
struct kvm_msr_filter filter_fs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.flags = KVM_MSR_FILTER_READ |
	.bitmap = (uint8_t*)&deny_bits,
/*
 * Filter for test_msr_permission_bitmap() phase 2: same shape as filter_fs
 * but presumably covering only MSR_GS_BASE (the .base line is not visible
 * in this chunk); swapped in at runtime to prove filters can be replaced.
 */
struct kvm_msr_filter filter_gs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.flags = KVM_MSR_FILTER_READ |
	.bitmap = (uint8_t*)&deny_bits,
/* Userspace-side backing store for the fabricated MSR_NON_EXISTENT value. */
uint64_t msr_non_existent_data;
/* Guest-side #GP count; reset by each test_*msr() helper, incremented by the
 * guest #GP handler, and asserted on after every access. */
int guest_exception_count;
82 * Note: Force test_rdmsr() to not be inlined to prevent the labels,
83 * rdmsr_start and rdmsr_end, from being defined multiple times.
85 static noinline uint64_t test_rdmsr(uint32_t msr)
89 guest_exception_count = 0;
91 __asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
92 "=a"(a), "=d"(d) : "c"(msr) : "memory");
94 return a | ((uint64_t) d << 32);
98 * Note: Force test_wrmsr() to not be inlined to prevent the labels,
99 * wrmsr_start and wrmsr_end, from being defined multiple times.
101 static noinline void test_wrmsr(uint32_t msr, uint64_t value)
104 uint32_t d = value >> 32;
106 guest_exception_count = 0;
108 __asm__ __volatile__("wrmsr_start: wrmsr; wrmsr_end:" ::
109 "a"(a), "d"(d), "c"(msr) : "memory");
/* Asm labels bracketing the faultable instructions above, consumed by the
 * guest #GP handler to identify and skip a faulting rdmsr/wrmsr. */
extern char rdmsr_start, rdmsr_end;
extern char wrmsr_start, wrmsr_end;
116 * Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
117 * rdmsr_start and rdmsr_end, from being defined multiple times.
119 static noinline uint64_t test_em_rdmsr(uint32_t msr)
123 guest_exception_count = 0;
125 __asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
126 "=a"(a), "=d"(d) : "c"(msr) : "memory");
128 return a | ((uint64_t) d << 32);
132 * Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
133 * wrmsr_start and wrmsr_end, from being defined multiple times.
135 static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
138 uint32_t d = value >> 32;
140 guest_exception_count = 0;
142 __asm__ __volatile__(KVM_FEP "em_wrmsr_start: wrmsr; em_wrmsr_end:" ::
143 "a"(a), "d"(d), "c"(msr) : "memory");
/* Labels for the emulated (KVM_FEP-prefixed) variants, used by the forced
 * emulation #GP handler. */
extern char em_rdmsr_start, em_rdmsr_end;
extern char em_wrmsr_start, em_wrmsr_end;
/*
 * Guest entry point for test_msr_filter(): exercises every MSR covered by
 * the deny-list 'filter', asserting both the values returned by userspace
 * and the exact number of #GPs taken per access.
 * NOTE(review): this chunk appears truncated -- the function braces, the
 * declaration of 'data', and the GUEST_SYNC/GUEST_DONE calls are not
 * visible here; confirm against the full file.
 */
static void guest_code(void)
	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
	 *
	 * A GP is thrown if anything other than 0 is written to
	 * MSR_IA32_XSS (per the userspace process_wrmsr() policy).
	 */
	data = test_rdmsr(MSR_IA32_XSS);
	GUEST_ASSERT(data == 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 0);
	GUEST_ASSERT(guest_exception_count == 0);

	/* Non-zero write must be rejected with exactly one #GP. */
	test_wrmsr(MSR_IA32_XSS, 1);
	GUEST_ASSERT(guest_exception_count == 1);

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_FLUSH_CMD.
	 *
	 * A GP is thrown if MSR_IA32_FLUSH_CMD is read
	 * from or if a value other than 1 is written to it.
	 */
	test_rdmsr(MSR_IA32_FLUSH_CMD);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 0);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 1);
	GUEST_ASSERT(guest_exception_count == 0);

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_NON_EXISTENT.
	 *
	 * Test that a fabricated MSR can pass through the kernel
	 * and be handled in userspace.
	 */
	test_wrmsr(MSR_NON_EXISTENT, 2);
	GUEST_ASSERT(guest_exception_count == 0);

	data = test_rdmsr(MSR_NON_EXISTENT);
	GUEST_ASSERT(data == 2);
	GUEST_ASSERT(guest_exception_count == 0);

	/*
	 * Test to see if the instruction emulator is available (ie: the module
	 * parameter 'kvm.force_emulation_prefix=1' is set). This instruction
	 * will #UD if it isn't available.
	 */
	__asm__ __volatile__(KVM_FEP "nop");

	/* Let userspace know we aren't done. */
	/* NOTE(review): the sync ucall statement itself is not visible here. */

	/*
	 * Now run the same tests with the instruction emulator.
	 */
	data = test_em_rdmsr(MSR_IA32_XSS);
	GUEST_ASSERT(data == 0);
	GUEST_ASSERT(guest_exception_count == 0);
	test_em_wrmsr(MSR_IA32_XSS, 0);
	GUEST_ASSERT(guest_exception_count == 0);
	test_em_wrmsr(MSR_IA32_XSS, 1);
	GUEST_ASSERT(guest_exception_count == 1);

	test_em_rdmsr(MSR_IA32_FLUSH_CMD);
	GUEST_ASSERT(guest_exception_count == 1);
	test_em_wrmsr(MSR_IA32_FLUSH_CMD, 0);
	GUEST_ASSERT(guest_exception_count == 1);
	test_em_wrmsr(MSR_IA32_FLUSH_CMD, 1);
	GUEST_ASSERT(guest_exception_count == 0);

	test_em_wrmsr(MSR_NON_EXISTENT, 2);
	GUEST_ASSERT(guest_exception_count == 0);
	data = test_em_rdmsr(MSR_NON_EXISTENT);
	GUEST_ASSERT(data == 2);
	GUEST_ASSERT(guest_exception_count == 0);
/*
 * Guest entry for test_msr_permission_bitmap(): with filter_fs installed
 * only MSR_FS_BASE accesses exit to userspace (which echoes back the
 * constant MSR_FS_BASE as the read value); after the host swaps in
 * filter_gs, only MSR_GS_BASE accesses exit (echoed back as MSR_GS_BASE).
 * NOTE(review): the function braces, the 'data' declaration, and the
 * sync/done ucalls are not visible in this chunk.
 */
static void guest_code_permission_bitmap(void)
	test_wrmsr(MSR_FS_BASE, 0);
	data = test_rdmsr(MSR_FS_BASE);
	/* process_rdmsr() answers with the constant MSR_FS_BASE for this index. */
	GUEST_ASSERT(data == MSR_FS_BASE);

	test_wrmsr(MSR_GS_BASE, 0);
	data = test_rdmsr(MSR_GS_BASE);
	/* GS_BASE is not filtered yet, so the guest reads back the 0 it wrote. */
	GUEST_ASSERT(data == 0);

	/* Let userspace know to switch the filter */

	/* After the swap: FS_BASE no longer trapped, GS_BASE now trapped. */
	test_wrmsr(MSR_FS_BASE, 0);
	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data == 0);

	test_wrmsr(MSR_GS_BASE, 0);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data == MSR_GS_BASE);
263 static void __guest_gp_handler(struct ex_regs *regs,
264 char *r_start, char *r_end,
265 char *w_start, char *w_end)
267 if (regs->rip == (uintptr_t)r_start) {
268 regs->rip = (uintptr_t)r_end;
271 } else if (regs->rip == (uintptr_t)w_start) {
272 regs->rip = (uintptr_t)w_end;
274 GUEST_ASSERT(!"RIP is at an unknown location!");
277 ++guest_exception_count;
280 static void guest_gp_handler(struct ex_regs *regs)
282 __guest_gp_handler(regs, &rdmsr_start, &rdmsr_end,
283 &wrmsr_start, &wrmsr_end);
286 static void guest_fep_gp_handler(struct ex_regs *regs)
288 __guest_gp_handler(regs, &em_rdmsr_start, &em_rdmsr_end,
289 &em_wrmsr_start, &em_wrmsr_end);
292 static void guest_ud_handler(struct ex_regs *regs)
295 regs->rip += KVM_FEP_LENGTH;
298 static void run_guest(struct kvm_vm *vm)
302 rc = _vcpu_run(vm, VCPU_ID);
303 TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
306 static void check_for_guest_assert(struct kvm_vm *vm)
308 struct kvm_run *run = vcpu_state(vm, VCPU_ID);
311 if (run->exit_reason == KVM_EXIT_IO &&
312 get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
313 TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
314 __FILE__, uc.args[1]);
318 static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
320 struct kvm_run *run = vcpu_state(vm, VCPU_ID);
322 check_for_guest_assert(vm);
324 TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
325 "Unexpected exit reason: %u (%s),\n",
327 exit_reason_str(run->exit_reason));
328 TEST_ASSERT(run->msr.index == msr_index,
329 "Unexpected msr (0x%04x), expected 0x%04x",
330 run->msr.index, msr_index);
332 switch (run->msr.index) {
336 case MSR_IA32_FLUSH_CMD:
339 case MSR_NON_EXISTENT:
340 run->msr.data = msr_non_existent_data;
343 run->msr.data = MSR_FS_BASE;
346 run->msr.data = MSR_GS_BASE;
349 TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
353 static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
355 struct kvm_run *run = vcpu_state(vm, VCPU_ID);
357 check_for_guest_assert(vm);
359 TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR,
360 "Unexpected exit reason: %u (%s),\n",
362 exit_reason_str(run->exit_reason));
363 TEST_ASSERT(run->msr.index == msr_index,
364 "Unexpected msr (0x%04x), expected 0x%04x",
365 run->msr.index, msr_index);
367 switch (run->msr.index) {
369 if (run->msr.data != 0)
372 case MSR_IA32_FLUSH_CMD:
373 if (run->msr.data != 1)
376 case MSR_NON_EXISTENT:
377 msr_non_existent_data = run->msr.data;
383 TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
387 static void process_ucall_done(struct kvm_vm *vm)
389 struct kvm_run *run = vcpu_state(vm, VCPU_ID);
392 check_for_guest_assert(vm);
394 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
395 "Unexpected exit reason: %u (%s)",
397 exit_reason_str(run->exit_reason));
399 TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
400 "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
404 static uint64_t process_ucall(struct kvm_vm *vm)
406 struct kvm_run *run = vcpu_state(vm, VCPU_ID);
407 struct ucall uc = {};
409 check_for_guest_assert(vm);
411 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
412 "Unexpected exit reason: %u (%s)",
414 exit_reason_str(run->exit_reason));
416 switch (get_ucall(vm, VCPU_ID, &uc)) {
420 check_for_guest_assert(vm);
423 process_ucall_done(vm);
426 TEST_ASSERT(false, "Unexpected ucall");
/* Enter the guest, then service the resulting MSR-read exit in userspace. */
static void run_guest_then_process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
{
	run_guest(vm);
	process_rdmsr(vm, msr_index);
}
/* Enter the guest, then service the resulting MSR-write exit in userspace. */
static void run_guest_then_process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
{
	run_guest(vm);
	process_wrmsr(vm, msr_index);
}
/* Enter the guest, then consume and return its next ucall command. */
static uint64_t run_guest_then_process_ucall(struct kvm_vm *vm)
{
	run_guest(vm);
	return process_ucall(vm);
}
/* Enter the guest, then assert it signalled completion via UCALL_DONE. */
static void run_guest_then_process_ucall_done(struct kvm_vm *vm)
{
	run_guest(vm);
	process_ucall_done(vm);
}
/*
 * End-to-end test of KVM_X86_SET_MSR_FILTER with the deny-list 'filter':
 * every filtered rdmsr/wrmsr in guest_code() must bounce to userspace as a
 * KVM_EXIT_X86_RDMSR/WRMSR exit and be serviced by process_rdmsr/wrmsr().
 * NOTE(review): this chunk appears truncated -- the vm/rc declarations,
 * some run_guest() calls around the #UD-probe, the else branch pairing
 * with the printf, and any final VM teardown are not visible; confirm
 * against the full file.
 */
static void test_msr_filter(void) {
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_USER_SPACE_MSR,
		.args[0] = KVM_MSR_EXIT_REASON_FILTER,
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	/* Both capabilities must be present before the filter can be armed. */
	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, &cap);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter);

	/* Guest-side exception handling for the expected #GPs. */
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);
	vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);

	/* Process guest code userspace exits. */
	run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);

	run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);

	run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
	run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);

	/* Arm the #UD handler only around the forced-emulation probe. */
	vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
	vm_handle_exception(vm, UD_VECTOR, NULL);

	/* If the guest did not finish, forced emulation is available. */
	if (process_ucall(vm) != UCALL_DONE) {
		vm_handle_exception(vm, GP_VECTOR, guest_fep_gp_handler);

		/* Process emulated rdmsr and wrmsr instructions. */
		run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);

		run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);

		run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
		run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);

		/* Confirm the guest completed without issues. */
		run_guest_then_process_ucall_done(vm);
		/* NOTE(review): this printf presumably belongs to the missing
		 * else branch of the UCALL_DONE check above. */
		printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n");
/*
 * Test that a second KVM_X86_SET_MSR_FILTER call replaces the previous
 * filter at runtime: first only FS_BASE accesses trap to userspace
 * (filter_fs), then the filter is swapped mid-run for filter_gs and only
 * GS_BASE accesses trap.
 * NOTE(review): the vm/rc declarations and any final VM teardown are not
 * visible in this chunk; confirm against the full file.
 */
static void test_msr_permission_bitmap(void) {
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_USER_SPACE_MSR,
		.args[0] = KVM_MSR_EXIT_REASON_FILTER,
	vm = vm_create_default(VCPU_ID, 0, guest_code_permission_bitmap);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, &cap);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	/* Phase 1: only the FS_BASE filter is installed. */
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
	run_guest_then_process_wrmsr(vm, MSR_FS_BASE);
	run_guest_then_process_rdmsr(vm, MSR_FS_BASE);
	/* Guest signals via UCALL_SYNC that it is ready for the filter swap. */
	TEST_ASSERT(run_guest_then_process_ucall(vm) == UCALL_SYNC, "Expected ucall state to be UCALL_SYNC.");
	/* Phase 2: swap filters while the VM is live; now only GS_BASE traps. */
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
	run_guest_then_process_wrmsr(vm, MSR_GS_BASE);
	run_guest_then_process_rdmsr(vm, MSR_GS_BASE);
	run_guest_then_process_ucall_done(vm);
553 int main(int argc, char *argv[])
557 test_msr_permission_bitmap();