tools/testing/selftests/kvm/x86_64/smm_test.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for SMM.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"

#include "vmx.h"
#include "svm_util.h"

#define VCPU_ID	      1

#define PAGE_SIZE  4096

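/*
 * 64KiB of SMRAM at an otherwise unused GPA; on SMI the CPU starts
 * executing at SMBASE + 0x8000, which is where the handler is copied
 * below.
 */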
#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe

#define STR(x) #x
#define XSTR(s) STR(s)

#define SYNC_PORT 0xe
#define DONE 0xff

/*
 * This is compiled as normal 64-bit code, but the SMI handler is executed
 * in real-address mode. To keep things simple we limit ourselves to a
 * mode-independent subset of asm here.
 * The SMI handler always reports back the fixed stage SMRAM_STAGE.
 */
uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
	0x0f, 0xaa,           /* rsm */
};

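/*
 * Report a stage to the host: the "in" on SYNC_PORT triggers a
 * KVM_EXIT_IO and the host reads the stage back from the low byte of
 * RAX ("+a" keeps the value in %al across the trap).
 */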
static inline void sync_with_host(uint64_t phase)
{
	asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
		     : "+a" (phase));
}

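/* Send an SMI to the running vCPU itself via the x2APIC ICR. */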
static void self_smi(void)
{
	x2apic_write_reg(APIC_ICR,
			 APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}

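/* L2 guest: report stages 8 and 10, then exit to L1 via vmcall. */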
static void l2_guest_code(void)
{
	sync_with_host(8);

	sync_with_host(10);

	vmcall();
}

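/*
 * The main (L1) guest. "arg" points at SVM or VMX nested state (or is
 * NULL when nested virtualization is unavailable); cpu_has_svm() picks
 * which view is valid. L1 enables x2APIC, takes a direct SMI, and then
 * takes SMIs while L2 is running.
 */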
static void guest_code(void *arg)
{
	#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
	struct svm_test_data *svm = arg;
	struct vmx_pages *vmx_pages = arg;

	sync_with_host(1);

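	/* x2APIC is needed for the MSR-based self_smi() below. */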
	wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);

	sync_with_host(2);

	self_smi();

	sync_with_host(4);

	if (arg) {
		if (cpu_has_svm()) {
			generic_svm_setup(svm, l2_guest_code,
					  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		} else {
			GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
			GUEST_ASSERT(load_vmcs(vmx_pages));
			prepare_vmcs(vmx_pages, l2_guest_code,
				     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		}

		sync_with_host(5);

		self_smi();

		sync_with_host(7);

		if (cpu_has_svm()) {
			run_guest(svm->vmcb, svm->vmcb_gpa);
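			/* The 3-byte vmcall in L2 triggered the #VMEXIT; skip past it. */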
			svm->vmcb->save.rip += 3;
			run_guest(svm->vmcb, svm->vmcb_gpa);
		} else {
			vmlaunch();
			vmresume();
		}

		/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
		sync_with_host(12);
	}

	sync_with_host(DONE);
}

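/*
 * Make an SMI pending from host userspace: KVM_VCPUEVENT_VALID_SMM must
 * be set in flags for KVM_SET_VCPU_EVENTS to act on the smi fields.
 */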
void inject_smi(struct kvm_vm *vm)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vm, VCPU_ID, &events);

	events.smi.pending = 1;
	events.flags |= KVM_VCPUEVENT_VALID_SMM;

	vcpu_events_set(vm, VCPU_ID, &events);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_regs regs;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	int stage, stage_reported;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	run = vcpu_state(vm, VCPU_ID);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
				    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
	TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
		    == SMRAM_GPA, "could not allocate guest physical addresses");

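	/*
	 * Zero SMRAM and place the handler at SMRAM_GPA + 0x8000, the SMI
	 * entry point once SMBASE is relocated to SMRAM_GPA below.
	 */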
	memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
	memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
	       sizeof(smi_handler));

	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);

	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		if (nested_svm_supported())
			vcpu_alloc_svm(vm, &nested_gva);
		else if (nested_vmx_supported())
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip SMM test with nested guest\n");

	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);

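	/*
	 * Run the guest stage by stage; every sync_with_host() shows up
	 * here as a KVM_EXIT_IO with the stage number in the low byte of
	 * RAX.
	 */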
	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		memset(&regs, 0, sizeof(regs));
		vcpu_regs_get(vm, VCPU_ID, &regs);

		stage_reported = regs.rax & 0xff;

		if (stage_reported == DONE)
			goto done;

		TEST_ASSERT(stage_reported == stage ||
			    stage_reported == SMRAM_STAGE,
			    "Unexpected stage: expected #%x, got #%x",
			    stage, stage_reported);

		/*
		 * Enter SMM during L2 execution and check that we correctly
		 * return from it. Do not perform save/restore while in SMM yet.
		 */
		if (stage == 8) {
			inject_smi(vm);
			continue;
		}

		/*
		 * Perform save/restore while the guest is in SMM triggered
		 * during L2 execution.
		 */
		if (stage == 10)
			inject_smi(vm);

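		/*
		 * Save the vCPU state, destroy and recreate the VM, then
		 * restore the state into a fresh vCPU: this exercises
		 * migration at every stage, including from within SMM.
		 */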
		state = vcpu_save_state(vm, VCPU_ID);
		kvm_vm_release(vm);
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);
	}

done:
	kvm_vm_free(vm);
}