x86/sev-es: Add CPUID handling to #VC handler
[linux-2.6-microblaze.git] / arch / x86 / kernel / sev-es-shared.c
// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This file is not compiled stand-alone. It contains code shared
 * between the pre-decompression boot code and the running Linux kernel
 * and is included directly into both code-bases.
 */

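/*
 * Request termination of the guest from the hypervisor via the GHCB MSR
 * protocol and halt - execution must not continue once termination has
 * been requested.
 */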
static void sev_es_terminate(unsigned int reason)
{
        u64 val = GHCB_SEV_TERMINATE;

        /*
         * Tell the hypervisor what went wrong - only reason-set 0 is
         * currently supported.
         */
        val |= GHCB_SEV_TERMINATE_REASON(0, reason);

        /* Request Guest Termination from the Hypervisor */
        sev_es_wr_ghcb_msr(val);
        VMGEXIT();

        while (true)
                asm volatile("hlt\n" : : : "memory");
}

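/*
 * Negotiate the GHCB protocol version with the hypervisor over the GHCB MSR
 * protocol: send an info request, VMGEXIT, and verify that the version this
 * code implements (GHCB_PROTO_OUR) falls within the [min, max] range the
 * hypervisor reports back.
 */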
static bool sev_es_negotiate_protocol(void)
{
        u64 val;

        /* Do the GHCB protocol version negotiation */
        sev_es_wr_ghcb_msr(GHCB_SEV_INFO_REQ);
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();

        if (GHCB_INFO(val) != GHCB_SEV_INFO)
                return false;

        if (GHCB_PROTO_MAX(val) < GHCB_PROTO_OUR ||
            GHCB_PROTO_MIN(val) > GHCB_PROTO_OUR)
                return false;

        return true;
}

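/*
 * Clear the GHCB valid bitmap so that only the save area fields explicitly
 * set for the next exit are marked valid to the hypervisor.
 */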
static void vc_ghcb_invalidate(struct ghcb *ghcb)
{
        memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

static bool vc_decoding_needed(unsigned long exit_code)
{
        /* Exceptions don't require instruction decoding */
        return !(exit_code >= SVM_EXIT_EXCP_BASE &&
                 exit_code <= SVM_EXIT_LAST_EXCP);
}

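/*
 * Initialize the instruction emulation context for a #VC exception and
 * decode the faulting instruction if the exit code requires it.
 */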
static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
                                      struct pt_regs *regs,
                                      unsigned long exit_code)
{
        enum es_result ret = ES_OK;

        memset(ctxt, 0, sizeof(*ctxt));
        ctxt->regs = regs;

        if (vc_decoding_needed(exit_code))
                ret = vc_decode_insn(ctxt);

        return ret;
}

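/* Advance the instruction pointer past the emulated instruction */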
static void vc_finish_insn(struct es_em_ctxt *ctxt)
{
        ctxt->regs->ip += ctxt->insn.length;
}

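/*
 * Perform a hypervisor call through the GHCB: fill in the exit code and exit
 * information, write the GHCB's physical address to the GHCB MSR, VMGEXIT,
 * and then inspect sw_exit_info_1/2 to see whether the hypervisor completed
 * the request or asked for an exception to be injected into the guest.
 */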
static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
                                          struct es_em_ctxt *ctxt,
                                          u64 exit_code, u64 exit_info_1,
                                          u64 exit_info_2)
{
        enum es_result ret;

        /* Fill in protocol and format specifiers */
        ghcb->protocol_version = GHCB_PROTOCOL_MAX;
        ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

        ghcb_set_sw_exit_code(ghcb, exit_code);
        ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
        ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

        sev_es_wr_ghcb_msr(__pa(ghcb));
        VMGEXIT();

        if ((ghcb->save.sw_exit_info_1 & 0xffffffff) == 1) {
                u64 info = ghcb->save.sw_exit_info_2;
                unsigned long v;

                v = info & SVM_EVTINJ_VEC_MASK;

                /* Check if exception information from hypervisor is sane. */
                if ((info & SVM_EVTINJ_VALID) &&
                    ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) &&
                    ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
                        ctxt->fi.vector = v;
                        if (info & SVM_EVTINJ_VALID_ERR)
                                ctxt->fi.error_code = info >> 32;
                        ret = ES_EXCEPTION;
                } else {
                        ret = ES_VMM_ERROR;
                }
        } else {
                ret = ES_OK;
        }

        return ret;
}

/*
 * Boot VC Handler - This is the first #VC handler during boot. There is no
 * GHCB page yet, so it only supports the MSR based communication with the
 * hypervisor and only the CPUID exit-code.
 */
void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
        unsigned int fn = lower_bits(regs->ax, 32);
        unsigned long val;

        /* Only CPUID is supported via MSR protocol */
        if (exit_code != SVM_EXIT_CPUID)
                goto fail;

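        /*
         * Each of EAX/EBX/ECX/EDX is requested from the hypervisor with a
         * separate GHCB MSR protocol request; the result is returned in the
         * upper 32 bits of the GHCB MSR.
         */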
        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
        if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
                goto fail;
        regs->ax = val >> 32;

        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
        if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
                goto fail;
        regs->bx = val >> 32;

        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
        if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
                goto fail;
        regs->cx = val >> 32;

        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
        if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
                goto fail;
        regs->dx = val >> 32;

        /* Skip over the CPUID two-byte opcode */
        regs->ip += 2;

        return;

fail:
        sev_es_wr_ghcb_msr(GHCB_SEV_TERMINATE);
        VMGEXIT();

        /* Shouldn't get here - if we do, halt the machine */
        while (true)
                asm volatile("hlt\n");
}

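/*
 * Copy 'count' elements of 'data_size' bytes from guest memory at 'src' into
 * the intermediate buffer. The source pointer moves backwards when the
 * direction flag is set; the buffer is always filled front to back.
 */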
static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
                                          void *src, char *buf,
                                          unsigned int data_size,
                                          unsigned int count,
                                          bool backwards)
{
        int i, b = backwards ? -1 : 1;
        enum es_result ret = ES_OK;

        for (i = 0; i < count; i++) {
                void *s = src + (i * data_size * b);
                char *d = buf + (i * data_size);

                ret = vc_read_mem(ctxt, s, d, data_size);
                if (ret != ES_OK)
                        break;
        }

        return ret;
}

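/*
 * Write counterpart of vc_insn_string_read(): copy 'count' elements of
 * 'data_size' bytes from the intermediate buffer back into guest memory at
 * 'dst', again honoring the direction flag.
 */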
static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
                                           void *dst, char *buf,
                                           unsigned int data_size,
                                           unsigned int count,
                                           bool backwards)
{
        int i, s = backwards ? -1 : 1;
        enum es_result ret = ES_OK;

        for (i = 0; i < count; i++) {
                void *d = dst + (i * data_size * s);
                char *b = buf + (i * data_size);

                ret = vc_write_mem(ctxt, d, b, data_size);
                if (ret != ES_OK)
                        break;
        }

        return ret;
}

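/*
 * Bits of the IOIO intercept exit_info_1 value passed to the hypervisor:
 * access type and direction, REP prefix, address and data size, and the
 * effective segment. The I/O port number is placed in bits 31:16.
 */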
#define IOIO_TYPE_STR  BIT(2)
#define IOIO_TYPE_IN   1
#define IOIO_TYPE_INS  (IOIO_TYPE_IN | IOIO_TYPE_STR)
#define IOIO_TYPE_OUT  0
#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR)

#define IOIO_REP       BIT(3)

#define IOIO_ADDR_64   BIT(9)
#define IOIO_ADDR_32   BIT(8)
#define IOIO_ADDR_16   BIT(7)

#define IOIO_DATA_32   BIT(6)
#define IOIO_DATA_16   BIT(5)
#define IOIO_DATA_8    BIT(4)

#define IOIO_SEG_ES    (0 << 10)
#define IOIO_SEG_DS    (3 << 10)

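/*
 * Build the IOIO exit_info_1 value from the decoded instruction: the opcode
 * selects direction and the string/immediate/register form, the operand and
 * address sizes come from the instruction decoder, and a REP prefix sets
 * IOIO_REP.
 */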
static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
        struct insn *insn = &ctxt->insn;
        *exitinfo = 0;

        switch (insn->opcode.bytes[0]) {
        /* INS opcodes */
        case 0x6c:
        case 0x6d:
                *exitinfo |= IOIO_TYPE_INS;
                *exitinfo |= IOIO_SEG_ES;
                *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
                break;

        /* OUTS opcodes */
        case 0x6e:
        case 0x6f:
                *exitinfo |= IOIO_TYPE_OUTS;
                *exitinfo |= IOIO_SEG_DS;
                *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
                break;

        /* IN immediate opcodes */
        case 0xe4:
        case 0xe5:
                *exitinfo |= IOIO_TYPE_IN;
                *exitinfo |= (u64)insn->immediate.value << 16;
                break;

        /* OUT immediate opcodes */
        case 0xe6:
        case 0xe7:
                *exitinfo |= IOIO_TYPE_OUT;
                *exitinfo |= (u64)insn->immediate.value << 16;
                break;

        /* IN register opcodes */
        case 0xec:
        case 0xed:
                *exitinfo |= IOIO_TYPE_IN;
                *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
                break;

        /* OUT register opcodes */
        case 0xee:
        case 0xef:
                *exitinfo |= IOIO_TYPE_OUT;
                *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
                break;

        default:
                return ES_DECODE_FAILED;
        }

        switch (insn->opcode.bytes[0]) {
        case 0x6c:
        case 0x6e:
        case 0xe4:
        case 0xe6:
        case 0xec:
        case 0xee:
                /* Single byte opcodes */
                *exitinfo |= IOIO_DATA_8;
                break;
        default:
                /* Length determined by instruction parsing */
                *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
                                                     : IOIO_DATA_32;
        }

        switch (insn->addr_bytes) {
        case 2:
                *exitinfo |= IOIO_ADDR_16;
                break;
        case 4:
                *exitinfo |= IOIO_ADDR_32;
                break;
        case 8:
                *exitinfo |= IOIO_ADDR_64;
                break;
        }

        if (insn_has_rep_prefix(insn))
                *exitinfo |= IOIO_REP;

        return ES_OK;
}

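/*
 * Handle a #VC exception for an IN/OUT or INS/OUTS instruction. String
 * operations are bounced through the GHCB shared buffer in chunks, updating
 * rSI/rDI/rCX as they complete so the instruction can be restarted
 * (ES_RETRY) until the whole transfer is done. Register forms simply pass
 * rAX through the GHCB.
 */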
static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
        struct pt_regs *regs = ctxt->regs;
        u64 exit_info_1, exit_info_2;
        enum es_result ret;

        ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
        if (ret != ES_OK)
                return ret;

        if (exit_info_1 & IOIO_TYPE_STR) {

                /* (REP) INS/OUTS */

                bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
                unsigned int io_bytes, exit_bytes;
                unsigned int ghcb_count, op_count;
                unsigned long es_base;
                u64 sw_scratch;

                /*
                 * For the string variants with REP prefix the number of in/out
                 * operations per #VC exception is limited so that the kernel
                 * has a chance to take interrupts and re-schedule while the
                 * instruction is emulated.
                 */
                io_bytes   = (exit_info_1 >> 4) & 0x7;
                ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;

                op_count    = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
                exit_info_2 = min(op_count, ghcb_count);
                exit_bytes  = exit_info_2 * io_bytes;

                es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

                /* Read bytes of OUTS into the shared buffer */
                if (!(exit_info_1 & IOIO_TYPE_IN)) {
                        ret = vc_insn_string_read(ctxt,
                                               (void *)(es_base + regs->si),
                                               ghcb->shared_buffer, io_bytes,
                                               exit_info_2, df);
                        if (ret)
                                return ret;
                }

                /*
                 * Issue a VMGEXIT to the HV to consume the bytes from the
                 * shared buffer or to have it write them into the shared buffer
                 * depending on the instruction: OUTS or INS.
                 */
                sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
                ghcb_set_sw_scratch(ghcb, sw_scratch);
                ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
                                          exit_info_1, exit_info_2);
                if (ret != ES_OK)
                        return ret;

                /* Read bytes from shared buffer into the guest's destination. */
                if (exit_info_1 & IOIO_TYPE_IN) {
                        ret = vc_insn_string_write(ctxt,
                                                   (void *)(es_base + regs->di),
                                                   ghcb->shared_buffer, io_bytes,
                                                   exit_info_2, df);
                        if (ret)
                                return ret;

                        if (df)
                                regs->di -= exit_bytes;
                        else
                                regs->di += exit_bytes;
                } else {
                        if (df)
                                regs->si -= exit_bytes;
                        else
                                regs->si += exit_bytes;
                }

                if (exit_info_1 & IOIO_REP)
                        regs->cx -= exit_info_2;

                ret = regs->cx ? ES_RETRY : ES_OK;

        } else {

                /* IN/OUT into/from rAX */

                int bits = (exit_info_1 & 0x70) >> 1;
                u64 rax = 0;

                if (!(exit_info_1 & IOIO_TYPE_IN))
                        rax = lower_bits(regs->ax, bits);

                ghcb_set_rax(ghcb, rax);

                ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
                if (ret != ES_OK)
                        return ret;

                if (exit_info_1 & IOIO_TYPE_IN) {
                        if (!ghcb_rax_is_valid(ghcb))
                                return ES_VMM_ERROR;
                        regs->ax = lower_bits(ghcb->save.rax, bits);
                }
        }

        return ret;
}

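/*
 * Handle a #VC exception for CPUID: pass the requested leaf/subleaf (rAX/rCX)
 * and the current XCR0 value to the hypervisor through the GHCB, and copy the
 * returned register values back once they have been validated.
 */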
static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
                                      struct es_em_ctxt *ctxt)
{
        struct pt_regs *regs = ctxt->regs;
        u32 cr4 = native_read_cr4();
        enum es_result ret;

        ghcb_set_rax(ghcb, regs->ax);
        ghcb_set_rcx(ghcb, regs->cx);

        if (cr4 & X86_CR4_OSXSAVE)
                /* Safe to read xcr0 */
                ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
        else
                /* xgetbv will cause #GP - use reset value for xcr0 */
                ghcb_set_xcr0(ghcb, 1);

        ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
        if (ret != ES_OK)
                return ret;

        if (!(ghcb_rax_is_valid(ghcb) &&
              ghcb_rbx_is_valid(ghcb) &&
              ghcb_rcx_is_valid(ghcb) &&
              ghcb_rdx_is_valid(ghcb)))
                return ES_VMM_ERROR;

        regs->ax = ghcb->save.rax;
        regs->bx = ghcb->save.rbx;
        regs->cx = ghcb->save.rcx;
        regs->dx = ghcb->save.rdx;

        return ES_OK;
}