// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV-ES: " fmt
#include <linux/sched/debug.h>	/* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/mem_encrypt.h>
#include <linux/lockdep.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/insn-eval.h>
#include <asm/fpu/internal.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#define DR7_RESET_VALUE        0x400

/* For early boot hypervisor communication in SEV-ES enabled guests */
static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);

/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared
 */
static struct ghcb __initdata *boot_ghcb;
/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
	struct ghcb ghcb_page;

	/* Physical storage for the per-CPU IST stack of the #VC handler */
	char ist_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);

	/*
	 * Physical storage for the per-CPU fall-back stack of the #VC handler.
	 * The fall-back stack is used when it is not safe to switch back to the
	 * interrupted stack in the #VC entry code.
	 */
	char fallback_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);

	/*
	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
	 * It is needed when an NMI happens while the #VC handler uses the real
	 * GHCB, and the NMI handler itself is causing another #VC exception. In
	 * that case the GHCB content of the first handler needs to be backed up
	 * and restored.
	 */
	struct ghcb backup_ghcb;

	/*
	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
	 * There is no need for it to be atomic, because nothing is written to
	 * the GHCB between the read and the write of ghcb_active. So it is safe
	 * to use it when a nested #VC exception happens before the write.
	 *
	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
	 * happens while the first #VC handler uses the GHCB. When the NMI code
	 * raises a second #VC handler it might overwrite the contents of the
	 * GHCB written by the first handler. To avoid this the content of the
	 * GHCB is saved and restored when the GHCB is detected to be in use
	 * already.
	 */
	bool ghcb_active;
	bool backup_ghcb_active;

	/*
	 * Cached DR7 value - write it on DR7 writes and return it on reads.
	 * That value will never make it to the real hardware DR7 as debugging
	 * is currently unsupported in SEV-ES guests.
	 */
	unsigned long dr7;
};

struct ghcb_state {
	struct ghcb *ghcb;
};

static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);

/* Needed in vc_early_forward_exception */
void do_early_exception(struct pt_regs *regs, int trapnr);
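
/*
 * Map the per-CPU #VC IST stack and fall-back stack of a CPU into the
 * cpu_entry_area, so the #VC entry code can reach them at their fixed
 * virtual addresses.
 */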
static void __init setup_vc_stacks(int cpu)
{
	struct sev_es_runtime_data *data;
	struct cpu_entry_area *cea;
	unsigned long vaddr;
	phys_addr_t pa;

	data = per_cpu(runtime_data, cpu);
	cea  = get_cpu_entry_area(cpu);

	/* Map #VC IST stack */
	vaddr = CEA_ESTACK_BOT(&cea->estacks, VC);
	pa    = __pa(data->ist_stack);
	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);

	/* Map VC fall-back stack */
	vaddr = CEA_ESTACK_BOT(&cea->estacks, VC2);
	pa    = __pa(data->fallback_stack);
	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
}
static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	/* User-mode RSP is not trusted */
	if (user_mode(regs))
		return false;

	/* SYSCALL gap still has user-mode RSP */
	if (ip_within_syscall_gap(regs))
		return false;

	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}
/*
 * This function handles the case when an NMI is raised in the #VC
 * exception handler entry code, before the #VC handler has switched off
 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 * so that any nested #VC exception will not overwrite the stack
 * contents of the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can also be
 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 * nested sev_es_ist_exit() call may adjust back the IST entry too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * right now.
 */
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If NMI happened while on the #VC IST stack, set the new IST
	 * value below regs->sp, so that the interrupted stack frame is
	 * not overwritten by subsequent #VC exceptions.
	 */
	if (on_vc_stack(regs))
		new_ist = ALIGN_DOWN(regs->sp, 8);

	/*
	 * Reserve additional 8 bytes and store old IST value so this
	 * adjustment can be unrolled in __sev_es_ist_exit().
	 */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}
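
/*
 * Hand out the per-CPU GHCB for use by the caller. If it is already active
 * (nested #VC exception), its contents are saved to the backup GHCB first
 * and restored again in sev_es_put_ghcb(). Returns NULL if the backup GHCB
 * is in use as well.
 */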
static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active))
			return NULL;

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}
/* Needed in vc_early_forward_exception */
void do_early_exception(struct pt_regs *regs, int trapnr);

static inline u64 sev_es_rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low  = (u32)(val);
	high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}
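
/* Fetch the instruction bytes at the faulting kernel RIP so they can be decoded */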
static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
				unsigned char *buffer)
{
	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
}

static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int res;

	res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
	if (!res) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
		ctxt->fi.cr2        = ctxt->regs->ip;
		return ES_EXCEPTION;
	}

	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, res))
		return ES_DECODE_FAILED;

	if (ctxt->insn.immediate.got)
		return ES_OK;
	else
		return ES_DECODE_FAILED;
}
static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int res, ret;

	res = vc_fetch_insn_kernel(ctxt, buffer);
	if (res) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR;
		ctxt->fi.cr2        = ctxt->regs->ip;
		return ES_EXCEPTION;
	}

	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
	if (ret < 0)
		return ES_DECODE_FAILED;
	else
		return ES_OK;
}

static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	if (user_mode(ctxt->regs))
		return __vc_decode_user_insn(ctxt);
	else
		return __vc_decode_kern_insn(ctxt);
}
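
/*
 * Store data to memory on behalf of an emulated instruction. Kernel-space
 * buffers are copied directly, user-space targets go through put_user() so
 * that faults are reported back to the caller as a #PF.
 */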
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   char *dst, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
	char __user *target = (char __user *)dst;
	u64 d8;
	u32 d4;
	u16 d2;
	u8  d1;

	/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
	if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
		memcpy(dst, buf, size);
		return ES_OK;
	}

	switch (size) {
	case 1:
		memcpy(&d1, buf, 1);
		if (put_user(d1, target))
			goto fault;
		break;
	case 2:
		memcpy(&d2, buf, 2);
		if (put_user(d2, target))
			goto fault;
		break;
	case 4:
		memcpy(&d4, buf, 4);
		if (put_user(d4, target))
			goto fault;
		break;
	case 8:
		memcpy(&d8, buf, 8);
		if (put_user(d8, target))
			goto fault;
		break;
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)dst;

	return ES_EXCEPTION;
}
static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  char *src, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT;
	char __user *s = (char __user *)src;
	u64 d8;
	u32 d4;
	u16 d2;
	u8  d1;

	/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
	if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
		memcpy(buf, src, size);
		return ES_OK;
	}

	switch (size) {
	case 1:
		if (get_user(d1, s))
			goto fault;
		memcpy(buf, &d1, 1);
		break;
	case 2:
		if (get_user(d2, s))
			goto fault;
		memcpy(buf, &d2, 2);
		break;
	case 4:
		if (get_user(d4, s))
			goto fault;
		memcpy(buf, &d4, 4);
		break;
	case 8:
		if (get_user(d8, s))
			goto fault;
		memcpy(buf, &d8, 8);
		break;
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)src;

	return ES_EXCEPTION;
}
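
/*
 * Translate a virtual address to a physical one by walking the page-table
 * manually. Used for MMIO emulation; fails for unmapped addresses and
 * refuses encrypted (non-MMIO) mappings.
 */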
static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
					   unsigned long vaddr, phys_addr_t *paddr)
{
	unsigned long va = (unsigned long)vaddr;
	unsigned int level;
	phys_addr_t pa;
	pgd_t *pgd;
	pte_t *pte;

	pgd = __va(read_cr3_pa());
	pgd = &pgd[pgd_index(va)];
	pte = lookup_address_in_pgd(pgd, va, &level);
	if (!pte) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.cr2        = vaddr;
		ctxt->fi.error_code = 0;

		if (user_mode(ctxt->regs))
			ctxt->fi.error_code |= X86_PF_USER;

		return ES_EXCEPTION;
	}

	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
		/* Emulated MMIO to/from encrypted memory not supported */
		return ES_UNSUPPORTED;

	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
	pa |= va & ~page_level_mask(level);

	*paddr = pa;

	return ES_OK;
}
/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"
static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}
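
/*
 * Tell the hypervisor that the guest has finished processing the current
 * NMI by issuing an NMI_COMPLETE VMGEXIT, so NMI injection can be
 * re-enabled.
 */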
void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = sev_es_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

	sev_es_put_ghcb(&state);
}
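
/* Ask the hypervisor for the physical address of the AP jump table */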
static u64 get_jump_table_addr(void)
{
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u64 ret = 0;

	local_irq_save(flags);

	ghcb = sev_es_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
	    ghcb_sw_exit_info_2_is_valid(ghcb))
		ret = ghcb->save.sw_exit_info_2;

	sev_es_put_ghcb(&state);

	local_irq_restore(flags);

	return ret;
}
int sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
{
	u16 startup_cs, startup_ip;
	phys_addr_t jump_table_pa;
	u64 jump_table_addr;
	u16 __iomem *jump_table;

	jump_table_addr = get_jump_table_addr();

	/* On UP guests there is no jump table so this is not a failure */
	if (!jump_table_addr)
		return 0;

	/* Check if AP Jump Table is page-aligned */
	if (jump_table_addr & ~PAGE_MASK)
		return -EINVAL;

	jump_table_pa = jump_table_addr & PAGE_MASK;

	startup_cs = (u16)(rmh->trampoline_start >> 4);
	startup_ip = (u16)(rmh->sev_es_trampoline_start -
			   rmh->trampoline_start);

	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
	if (!jump_table)
		return -EIO;

	writew(startup_ip, &jump_table[0]);
	writew(startup_cs, &jump_table[1]);

	iounmap(jump_table);

	return 0;
}
/*
 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
 */
int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
{
	struct sev_es_runtime_data *data;
	unsigned long address, pflags;
	int cpu;
	u64 pfn;

	if (!sev_es_active())
		return 0;

	pflags = _PAGE_NX | _PAGE_RW;

	for_each_possible_cpu(cpu) {
		data = per_cpu(runtime_data, cpu);

		address = __pa(&data->ghcb_page);
		pfn = address >> PAGE_SHIFT;

		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
			return 1;
	}

	return 0;
}
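
/*
 * Emulate RDMSR/WRMSR: pass ECX (and EAX/EDX for a write) to the hypervisor
 * through the GHCB and copy the result registers back on a read.
 */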
static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	enum es_result ret;
	u64 exit_info_1;

	/* Is it a WRMSR? */
	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;

	ghcb_set_rcx(ghcb, regs->cx);
	if (exit_info_1) {
		ghcb_set_rax(ghcb, regs->ax);
		ghcb_set_rdx(ghcb, regs->dx);
	}

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);

	if ((ret == ES_OK) && (!exit_info_1)) {
		regs->ax = ghcb->save.rax;
		regs->dx = ghcb->save.rdx;
	}

	return ret;
}
/*
 * This function runs on the first #VC exception after the kernel
 * switched to virtual addresses.
 */
static bool __init sev_es_setup_ghcb(void)
{
	/* First make sure the hypervisor talks a supported protocol. */
	if (!sev_es_negotiate_protocol())
		return false;

	/*
	 * Clear the boot_ghcb. The first exception comes in before the bss
	 * section is cleared.
	 */
	memset(&boot_ghcb_page, 0, PAGE_SIZE);

	/* Alright - Make the boot-ghcb public */
	boot_ghcb = &boot_ghcb_page;

	return true;
}
#ifdef CONFIG_HOTPLUG_CPU
static void sev_es_ap_hlt_loop(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = sev_es_get_ghcb(&state);

	while (true) {
		vc_ghcb_invalidate(ghcb);
		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
		ghcb_set_sw_exit_info_1(ghcb, 0);
		ghcb_set_sw_exit_info_2(ghcb, 0);

		sev_es_wr_ghcb_msr(__pa(ghcb));
		VMGEXIT();

		/* Wakeup signal? */
		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
		    ghcb->save.sw_exit_info_2)
			break;
	}

	sev_es_put_ghcb(&state);
}
/*
 * Play_dead handler when running under SEV-ES. This is needed because
 * the hypervisor can't deliver an SIPI request to restart the AP.
 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
 * hypervisor wakes it up again.
 */
static void sev_es_play_dead(void)
{
	play_dead_common();

	/* IRQs now disabled */

	sev_es_ap_hlt_loop();

	/*
	 * If we get here, the VCPU was woken up again. Jump to CPU
	 * startup code to get it back online.
	 */
	start_cpu0();
}
#else  /* CONFIG_HOTPLUG_CPU */
#define sev_es_play_dead	native_play_dead
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_SMP
static void __init sev_es_setup_play_dead(void)
{
	smp_ops.play_dead = sev_es_play_dead;
}
#else
static inline void sev_es_setup_play_dead(void) { }
#endif
static void __init alloc_runtime_data(int cpu)
{
	struct sev_es_runtime_data *data;

	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
	if (!data)
		panic("Can't allocate SEV-ES runtime data");

	per_cpu(runtime_data, cpu) = data;
}
static void __init init_ghcb(int cpu)
{
	struct sev_es_runtime_data *data;
	int err;

	data = per_cpu(runtime_data, cpu);

	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
					 sizeof(data->ghcb_page));
	if (err)
		panic("Can't map GHCBs unencrypted");

	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));

	data->ghcb_active = false;
	data->backup_ghcb_active = false;
}
void __init sev_es_init_vc_handling(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);

	if (!sev_es_active())
		return;

	if (!sev_es_check_cpu_features())
		panic("SEV-ES CPU Features missing");

	/* Enable SEV-ES special handling */
	static_branch_enable(&sev_es_enable_key);

	/* Initialize per-cpu GHCB pages */
	for_each_possible_cpu(cpu) {
		alloc_runtime_data(cpu);
		init_ghcb(cpu);
		setup_vc_stacks(cpu);
	}

	sev_es_setup_play_dead();

	/* Secondary CPUs use the runtime #VC handler */
	initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
}
static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
{
	int trapnr = ctxt->fi.vector;

	if (trapnr == X86_TRAP_PF)
		native_write_cr2(ctxt->fi.cr2);

	ctxt->regs->orig_ax = ctxt->fi.error_code;
	do_early_exception(ctxt->regs, trapnr);
}
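
/* Resolve the ModRM reg (or r/m) operand of the decoded instruction to a pointer into pt_regs */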
static long *vc_insn_get_reg(struct es_em_ctxt *ctxt)
{
	long *reg_array;
	int offset;

	reg_array = (long *)ctxt->regs;
	offset    = insn_get_modrm_reg_off(&ctxt->insn, ctxt->regs);

	if (offset < 0)
		return NULL;

	offset /= sizeof(long);

	return reg_array + offset;
}

static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
{
	long *reg_array;
	int offset;

	reg_array = (long *)ctxt->regs;
	offset    = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);

	if (offset < 0)
		return NULL;

	offset /= sizeof(long);

	return reg_array + offset;
}
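
/*
 * Perform a single MMIO read or write: translate the target address, point
 * the GHCB scratch area at the shared buffer and issue an MMIO_READ/WRITE
 * VMGEXIT. The data itself is transferred through ghcb->shared_buffer.
 */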
static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				 unsigned int bytes, bool read)
{
	u64 exit_code, exit_info_1, exit_info_2;
	unsigned long ghcb_pa = __pa(ghcb);
	enum es_result res;
	phys_addr_t paddr;
	void __user *ref;

	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
	if (ref == (void __user *)-1L)
		return ES_UNSUPPORTED;

	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;

	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
	if (res != ES_OK) {
		if (res == ES_EXCEPTION && !read)
			ctxt->fi.error_code |= X86_PF_WRITE;

		return res;
	}

	exit_info_1 = paddr;
	/* Can never be greater than 8 */
	exit_info_2 = bytes;

	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));

	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
}
static enum es_result vc_handle_mmio_twobyte_ops(struct ghcb *ghcb,
						 struct es_em_ctxt *ctxt)
{
	struct insn *insn = &ctxt->insn;
	unsigned int bytes = 0;
	enum es_result ret;
	int sign_byte;
	long *reg_data;

	switch (insn->opcode.bytes[1]) {
		/* MMIO Read w/ zero-extension */
	case 0xb6:
		bytes = 1;
		fallthrough;
	case 0xb7:
		if (!bytes)
			bytes = 2;

		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero extend based on operand size */
		reg_data = vc_insn_get_reg(ctxt);
		if (!reg_data)
			return ES_DECODE_FAILED;

		memset(reg_data, 0, insn->opnd_bytes);

		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;

		/* MMIO Read w/ sign-extension */
	case 0xbe:
		bytes = 1;
		fallthrough;
	case 0xbf:
		if (!bytes)
			bytes = 2;

		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Sign extend based on operand size */
		reg_data = vc_insn_get_reg(ctxt);
		if (!reg_data)
			return ES_DECODE_FAILED;

		if (bytes == 1) {
			u8 *val = (u8 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x80) ? 0xff : 0x00;
		} else {
			u16 *val = (u16 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
		}
		memset(reg_data, sign_byte, insn->opnd_bytes);

		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;

	default:
		ret = ES_UNSUPPORTED;
	}

	return ret;
}
/*
 * The MOVS instruction has two memory operands, which raises the
 * problem that it is not known whether the access to the source or the
 * destination caused the #VC exception (and hence whether an MMIO read
 * or write operation needs to be emulated).
 *
 * Instead of playing games with walking page-tables and trying to guess
 * whether the source or destination is an MMIO range, split the move
 * into two operations, a read and a write with only one memory operand.
 * This will cause a nested #VC exception on the MMIO address which can
 * then be handled.
 *
 * This implementation has the benefit that it also supports MOVS where
 * source _and_ destination are MMIO regions.
 *
 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
 * rare operation. If it turns out to be a performance problem the split
 * operations can be moved to memcpy_fromio() and memcpy_toio().
 */
static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
					  unsigned int bytes)
{
	unsigned long ds_base, es_base;
	unsigned char *src, *dst;
	unsigned char buffer[8];
	enum es_result ret;
	bool rep;
	int off;

	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

	if (ds_base == -1L || es_base == -1L) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	src = ds_base + (unsigned char *)ctxt->regs->si;
	dst = es_base + (unsigned char *)ctxt->regs->di;

	ret = vc_read_mem(ctxt, src, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	ret = vc_write_mem(ctxt, dst, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	if (ctxt->regs->flags & X86_EFLAGS_DF)
		off = -bytes;
	else
		off =  bytes;

	ctxt->regs->si += off;
	ctxt->regs->di += off;

	rep = insn_has_rep_prefix(&ctxt->insn);
	if (rep)
		ctxt->regs->cx -= 1;

	if (!rep || ctxt->regs->cx == 0)
		return ES_OK;
	else
		return ES_RETRY;
}
static enum es_result vc_handle_mmio(struct ghcb *ghcb,
				     struct es_em_ctxt *ctxt)
{
	struct insn *insn = &ctxt->insn;
	unsigned int bytes = 0;
	enum es_result ret;
	long *reg_data;

	switch (insn->opcode.bytes[0]) {
	/* MMIO Write */
	case 0x88:
		bytes = 1;
		fallthrough;
	case 0x89:
		if (!bytes)
			bytes = insn->opnd_bytes;

		reg_data = vc_insn_get_reg(ctxt);
		if (!reg_data)
			return ES_DECODE_FAILED;

		memcpy(ghcb->shared_buffer, reg_data, bytes);

		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;

	case 0xc6:
		bytes = 1;
		fallthrough;
	case 0xc7:
		if (!bytes)
			bytes = insn->opnd_bytes;

		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);

		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;

		/* MMIO Read */
	case 0x8a:
		bytes = 1;
		fallthrough;
	case 0x8b:
		if (!bytes)
			bytes = insn->opnd_bytes;

		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		reg_data = vc_insn_get_reg(ctxt);
		if (!reg_data)
			return ES_DECODE_FAILED;

		/* Zero-extend for 32-bit operation */
		if (bytes == 4)
			*reg_data = 0;

		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;

		/* MOVS instruction */
	case 0xa4:
		bytes = 1;
		fallthrough;
	case 0xa5:
		if (!bytes)
			bytes = insn->opnd_bytes;

		ret = vc_handle_mmio_movs(ctxt, bytes);
		break;
		/* Two-Byte Opcodes */
	case 0x0f:
		ret = vc_handle_mmio_twobyte_ops(ghcb, ctxt);
		break;
	default:
		ret = ES_UNSUPPORTED;
	}

	return ret;
}
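
/*
 * Hardware debugging is not supported in SEV-ES guests, so DR7 writes are
 * forwarded to the hypervisor and cached per CPU; DR7 reads return the
 * cached value.
 */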
static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long val, *reg = vc_insn_get_rm(ctxt);
	enum es_result ret;

	if (!reg)
		return ES_DECODE_FAILED;

	val = *reg;

	/* Upper 32 bits must be written as zeroes */
	if (val >> 32) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	/* Clear out other reserved bits and set bit 10 */
	val = (val & 0xffff23ffL) | BIT(10);

	/* Early non-zero writes to DR7 are not supported */
	if (!data && (val & ~DR7_RESET_VALUE))
		return ES_UNSUPPORTED;

	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
	ghcb_set_rax(ghcb, val);
	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (data)
		data->dr7 = val;

	return ES_OK;
}
static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
					 struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long *reg = vc_insn_get_rm(ctxt);

	if (!reg)
		return ES_DECODE_FAILED;

	if (data)
		*reg = data->dr7;
	else
		*reg = DR7_RESET_VALUE;

	return ES_OK;
}
static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
				       struct es_em_ctxt *ctxt)
{
	return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
}
static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rcx(ghcb, ctxt->regs->cx);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;

	return ES_OK;
}
static enum es_result vc_handle_monitor(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Treat it as a NOP and do not leak a physical address to the
	 * hypervisor.
	 */
	return ES_OK;
}

static enum es_result vc_handle_mwait(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	/* Treat the same as MONITOR/MONITORX */
	return ES_OK;
}
static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rax(ghcb, ctxt->regs->ax);
	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);

	if (x86_platform.hyper.sev_es_hcall_prepare)
		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!ghcb_rax_is_valid(ghcb))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;

	/*
	 * Call sev_es_hcall_finish() after regs->ax is already set.
	 * This allows the hypervisor handler to overwrite it again if
	 * necessary.
	 */
	if (x86_platform.hyper.sev_es_hcall_finish &&
	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
		return ES_VMM_ERROR;

	return ES_OK;
}
static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Calling exc_alignment_check() directly does not work, because it
	 * enables IRQs and the GHCB is active. Forward the exception and call
	 * it later from vc_forward_exception().
	 */
	ctxt->fi.vector = X86_TRAP_AC;
	ctxt->fi.error_code = 0;
	return ES_EXCEPTION;
}
static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
{
	if (user_mode(regs))
		noist_exc_debug(regs);
	else
		exc_debug(regs);
}
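
/* Dispatch an intercept to its emulation handler based on the #VC exit-code */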
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
					 struct ghcb *ghcb,
					 unsigned long exit_code)
{
	enum es_result result;

	switch (exit_code) {
	case SVM_EXIT_READ_DR7:
		result = vc_handle_dr7_read(ghcb, ctxt);
		break;
	case SVM_EXIT_WRITE_DR7:
		result = vc_handle_dr7_write(ghcb, ctxt);
		break;
	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
		result = vc_handle_trap_ac(ghcb, ctxt);
		break;
	case SVM_EXIT_RDTSC:
	case SVM_EXIT_RDTSCP:
		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
		break;
	case SVM_EXIT_RDPMC:
		result = vc_handle_rdpmc(ghcb, ctxt);
		break;
	case SVM_EXIT_INVD:
		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
		result = ES_UNSUPPORTED;
		break;
	case SVM_EXIT_CPUID:
		result = vc_handle_cpuid(ghcb, ctxt);
		break;
	case SVM_EXIT_IOIO:
		result = vc_handle_ioio(ghcb, ctxt);
		break;
	case SVM_EXIT_MSR:
		result = vc_handle_msr(ghcb, ctxt);
		break;
	case SVM_EXIT_VMMCALL:
		result = vc_handle_vmmcall(ghcb, ctxt);
		break;
	case SVM_EXIT_WBINVD:
		result = vc_handle_wbinvd(ghcb, ctxt);
		break;
	case SVM_EXIT_MONITOR:
		result = vc_handle_monitor(ghcb, ctxt);
		break;
	case SVM_EXIT_MWAIT:
		result = vc_handle_mwait(ghcb, ctxt);
		break;
	case SVM_EXIT_NPF:
		result = vc_handle_mmio(ghcb, ctxt);
		break;
	default:
		/*
		 * Unexpected #VC exception
		 */
		result = ES_UNSUPPORTED;
	}

	return result;
}
static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
{
	long error_code = ctxt->fi.error_code;
	int trapnr = ctxt->fi.vector;

	ctxt->regs->orig_ax = ctxt->fi.error_code;

	switch (trapnr) {
	case X86_TRAP_GP:
		exc_general_protection(ctxt->regs, error_code);
		break;
	case X86_TRAP_UD:
		exc_invalid_op(ctxt->regs);
		break;
	case X86_TRAP_AC:
		exc_alignment_check(ctxt->regs, error_code);
		break;
	default:
		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
		BUG();
	}
}
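
/* Check whether the exception was raised while already on the #VC fall-back (VC2) stack */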
static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
{
	unsigned long sp = (unsigned long)regs;

	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}
/*
 * Main #VC exception handler. It is called when the entry code was able to
 * switch off the IST to a safe kernel stack.
 *
 * With the current implementation it is always possible to switch to a safe
 * stack because #VC exceptions only happen at known places, like intercepted
 * instructions or accesses to MMIO areas/IO ports. They can also happen with
 * code instrumentation when the hypervisor intercepts #DB, but the critical
 * paths are forbidden to be instrumented, so #DB exceptions currently also
 * only happen in safe places.
 */
DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	irqentry_state_t irq_state;
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	enum es_result result;
	struct ghcb *ghcb;

	/*
	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
	 */
	if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
		vc_handle_trap_db(regs);
		return;
	}

	irq_state = irqentry_nmi_enter(regs);
	lockdep_assert_irqs_disabled();
	instrumentation_begin();

	/*
	 * This is invoked through an interrupt gate, so IRQs are disabled. The
	 * code below might walk page-tables for user or kernel addresses, so
	 * keep the IRQs disabled to protect us against concurrent TLB flushes.
	 */

	ghcb = sev_es_get_ghcb(&state);
	if (!ghcb) {
		/*
		 * Mark GHCBs inactive so that panic() is able to print the
		 * message.
		 */
		data->ghcb_active = false;
		data->backup_ghcb_active = false;

		panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
	}

	vc_ghcb_invalidate(ghcb);
	result = vc_init_em_ctxt(&ctxt, regs, error_code);

	if (result == ES_OK)
		result = vc_handle_exitcode(&ctxt, ghcb, error_code);

	sev_es_put_ghcb(&state);
	/* Done - now check the result */
	switch (result) {
	case ES_OK:
		vc_finish_insn(&ctxt);
		break;
	case ES_UNSUPPORTED:
		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
				   error_code, regs->ip);
		goto fail;
	case ES_VMM_ERROR:
		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
				   error_code, regs->ip);
		goto fail;
	case ES_DECODE_FAILED:
		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
				   error_code, regs->ip);
		goto fail;
	case ES_EXCEPTION:
		vc_forward_exception(&ctxt);
		break;
	case ES_RETRY:
		/* Nothing to do */
		break;
	default:
		pr_emerg("Unknown result in %s():%d\n", __func__, result);
		/*
		 * Emulating the instruction which caused the #VC exception
		 * failed - can't continue so print debug information
		 */
		BUG();
	}

out:
	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);

	return;
fail:
	if (user_mode(regs)) {
		/*
		 * Do not kill the machine if user-space triggered the
		 * exception. Send SIGBUS instead and let user-space deal with
		 * it.
		 */
		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
	} else {
		pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
			 result);

		/* Show some debug info */
		show_regs(regs);

		/* Ask hypervisor to sev_es_terminate */
		sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);

		/* If that fails and we get here - just panic */
		panic("Returned from Terminate-Request to Hypervisor\n");
	}

	goto out;
}
/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
{
	instrumentation_begin();
	panic("Can't handle #VC exception from unsupported context\n");
	instrumentation_end();
}
DEFINE_IDTENTRY_VC(exc_vmm_communication)
{
	if (likely(!on_vc_fallback_stack(regs)))
		safe_stack_exc_vmm_communication(regs, error_code);
	else
		ist_exc_vmm_communication(regs, error_code);
}
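
/*
 * Early #VC exception handler used during boot, before the per-CPU runtime
 * GHCBs are set up. It communicates with the hypervisor through the boot
 * GHCB.
 */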
bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
{
	unsigned long exit_code = regs->orig_ax;
	struct es_em_ctxt ctxt;
	enum es_result result;

	/* Do initial setup or terminate the guest */
	if (unlikely(boot_ghcb == NULL && !sev_es_setup_ghcb()))
		sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);

	vc_ghcb_invalidate(boot_ghcb);

	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
	if (result == ES_OK)
		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
	/* Done - now check the result */
	switch (result) {
	case ES_OK:
		vc_finish_insn(&ctxt);
		break;
	case ES_UNSUPPORTED:
		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
			     exit_code, regs->ip);
		goto fail;
	case ES_VMM_ERROR:
		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
			     exit_code, regs->ip);
		goto fail;
	case ES_DECODE_FAILED:
		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
			     exit_code, regs->ip);
		goto fail;
	case ES_EXCEPTION:
		vc_early_forward_exception(&ctxt);