// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV: " fmt

#include <linux/sched/debug.h>	/* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/platform_device.h>
#include <linux/io.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid.h>
#include <asm/cmdline.h>

#define DR7_RESET_VALUE        0x400

/* AP INIT values as documented in the APM2 section "Processor Initialization State" */
#define AP_INIT_CS_LIMIT		0xffff
#define AP_INIT_DS_LIMIT		0xffff
#define AP_INIT_LDTR_LIMIT		0xffff
#define AP_INIT_GDTR_LIMIT		0xffff
#define AP_INIT_IDTR_LIMIT		0xffff
#define AP_INIT_TR_LIMIT		0xffff
#define AP_INIT_RFLAGS_DEFAULT		0x2
#define AP_INIT_DR6_DEFAULT		0xffff0ff0
#define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
#define AP_INIT_XCR0_DEFAULT		0x1
#define AP_INIT_X87_FTW_DEFAULT		0x5555
#define AP_INIT_X87_FCW_DEFAULT		0x0040
#define AP_INIT_CR0_DEFAULT		0x60000010
#define AP_INIT_MXCSR_DEFAULT		0x1f80

/* For early boot hypervisor communication in SEV-ES enabled guests */
static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);

/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared
 */
static struct ghcb *boot_ghcb __section(".data");

/* Bitmap of SEV features supported by the hypervisor */
static u64 sev_hv_features __ro_after_init;

/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
	struct ghcb ghcb_page;

	/*
	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
	 * It is needed when an NMI happens while the #VC handler uses the real
	 * GHCB, and the NMI handler itself is causing another #VC exception. In
	 * that case the GHCB content of the first handler needs to be backed up
	 * and restored.
	 */
	struct ghcb backup_ghcb;

	/*
	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
	 * There is no need for it to be atomic, because nothing is written to
	 * the GHCB between the read and the write of ghcb_active. So it is safe
	 * to use it when a nested #VC exception happens before the write.
	 *
	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
	 * happens while the first #VC handler uses the GHCB. When the NMI code
	 * raises a second #VC handler it might overwrite the contents of the
	 * GHCB written by the first handler. To avoid this the content of the
	 * GHCB is saved and restored when the GHCB is detected to be in use
	 * already - happens in the same way as in the backup-GHCB case.
	 */
	bool ghcb_active;
	bool backup_ghcb_active;

	/*
	 * Cached DR7 value - write it on DR7 writes and return it on reads.
	 * That value will never make it to the real hardware DR7 as debugging
	 * is currently unsupported in SEV-ES guests.
	 */
	unsigned long dr7;
};

struct ghcb_state {
	struct ghcb *ghcb;
};

static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);

static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);

struct sev_config {
	__u64 debug		: 1,
	      __reserved	: 63;
};

static struct sev_config sev_cfg __read_mostly;

static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	/* User-mode RSP is not trusted */
	if (user_mode(regs))
		return false;

	/* SYSCALL gap still has user-mode RSP */
	if (ip_within_syscall_gap(regs))
		return false;

	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}

/*
 * This function handles the case when an NMI is raised in the #VC
 * exception handler entry code, before the #VC handler has switched off
 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 * so that any nested #VC exception will not overwrite the stack
 * contents of the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can also be
 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 * nested sev_es_ist_exit() call may adjust back the IST entry too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * there.
 */
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If NMI happened while on the #VC IST stack, set the new IST
	 * value below regs->sp, so that the interrupted stack frame is
	 * not overwritten by subsequent #VC exceptions.
	 */
	if (on_vc_stack(regs))
		new_ist = regs->sp;

	/*
	 * Reserve additional 8 bytes and store old IST value so this
	 * adjustment can be unrolled in __sev_es_ist_exit().
	 */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}

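/*
 * Illustrative summary of the IST juggling above (a sketch derived from the
 * code, not additional behavior): on NMI entry an 8-byte slot holding the old
 * IST value is placed just below either the current IST entry or regs->sp (if
 * the NMI interrupted code running on the #VC stack), and the IST entry is
 * then pointed below that slot. A nested #VC therefore cannot overwrite the
 * interrupted frame, and __sev_es_ist_exit() restores the IST entry by simply
 * reading the saved value back out of that slot.
 */
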
/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active        = false;
			data->backup_ghcb_active = false;

			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}

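/*
 * Typical calling pattern for __sev_get_ghcb()/__sev_put_ghcb() (defined
 * below) - an illustrative sketch mirroring in-file callers such as
 * vmgexit_psc() and snp_issue_guest_request():
 *
 *	struct ghcb_state state;
 *	unsigned long flags;
 *	struct ghcb *ghcb;
 *
 *	local_irq_save(flags);
 *	ghcb = __sev_get_ghcb(&state);
 *	... fill GHCB fields, issue the VMGEXIT via sev_es_ghcb_hv_call() ...
 *	__sev_put_ghcb(&state);
 *	local_irq_restore(flags);
 */
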
static inline u64 sev_es_rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low  = (u32)(val);
	high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}

static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
				unsigned char *buffer)
{
	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
}

static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int insn_bytes;

	insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
	if (insn_bytes == 0) {
		/* Nothing could be copied */
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
		ctxt->fi.cr2        = ctxt->regs->ip;
		return ES_EXCEPTION;
	} else if (insn_bytes == -EINVAL) {
		/* Effective RIP could not be calculated */
		ctxt->fi.vector     = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		ctxt->fi.cr2        = 0;
		return ES_EXCEPTION;
	}

	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
		return ES_DECODE_FAILED;

	if (ctxt->insn.immediate.got)
		return ES_OK;
	else
		return ES_DECODE_FAILED;
}

static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int res, ret;

	res = vc_fetch_insn_kernel(ctxt, buffer);
	if (res) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR;
		ctxt->fi.cr2        = ctxt->regs->ip;
		return ES_EXCEPTION;
	}

	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
	if (ret < 0)
		return ES_DECODE_FAILED;
	else
		return ES_OK;
}

static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	if (user_mode(ctxt->regs))
		return __vc_decode_user_insn(ctxt);
	else
		return __vc_decode_kern_insn(ctxt);
}

static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   char *dst, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;

	/*
	 * This function uses __put_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __put_user() does no
	 * sanity checks of the pointer being accessed. All that it does is
	 * to report when the access failed.
	 *
	 * Also, this function runs in atomic context, so __put_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_to_user() here because
	 * vc_write_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whatever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *target = (u8 __user *)dst;

		memcpy(&d1, buf, 1);
		if (__put_user(d1, target))
			goto fault;
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *target = (u16 __user *)dst;

		memcpy(&d2, buf, 2);
		if (__put_user(d2, target))
			goto fault;
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *target = (u32 __user *)dst;

		memcpy(&d4, buf, 4);
		if (__put_user(d4, target))
			goto fault;
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *target = (u64 __user *)dst;

		memcpy(&d8, buf, 8);
		if (__put_user(d8, target))
			goto fault;
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)dst;

	return ES_EXCEPTION;
}

static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  char *src, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT;

	/*
	 * This function uses __get_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __get_user() does no
	 * sanity checks of the pointer being accessed. All that it does is
	 * to report when the access failed.
	 *
	 * Also, this function runs in atomic context, so __get_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_from_user() here because
	 * vc_read_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whatever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *s = (u8 __user *)src;

		if (__get_user(d1, s))
			goto fault;
		memcpy(buf, &d1, 1);
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *s = (u16 __user *)src;

		if (__get_user(d2, s))
			goto fault;
		memcpy(buf, &d2, 2);
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *s = (u32 __user *)src;

		if (__get_user(d4, s))
			goto fault;
		memcpy(buf, &d4, 4);
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *s = (u64 __user *)src;

		if (__get_user(d8, s))
			goto fault;
		memcpy(buf, &d8, 8);
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)src;

	return ES_EXCEPTION;
}

static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
					   unsigned long vaddr, phys_addr_t *paddr)
{
	unsigned long va = (unsigned long)vaddr;
	unsigned int level;
	phys_addr_t pa;
	pgd_t *pgd;
	pte_t *pte;

	pgd = __va(read_cr3_pa());
	pgd = &pgd[pgd_index(va)];
	pte = lookup_address_in_pgd(pgd, va, &level);
	if (!pte) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.cr2        = vaddr;
		ctxt->fi.error_code = 0;

		if (user_mode(ctxt->regs))
			ctxt->fi.error_code |= X86_PF_USER;

		return ES_EXCEPTION;
	}

	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
		/* Emulated MMIO to/from encrypted memory not supported */
		return ES_UNSUPPORTED;

	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
	pa |= va & ~page_level_mask(level);

	*paddr = pa;

	return ES_OK;
}

/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"

static noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}

void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

	__sev_put_ghcb(&state);
}

static u64 __init get_secrets_page(void)
{
	u64 pa_data = boot_params.cc_blob_address;
	struct cc_blob_sev_info info;
	void *map;

	/*
	 * The CC blob contains the address of the secrets page, check if the
	 * blob is present.
	 */
	if (!pa_data)
		return 0;

	map = early_memremap(pa_data, sizeof(info));
	if (!map) {
		pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
		return 0;
	}
	memcpy(&info, map, sizeof(info));
	early_memunmap(map, sizeof(info));

	/* smoke-test the secrets page passed */
	if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
		return 0;

	return info.secrets_phys;
}

static u64 __init get_snp_jump_table_addr(void)
{
	struct snp_secrets_page_layout *layout;
	void __iomem *mem;
	u64 pa, addr;

	pa = get_secrets_page();
	if (!pa)
		return 0;

	mem = ioremap_encrypted(pa, PAGE_SIZE);
	if (!mem) {
		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
		return 0;
	}

	layout = (__force struct snp_secrets_page_layout *)mem;

	addr = layout->os_area.ap_jump_table_pa;
	iounmap(mem);

	return addr;
}

static u64 __init get_jump_table_addr(void)
{
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u64 ret = 0;

	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return get_snp_jump_table_addr();

	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
	    ghcb_sw_exit_info_2_is_valid(ghcb))
		ret = ghcb->save.sw_exit_info_2;

	__sev_put_ghcb(&state);

	local_irq_restore(flags);

	return ret;
}

static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool validate)
{
	unsigned long vaddr_end;
	int rc;

	vaddr = vaddr & PAGE_MASK;
	vaddr_end = vaddr + (npages << PAGE_SHIFT);

	while (vaddr < vaddr_end) {
		rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
		if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);

		vaddr = vaddr + PAGE_SIZE;
	}
}

static void __init early_set_pages_state(unsigned long paddr, unsigned int npages, enum psc_op op)
{
	unsigned long paddr_end;
	u64 val;

	paddr = paddr & PAGE_MASK;
	paddr_end = paddr + (npages << PAGE_SHIFT);

	while (paddr < paddr_end) {
		/*
		 * Use the MSR protocol because this function can be called before
		 * the GHCB is established.
		 */
		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
		VMGEXIT();

		val = sev_es_rd_ghcb_msr();

		if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
			 "Wrong PSC response code: 0x%x\n",
			 (unsigned int)GHCB_RESP_CODE(val)))
			goto e_term;

		if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
			 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
			 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
			goto e_term;

		paddr = paddr + PAGE_SIZE;
	}

	return;

e_term:
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}

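/*
 * For reference, a PSC request over the MSR protocol packs the operation and
 * the GFN into MSR_AMD64_SEV_ES_GHCB. A hedged sketch of the layout per the
 * GHCB specification (the spec and GHCB_MSR_PSC_REQ_GFN() are authoritative,
 * not this comment):
 *
 *	GHCBData[55:52] - page operation (private/shared)
 *	GHCBData[51:12] - guest frame number
 *	GHCBData[11:0]  - GHCB_MSR_PSC_REQ info code
 */
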
void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	/*
	 * Ask the hypervisor to mark the memory pages as private in the RMP
	 * table.
	 */
	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_PRIVATE);

	/* Validate the memory pages after they've been added in the RMP table. */
	pvalidate_pages(vaddr, npages, true);
}

void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	/* Invalidate the memory pages before they are marked shared in the RMP table. */
	pvalidate_pages(vaddr, npages, false);

	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED);
}

void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
{
	unsigned long vaddr, npages;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	vaddr = (unsigned long)__va(paddr);
	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (op == SNP_PAGE_STATE_PRIVATE)
		early_snp_set_memory_private(vaddr, paddr, npages);
	else if (op == SNP_PAGE_STATE_SHARED)
		early_snp_set_memory_shared(vaddr, paddr, npages);
	else
		WARN(1, "invalid memory op %d\n", op);
}

static int vmgexit_psc(struct snp_psc_desc *desc)
{
	int cur_entry, end_entry, ret = 0;
	struct snp_psc_desc *data;
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	unsigned long flags;
	struct ghcb *ghcb;

	/*
	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
	 * a per-CPU GHCB.
	 */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);
	if (!ghcb) {
		ret = 1;
		goto out_unlock;
	}

	/* Copy the input desc into GHCB shared buffer */
	data = (struct snp_psc_desc *)ghcb->shared_buffer;
	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));

	/*
	 * As per the GHCB specification, the hypervisor can resume the guest
	 * before processing all the entries. Check whether all the entries
	 * are processed. If not, then keep retrying. Note, the hypervisor
	 * will update the data memory directly to indicate the status, so
	 * reference the data->hdr everywhere.
	 *
	 * The strategy here is to wait for the hypervisor to change the page
	 * state in the RMP table before the guest accesses the memory pages. If
	 * the page state change was not successful, then later memory access
	 * will result in a crash.
	 */
	cur_entry = data->hdr.cur_entry;
	end_entry = data->hdr.end_entry;

	while (data->hdr.cur_entry <= data->hdr.end_entry) {
		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));

		/* This will advance the shared buffer data points to. */
		ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, SVM_VMGEXIT_PSC, 0, 0);

		/*
		 * Page State Change VMGEXIT can pass error code through
		 * exit_info_2.
		 */
		if (WARN(ret || ghcb->save.sw_exit_info_2,
			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
			 ret, ghcb->save.sw_exit_info_2)) {
			ret = 1;
			goto out;
		}

		/* Verify that reserved bit is not set */
		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
			ret = 1;
			goto out;
		}

		/*
		 * Sanity check that entry processing is not going backwards.
		 * This will happen only if hypervisor is tricking us.
		 */
		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
			 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
			ret = 1;
			goto out;
		}
	}

out:
	__sev_put_ghcb(&state);

out_unlock:
	local_irq_restore(flags);

	return ret;
}

static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
			      unsigned long vaddr_end, int op)
{
	struct psc_hdr *hdr;
	struct psc_entry *e;
	unsigned long pfn;
	int i;

	hdr = &data->hdr;
	e = data->entries;

	memset(data, 0, sizeof(*data));
	i = 0;

	while (vaddr < vaddr_end) {
		if (is_vmalloc_addr((void *)vaddr))
			pfn = vmalloc_to_pfn((void *)vaddr);
		else
			pfn = __pa(vaddr) >> PAGE_SHIFT;

		e->gfn = pfn;
		e->operation = op;
		hdr->end_entry = i;

		/*
		 * Current SNP implementation doesn't keep track of the RMP page
		 * size so use 4K for simplicity.
		 */
		e->pagesize = RMP_PG_SIZE_4K;

		vaddr = vaddr + PAGE_SIZE;
		e++;
		i++;
	}

	if (vmgexit_psc(data))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}

static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
{
	unsigned long vaddr_end, next_vaddr;
	struct snp_psc_desc *desc;

	desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
	if (!desc)
		panic("SNP: failed to allocate memory for PSC descriptor\n");

	vaddr = vaddr & PAGE_MASK;
	vaddr_end = vaddr + (npages << PAGE_SHIFT);

	while (vaddr < vaddr_end) {
		/* Calculate the last vaddr that fits in one struct snp_psc_desc. */
		next_vaddr = min_t(unsigned long, vaddr_end,
				   (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);

		__set_pages_state(desc, vaddr, next_vaddr, op);

		vaddr = next_vaddr;
	}

	kfree(desc);
}

void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	pvalidate_pages(vaddr, npages, false);

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
}

void snp_set_memory_private(unsigned long vaddr, unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);

	pvalidate_pages(vaddr, npages, true);
}

static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

#define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
#define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
#define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)

#define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
#define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)

static void *snp_alloc_vmsa_page(void)
{
	struct page *p;

	/*
	 * Allocate VMSA page to work around the SNP erratum where the CPU will
	 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
	 * collides with the RMP entry of VMSA page. The recommended workaround
	 * is to not use a large page.
	 *
	 * Allocate an 8k page which is also 8k-aligned.
	 */
	p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
	if (!p)
		return NULL;

	split_page(p, 1);

	/* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
	__free_page(p);

	return page_address(p + 1);
}

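/*
 * Why the second 4k page of the order-1 allocation is safe (reasoning spelled
 * out, not additional behavior): the 8k block is 8k-aligned, so if its start
 * happens to be 2M- or 1G-aligned, the page at p + 1 is offset by 4k and can
 * never be large-page aligned itself, which avoids the RMP/large-page
 * collision described in the erratum above.
 */
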
static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;

	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}

static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
{
	struct sev_es_save_area *cur_vmsa, *vmsa;
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u8 sipi_vector;
	int cpu, ret;
	u64 cr4;

	/*
	 * The hypervisor SNP feature support check has happened earlier, just check
	 * the AP_CREATION one here.
	 */
	if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
		return -EOPNOTSUPP;

	/*
	 * Verify the desired start IP against the known trampoline start IP
	 * to catch any future new trampolines that may be introduced that
	 * would require a new protected guest entry point.
	 */
	if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
		      "Unsupported SNP start_ip: %lx\n", start_ip))
		return -EINVAL;

	/* Override start_ip with known protected guest start IP */
	start_ip = real_mode_header->sev_es_trampoline_start;

	/* Find the logical CPU for the APIC ID */
	for_each_present_cpu(cpu) {
		if (arch_match_cpu_phys_id(cpu, apic_id))
			break;
	}
	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	cur_vmsa = per_cpu(sev_vmsa, cpu);

	/*
	 * A new VMSA is created each time because there is no guarantee that
	 * the current VMSA is the kernel's or that the vCPU is not running. If
	 * an attempt was done to use the current VMSA with a running vCPU, a
	 * #VMEXIT of that vCPU would wipe out all of the settings being done
	 * here.
	 */
	vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
	if (!vmsa)
		return -ENOMEM;

	/* CR4 should maintain the MCE value */
	cr4 = native_read_cr4() & X86_CR4_MCE;

	/* Set the CS value based on the start_ip converted to a SIPI vector */
	sipi_vector		= (start_ip >> 12);
	vmsa->cs.base		= sipi_vector << 12;
	vmsa->cs.limit		= AP_INIT_CS_LIMIT;
	vmsa->cs.attrib		= INIT_CS_ATTRIBS;
	vmsa->cs.selector	= sipi_vector << 8;

	/* Set the RIP value based on start_ip */
	vmsa->rip		= start_ip & 0xfff;

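	/*
	 * Worked example (hypothetical start_ip of 0x96000, chosen only for
	 * illustration): sipi_vector = 0x96, so CS.base = 0x96000,
	 * CS.selector = 0x9600 and RIP = 0x000 - real-mode execution starts
	 * at CS:IP = 0x9600:0x0000, i.e. linear address 0x96000.
	 */
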
	/* Set AP INIT defaults as documented in the APM */
	vmsa->ds.limit		= AP_INIT_DS_LIMIT;
	vmsa->ds.attrib		= INIT_DS_ATTRIBS;
	vmsa->es		= vmsa->ds;
	vmsa->fs		= vmsa->ds;
	vmsa->gs		= vmsa->ds;
	vmsa->ss		= vmsa->ds;

	vmsa->gdtr.limit	= AP_INIT_GDTR_LIMIT;
	vmsa->ldtr.limit	= AP_INIT_LDTR_LIMIT;
	vmsa->ldtr.attrib	= INIT_LDTR_ATTRIBS;
	vmsa->idtr.limit	= AP_INIT_IDTR_LIMIT;
	vmsa->tr.limit		= AP_INIT_TR_LIMIT;
	vmsa->tr.attrib		= INIT_TR_ATTRIBS;

	vmsa->cr4		= cr4;
	vmsa->cr0		= AP_INIT_CR0_DEFAULT;
	vmsa->dr7		= DR7_RESET_VALUE;
	vmsa->dr6		= AP_INIT_DR6_DEFAULT;
	vmsa->rflags		= AP_INIT_RFLAGS_DEFAULT;
	vmsa->g_pat		= AP_INIT_GPAT_DEFAULT;
	vmsa->xcr0		= AP_INIT_XCR0_DEFAULT;
	vmsa->mxcsr		= AP_INIT_MXCSR_DEFAULT;
	vmsa->x87_ftw		= AP_INIT_X87_FTW_DEFAULT;
	vmsa->x87_fcw		= AP_INIT_X87_FCW_DEFAULT;

	/* SVME must be set. */
	vmsa->efer		= EFER_SVME;

	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl		= 0;
	vmsa->sev_features	= sev_status >> 2;

	/* Switch the page over to a VMSA page now that it is initialized */
	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("set VMSA page failed (%u)\n", ret);
		free_page((unsigned long)vmsa);

		return -EINVAL;
	}

	/* Issue VMGEXIT AP Creation NAE event */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_rax(ghcb, vmsa->sev_features);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
	    lower_32_bits(ghcb->save.sw_exit_info_1)) {
		pr_err("SNP AP Creation error\n");
		ret = -EINVAL;
	}

	__sev_put_ghcb(&state);

	local_irq_restore(flags);

	/* Perform cleanup if there was an error */
	if (ret) {
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}

	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);

	/* Record the current VMSA page */
	per_cpu(sev_vmsa, cpu) = vmsa;

	return ret;
}

void snp_set_wakeup_secondary_cpu(void)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	/*
	 * Always set this override if SNP is enabled. This makes it the
	 * required method to start APs under SNP. If the hypervisor does
	 * not support AP creation, then no APs will be started.
	 */
	apic->wakeup_secondary_cpu = wakeup_cpu_via_vmgexit;
}

int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
{
	u16 startup_cs, startup_ip;
	phys_addr_t jump_table_pa;
	u64 jump_table_addr;
	u16 __iomem *jump_table;

	jump_table_addr = get_jump_table_addr();

	/* On UP guests there is no jump table so this is not a failure */
	if (!jump_table_addr)
		return 0;

	/* Check if AP Jump Table is page-aligned */
	if (jump_table_addr & ~PAGE_MASK)
		return -EINVAL;

	jump_table_pa = jump_table_addr & PAGE_MASK;

	startup_cs = (u16)(rmh->trampoline_start >> 4);
	startup_ip = (u16)(rmh->sev_es_trampoline_start -
			   rmh->trampoline_start);

	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
	if (!jump_table)
		return -EIO;

	writew(startup_ip, &jump_table[0]);
	writew(startup_cs, &jump_table[1]);

	iounmap(jump_table);

	return 0;
}

/*
 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
 */
int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
{
	struct sev_es_runtime_data *data;
	unsigned long address, pflags;
	int cpu;
	u64 pfn;

	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return 0;

	pflags = _PAGE_NX | _PAGE_RW;

	for_each_possible_cpu(cpu) {
		data = per_cpu(runtime_data, cpu);

		address = __pa(&data->ghcb_page);
		pfn = address >> PAGE_SHIFT;

		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
			return 1;
	}

	return 0;
}

static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	enum es_result ret;
	u64 exit_info_1;

	/* Is it a WRMSR? */
	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;

	ghcb_set_rcx(ghcb, regs->cx);
	if (exit_info_1) {
		ghcb_set_rax(ghcb, regs->ax);
		ghcb_set_rdx(ghcb, regs->dx);
	}

	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_MSR,
				  exit_info_1, 0);

	if ((ret == ES_OK) && (!exit_info_1)) {
		regs->ax = ghcb->save.rax;
		regs->dx = ghcb->save.rdx;
	}

	return ret;
}

static void snp_register_per_cpu_ghcb(void)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	snp_register_ghcb_early(__pa(ghcb));
}

void setup_ghcb(void)
{
	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return;

	/* First make sure the hypervisor talks a supported protocol. */
	if (!sev_es_negotiate_protocol())
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

	/*
	 * Check whether the runtime #VC exception handler is active. It uses
	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
	 *
	 * If SNP is active, register the per-CPU GHCB page so that the runtime
	 * exception handler can use it.
	 */
	if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
			snp_register_per_cpu_ghcb();

		return;
	}

	/*
	 * Clear the boot_ghcb. The first exception comes in before the bss
	 * section is cleared.
	 */
	memset(&boot_ghcb_page, 0, PAGE_SIZE);

	/* Alright - Make the boot-ghcb public */
	boot_ghcb = &boot_ghcb_page;

	/* SNP guest requires that GHCB GPA must be registered. */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		snp_register_ghcb_early(__pa(&boot_ghcb_page));
}

#ifdef CONFIG_HOTPLUG_CPU
static void sev_es_ap_hlt_loop(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	while (true) {
		vc_ghcb_invalidate(ghcb);
		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
		ghcb_set_sw_exit_info_1(ghcb, 0);
		ghcb_set_sw_exit_info_2(ghcb, 0);

		sev_es_wr_ghcb_msr(__pa(ghcb));
		VMGEXIT();

		/* Wakeup signal? */
		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
		    ghcb->save.sw_exit_info_2)
			break;
	}

	__sev_put_ghcb(&state);
}

/*
 * Play_dead handler when running under SEV-ES. This is needed because
 * the hypervisor can't deliver an SIPI request to restart the AP.
 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
 * hypervisor wakes it up again.
 */
static void sev_es_play_dead(void)
{
	play_dead_common();

	/* IRQs now disabled */

	sev_es_ap_hlt_loop();

	/*
	 * If we get here, the VCPU was woken up again. Jump to CPU
	 * startup code to get it back online.
	 */
	start_cpu0();
}
#else  /* CONFIG_HOTPLUG_CPU */
#define sev_es_play_dead	native_play_dead
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_SMP
static void __init sev_es_setup_play_dead(void)
{
	smp_ops.play_dead = sev_es_play_dead;
}
#else
static inline void sev_es_setup_play_dead(void) { }
#endif

static void __init alloc_runtime_data(int cpu)
{
	struct sev_es_runtime_data *data;

	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
	if (!data)
		panic("Can't allocate SEV-ES runtime data");

	per_cpu(runtime_data, cpu) = data;
}

static void __init init_ghcb(int cpu)
{
	struct sev_es_runtime_data *data;
	int err;

	data = per_cpu(runtime_data, cpu);

	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
					 sizeof(data->ghcb_page));
	if (err)
		panic("Can't map GHCBs unencrypted");

	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));

	data->ghcb_active = false;
	data->backup_ghcb_active = false;
}

void __init sev_es_init_vc_handling(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);

	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return;

	if (!sev_es_check_cpu_features())
		panic("SEV-ES CPU Features missing");

	/*
	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
	 * features.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
		sev_hv_features = get_hv_features();

		if (!(sev_hv_features & GHCB_HV_FT_SNP))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
	}

	/* Enable SEV-ES special handling */
	static_branch_enable(&sev_es_enable_key);

	/* Initialize per-cpu GHCB pages */
	for_each_possible_cpu(cpu) {
		alloc_runtime_data(cpu);
		init_ghcb(cpu);
	}

	sev_es_setup_play_dead();

	/* Secondary CPUs use the runtime #VC handler */
	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}

static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
{
	int trapnr = ctxt->fi.vector;

	if (trapnr == X86_TRAP_PF)
		native_write_cr2(ctxt->fi.cr2);

	ctxt->regs->orig_ax = ctxt->fi.error_code;
	do_early_exception(ctxt->regs, trapnr);
}

static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
{
	long *reg_array;
	int offset;

	reg_array = (long *)ctxt->regs;
	offset	  = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);

	if (offset < 0)
		return NULL;

	offset /= sizeof(long);

	return reg_array + offset;
}

static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				 unsigned int bytes, bool read)
{
	u64 exit_code, exit_info_1, exit_info_2;
	unsigned long ghcb_pa = __pa(ghcb);
	enum es_result res;
	phys_addr_t paddr;
	void __user *ref;

	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
	if (ref == (void __user *)-1L)
		return ES_UNSUPPORTED;

	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;

	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
	if (res != ES_OK) {
		if (res == ES_EXCEPTION && !read)
			ctxt->fi.error_code |= X86_PF_WRITE;

		return res;
	}

	exit_info_1 = paddr;
	/* Can never be greater than 8 */
	exit_info_2 = bytes;

	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));

	return sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, exit_info_1, exit_info_2);
}

/*
 * The MOVS instruction has two memory operands, which raises the
 * problem that it is not known whether the access to the source or the
 * destination caused the #VC exception (and hence whether an MMIO read
 * or write operation needs to be emulated).
 *
 * Instead of playing games with walking page-tables and trying to guess
 * whether the source or destination is an MMIO range, split the move
 * into two operations, a read and a write with only one memory operand.
 * This will cause a nested #VC exception on the MMIO address which can
 * then be handled.
 *
 * This implementation has the benefit that it also supports MOVS where
 * source _and_ destination are MMIO regions.
 *
 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
 * rare operation. If it turns out to be a performance problem the split
 * operations can be moved to memcpy_fromio() and memcpy_toio().
 */
static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
					  unsigned int bytes)
{
	unsigned long ds_base, es_base;
	unsigned char *src, *dst;
	unsigned char buffer[8];
	enum es_result ret;
	bool rep;
	int off;

	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

	if (ds_base == -1L || es_base == -1L) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	src = ds_base + (unsigned char *)ctxt->regs->si;
	dst = es_base + (unsigned char *)ctxt->regs->di;

	ret = vc_read_mem(ctxt, src, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	ret = vc_write_mem(ctxt, dst, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	if (ctxt->regs->flags & X86_EFLAGS_DF)
		off = -bytes;
	else
		off = bytes;

	ctxt->regs->si += off;
	ctxt->regs->di += off;

	rep = insn_has_rep_prefix(&ctxt->insn);
	if (rep)
		ctxt->regs->cx -= 1;

	if (!rep || ctxt->regs->cx == 0)
		return ES_OK;
	else
		return ES_RETRY;
}

static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct insn *insn = &ctxt->insn;
	unsigned int bytes = 0;
	enum mmio_type mmio;
	enum es_result ret;
	u8 sign_byte;
	long *reg_data;

	mmio = insn_decode_mmio(insn, &bytes);
	if (mmio == MMIO_DECODE_FAILED)
		return ES_DECODE_FAILED;

	if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
		reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
		if (!reg_data)
			return ES_DECODE_FAILED;
	}

	switch (mmio) {
	case MMIO_WRITE:
		memcpy(ghcb->shared_buffer, reg_data, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
	case MMIO_WRITE_IMM:
		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
	case MMIO_READ:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero-extend for 32-bit operation */
		if (bytes == 4)
			*reg_data = 0;

		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case MMIO_READ_ZERO_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero extend based on operand size */
		memset(reg_data, 0, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case MMIO_READ_SIGN_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		if (bytes == 1) {
			u8 *val = (u8 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x80) ? 0xff : 0x00;
		} else {
			u16 *val = (u16 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
		}

		/* Sign extend based on operand size */
		memset(reg_data, sign_byte, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case MMIO_MOVS:
		ret = vc_handle_mmio_movs(ctxt, bytes);
		break;
	default:
		ret = ES_UNSUPPORTED;
		break;
	}

	return ret;
}

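/*
 * Worked example for the sign-extending path above (hypothetical
 * instruction "movsx eax, byte ptr [mmio]"): bytes == 1 and
 * insn->opnd_bytes == 4; a value of 0x80 read from MMIO has bit 7 set, so
 * sign_byte becomes 0xff and the destination register ends up holding
 * 0xffffff80.
 */
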
static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long val, *reg = vc_insn_get_rm(ctxt);
	enum es_result ret;

	if (!reg)
		return ES_DECODE_FAILED;

	val = *reg;

	/* Upper 32 bits must be written as zeroes */
	if (val >> 32) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	/* Clear out other reserved bits and set bit 10 */
	val = (val & 0xffff23ffL) | BIT(10);

	/* Early non-zero writes to DR7 are not supported */
	if (!data && (val & ~DR7_RESET_VALUE))
		return ES_UNSUPPORTED;

	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
	ghcb_set_rax(ghcb, val);
	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (data)
		data->dr7 = val;

	return ES_OK;
}

static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
					 struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long *reg = vc_insn_get_rm(ctxt);

	if (!reg)
		return ES_DECODE_FAILED;

	if (data)
		*reg = data->dr7;
	else
		*reg = DR7_RESET_VALUE;

	return ES_OK;
}

static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
				       struct es_em_ctxt *ctxt)
{
	return sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WBINVD, 0, 0);
}

static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rcx(ghcb, ctxt->regs->cx);

	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_RDPMC, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;

	return ES_OK;
}

static enum es_result vc_handle_monitor(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Treat it as a NOP and do not leak a physical address to the
	 * hypervisor.
	 */
	return ES_OK;
}

static enum es_result vc_handle_mwait(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	/* Treat the same as MONITOR/MONITORX */
	return ES_OK;
}

static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rax(ghcb, ctxt->regs->ax);
	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);

	if (x86_platform.hyper.sev_es_hcall_prepare)
		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);

	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_VMMCALL, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!ghcb_rax_is_valid(ghcb))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;

	/*
	 * Call sev_es_hcall_finish() after regs->ax is already set.
	 * This allows the hypervisor handler to overwrite it again if
	 * necessary.
	 */
	if (x86_platform.hyper.sev_es_hcall_finish &&
	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
		return ES_VMM_ERROR;

	return ES_OK;
}

static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Calling exc_alignment_check() directly does not work, because it
	 * enables IRQs and the GHCB is active. Forward the exception and call
	 * it later from vc_forward_exception().
	 */
	ctxt->fi.vector = X86_TRAP_AC;
	ctxt->fi.error_code = 0;
	return ES_EXCEPTION;
}

static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
					 struct ghcb *ghcb,
					 unsigned long exit_code)
{
	enum es_result result;

	switch (exit_code) {
	case SVM_EXIT_READ_DR7:
		result = vc_handle_dr7_read(ghcb, ctxt);
		break;
	case SVM_EXIT_WRITE_DR7:
		result = vc_handle_dr7_write(ghcb, ctxt);
		break;
	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
		result = vc_handle_trap_ac(ghcb, ctxt);
		break;
	case SVM_EXIT_RDTSC:
	case SVM_EXIT_RDTSCP:
		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
		break;
	case SVM_EXIT_RDPMC:
		result = vc_handle_rdpmc(ghcb, ctxt);
		break;
	case SVM_EXIT_INVD:
		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
		result = ES_UNSUPPORTED;
		break;
	case SVM_EXIT_CPUID:
		result = vc_handle_cpuid(ghcb, ctxt);
		break;
	case SVM_EXIT_IOIO:
		result = vc_handle_ioio(ghcb, ctxt);
		break;
	case SVM_EXIT_MSR:
		result = vc_handle_msr(ghcb, ctxt);
		break;
	case SVM_EXIT_VMMCALL:
		result = vc_handle_vmmcall(ghcb, ctxt);
		break;
	case SVM_EXIT_WBINVD:
		result = vc_handle_wbinvd(ghcb, ctxt);
		break;
	case SVM_EXIT_MONITOR:
		result = vc_handle_monitor(ghcb, ctxt);
		break;
	case SVM_EXIT_MWAIT:
		result = vc_handle_mwait(ghcb, ctxt);
		break;
	case SVM_EXIT_NPF:
		result = vc_handle_mmio(ghcb, ctxt);
		break;
	default:
		/*
		 * Unexpected #VC exception
		 */
		result = ES_UNSUPPORTED;
	}

	return result;
}

static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
{
	long error_code = ctxt->fi.error_code;
	int trapnr = ctxt->fi.vector;

	ctxt->regs->orig_ax = ctxt->fi.error_code;

	switch (trapnr) {
	case X86_TRAP_GP:
		exc_general_protection(ctxt->regs, error_code);
		break;
	case X86_TRAP_UD:
		exc_invalid_op(ctxt->regs);
		break;
	case X86_TRAP_PF:
		write_cr2(ctxt->fi.cr2);
		exc_page_fault(ctxt->regs, error_code);
		break;
	case X86_TRAP_AC:
		exc_alignment_check(ctxt->regs, error_code);
		break;
	default:
		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
		BUG();
	}
}

static __always_inline bool is_vc2_stack(unsigned long sp)
{
	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}

static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
{
	unsigned long sp, prev_sp;

	sp	= (unsigned long)regs;
	prev_sp	= regs->sp;

	/*
	 * If the code was already executing on the VC2 stack when the #VC
	 * happened, let it proceed to the normal handling routine. This way the
	 * code executing on the VC2 stack can cause #VC exceptions to get handled.
	 */
	return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
}

static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
{
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	enum es_result result;
	struct ghcb *ghcb;
	bool ret = true;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	result = vc_init_em_ctxt(&ctxt, regs, error_code);

	if (result == ES_OK)
		result = vc_handle_exitcode(&ctxt, ghcb, error_code);

	__sev_put_ghcb(&state);

	/* Done - now check the result */
	switch (result) {
	case ES_OK:
		vc_finish_insn(&ctxt);
		break;
	case ES_UNSUPPORTED:
		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_VMM_ERROR:
		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_DECODE_FAILED:
		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_EXCEPTION:
		vc_forward_exception(&ctxt);
		break;
	case ES_RETRY:
		/* Nothing to do */
		break;
	default:
		pr_emerg("Unknown result in %s():%d\n", __func__, result);
		/*
		 * Emulating the instruction which caused the #VC exception
		 * failed - can't continue so print debug information
		 */
		BUG();
	}

	return ret;
}

static __always_inline bool vc_is_db(unsigned long error_code)
{
	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
}

/*
 * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
 * and will panic when an error happens.
 */
DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
{
	irqentry_state_t irq_state;

	/*
	 * With the current implementation it is always possible to switch to a
	 * safe stack because #VC exceptions only happen at known places, like
	 * intercepted instructions or accesses to MMIO areas/IO ports. They can
	 * also happen with code instrumentation when the hypervisor intercepts
	 * #DB, but the critical paths are forbidden to be instrumented, so #DB
	 * exceptions currently also only happen in safe places.
	 *
	 * But keep this here in case the noinstr annotations are violated due
	 * to bug elsewhere.
	 */
	if (unlikely(vc_from_invalid_context(regs))) {
		instrumentation_begin();
		panic("Can't handle #VC exception from unsupported context\n");
		instrumentation_end();
	}

	/*
	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
	 */
	if (vc_is_db(error_code)) {
		exc_debug(regs);
		return;
	}

	irq_state = irqentry_nmi_enter(regs);

	instrumentation_begin();

	if (!vc_raw_handle_exception(regs, error_code)) {
		/* Show some debug info */
		show_regs(regs);

		/* Ask hypervisor to sev_es_terminate */
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

		/* If that fails and we get here - just panic */
		panic("Returned from Terminate-Request to Hypervisor\n");
	}

	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);
}

/*
 * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
 * and will kill the current task with SIGBUS when an error happens.
 */
DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{
	/*
	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
	 */
	if (vc_is_db(error_code)) {
		noist_exc_debug(regs);
		return;
	}

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	if (!vc_raw_handle_exception(regs, error_code)) {
		/*
		 * Do not kill the machine if user-space triggered the
		 * exception. Send SIGBUS instead and let user-space deal with
		 * it.
		 */
		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
	}

	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}

bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
{
	unsigned long exit_code = regs->orig_ax;
	struct es_em_ctxt ctxt;
	enum es_result result;

	vc_ghcb_invalidate(boot_ghcb);

	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
	if (result == ES_OK)
		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);

	/* Done - now check the result */
	switch (result) {
	case ES_OK:
		vc_finish_insn(&ctxt);
		break;
	case ES_UNSUPPORTED:
		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
				exit_code, regs->ip);
		goto fail;
	case ES_VMM_ERROR:
		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
				exit_code, regs->ip);
		goto fail;
	case ES_DECODE_FAILED:
		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
				exit_code, regs->ip);
		goto fail;
	case ES_EXCEPTION:
		vc_early_forward_exception(&ctxt);
		break;
	case ES_RETRY:
		/* Nothing to do */
		break;
	default:
		BUG();
	}

	return true;

fail:
	show_regs(regs);

	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}

/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	/* Boot kernel would have passed the CC blob via boot_params. */
	if (bp->cc_blob_address) {
		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
		goto found_cc_info;
	}

	/*
	 * If kernel was booted directly, without the use of the
	 * boot/decompression kernel, the CC blob may have been passed via
	 * setup_data instead.
	 */
	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		snp_abort();

	return cc_info;
}

bool __init snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	setup_cpuid_table(cc_info);

	/*
	 * The CC blob will be used later to access the secrets page. Cache
	 * it here like the boot kernel does.
	 */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}

void __init snp_abort(void)
{
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}

static void dump_cpuid_table(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	int i = 0;

	pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
		cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);

	for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
			i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
			fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
	}
}

/*
 * It is useful from an auditing/testing perspective to provide an easy way
 * for the guest owner to know that the CPUID table has been initialized as
 * expected, but that initialization happens too early in boot to print any
 * sort of indicator, and there's not really any other good place to do it,
 * so do it here.
 */
static int __init report_cpuid_table(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

	if (!cpuid_table->count)
		return 0;

	pr_info("Using SNP CPUID table, %d entries present.\n",
		cpuid_table->count);

	if (sev_cfg.debug)
		dump_cpuid_table();

	return 0;
}
arch_initcall(report_cpuid_table);

static int __init init_sev_config(char *str)
{
	char *s;

	while ((s = strsep(&str, ","))) {
		if (!strcmp(s, "debug")) {
			sev_cfg.debug = true;
			continue;
		}

		pr_info("SEV command-line option '%s' was not recognized\n", s);
	}

	return 1;
}
__setup("sev=", init_sev_config);

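/*
 * Example: booting with "sev=debug" on the kernel command line sets
 * sev_cfg.debug, which makes report_cpuid_table() above dump the full SNP
 * CPUID table during boot.
 */
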
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err)
{
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	unsigned long flags;
	struct ghcb *ghcb;
	int ret;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	if (!fw_err)
		return -EINVAL;

	/*
	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
	 * a per-CPU GHCB.
	 */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);
	if (!ghcb) {
		ret = -EIO;
		goto e_restore_irq;
	}

	vc_ghcb_invalidate(ghcb);

	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
		ghcb_set_rax(ghcb, input->data_gpa);
		ghcb_set_rbx(ghcb, input->data_npages);
	}

	ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
	if (ret)
		goto e_put;

	if (ghcb->save.sw_exit_info_2) {
		/* Number of expected pages are returned in RBX */
		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
		    ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
			input->data_npages = ghcb_get_rbx(ghcb);

		*fw_err = ghcb->save.sw_exit_info_2;

		ret = -EIO;
	}

e_put:
	__sev_put_ghcb(&state);

e_restore_irq:
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(snp_issue_guest_request);

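/*
 * Usage sketch (illustrative, not a definitive API contract): a guest driver
 * such as the "sev-guest" platform device registered below fills struct
 * snp_req_data with the GPAs of its encrypted request/response pages and
 * calls:
 *
 *	unsigned long fw_err = 0;
 *	int rc = snp_issue_guest_request(SVM_VMGEXIT_GUEST_REQUEST,
 *					 &input, &fw_err);
 *
 * On failure, fw_err carries the firmware error code taken from
 * exit_info_2.
 */
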
static struct platform_device sev_guest_device = {
	.name		= "sev-guest",
	.id		= -1,
};

static int __init snp_init_platform_device(void)
{
	struct sev_guest_platform_data data;
	u64 gpa;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	gpa = get_secrets_page();
	if (!gpa)
		return -ENODEV;

	data.secrets_gpa = gpa;
	if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
		return -ENODEV;

	if (platform_device_register(&sev_guest_device))
		return -ENODEV;

	pr_info("SNP guest platform device initialized.\n");
	return 0;
}
device_initcall(snp_init_platform_device);