// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#define pr_fmt(fmt)	"tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))

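/*
 * Worked example (illustrative, not from the VMX spec text): a one-byte
 * "inb" from the 0x3f8 serial port raises a #VE with
 * exit_qual == 0x03f80008, which decodes as:
 *
 *	VE_GET_IO_SIZE(0x03f80008)  == (0x8 & GENMASK(2, 0)) + 1 == 1
 *	VE_IS_IO_IN(0x03f80008)     == BIT(3)  -> an IN access
 *	VE_IS_IO_STRING(0x03f80008) == 0       -> not a string op
 *	VE_GET_PORT_NUM(0x03f80008) == 0x3f8
 */
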
#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100

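/*
 * Example (illustrative): a TDCALL status of 0xc000010000000000 carries
 * its error class in the upper 32 bits, so
 * TDCALL_RETURN_CODE(0xc000010000000000) == 0xc0000100, i.e.
 * TDCALL_INVALID_OPERAND; the low 32 bits carry call-specific detail.
 */
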
#define TDREPORT_SUBTYPE_0	0

/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __noreturn __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_module_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif

/*
 * Used for TDX guests to make calls directly to the TD module. This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel cannot survive the call failing.
 */
static inline void tdcall(u64 fn, struct tdx_module_args *args)
{
	if (__tdcall_ret(fn, args))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}

/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	struct tdx_module_args args = {
		.rcx = virt_to_phys(tdreport),
		.rdx = virt_to_phys(reportdata),
		.r8 = TDREPORT_SUBTYPE_0,
	};
	u64 ret;

	ret = __tdcall(TDG_MR_REPORT, &args);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);

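/*
 * A minimal usage sketch (illustrative only; the real consumer is the TDX
 * guest driver's report ioctl). Buffer sizes follow the TDX module spec:
 * 64 bytes of REPORTDATA in, a 1024-byte TDREPORT out. Both buffers must
 * be kernel-linear so that virt_to_phys() works:
 *
 *	u8 *reportdata = kzalloc(64, GFP_KERNEL);
 *	u8 *tdreport = kzalloc(1024, GFP_KERNEL);
 *
 *	if (reportdata && tdreport &&
 *	    !tdx_mcall_get_report0(reportdata, tdreport))
 *		pr_info("TDREPORT0 generated\n");
 */
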
static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char str[64];
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strtomem_pad(message.str, msg, '\0');

	args.r8  = message.r8;
	args.r9  = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}

static void tdx_parse_tdinfo(u64 *cc_mask)
{
	struct tdx_module_args args = {};
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdcall(TDG_VP_INFO, &args);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * cannot meaningfully run without it.
	 */
	gpa_width = args.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);

	/*
	 * The kernel cannot handle #VE's when accessing normal kernel
	 * memory. Ensure that no #VE will be delivered for accesses to
	 * TD-private memory. Only VMM-shared memory (MMIO) will #VE.
	 */
	td_attr = args.rdx;
	if (!(td_attr & ATTR_SEPT_VE_DISABLE)) {
		const char *msg = "TD misconfiguration: SEPT_VE_DISABLE attribute must be set.";

		/* Relax SEPT_VE_DISABLE check for debug TD. */
		if (td_attr & ATTR_DEBUG)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
	}
}

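/*
 * Worked example (illustrative): TDX 1.0 defines a GPA width of 48 or 52.
 * For gpa_width == 52, tdx_parse_tdinfo() yields cc_mask == BIT_ULL(51):
 * setting bit 51 in a guest physical address marks a page as shared with
 * the VMM; clearing it marks the page as private.
 */
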
/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->insn_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}

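/*
 * Worked example (illustrative): a WRMSR with EDX=0x00000002 and
 * EAX=0x00000001 packs to .r13 == ((u64)0x2 << 32) | 0x1 ==
 * 0x0000000200000001, matching the architectural EDX:EAX layout.
 */
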
static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. It matches CPU
	 * behaviour for unsupported leaves.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
		.r15 = *val,
	};

	if (__tdx_hypercall(&args))
		return false;

	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}

static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered with the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

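/*
 * Worked example (illustrative) for the sign-extending path above:
 * emulating "movsx eax, byte ptr [mmio]" where the device returns 0x80
 * gives size == 1 and insn.opnd_bytes == 4. Bit 7 of the value is set,
 * so the four destination bytes are first memset() to 0xFF and the single
 * data byte is then copied in, leaving 0xFFFFFF80 in the low dword of *reg.
 */
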
static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall(&args);

	/* Update part of the register affected by the emulated instruction */
	if (success) {
		regs->ax &= ~mask;
		regs->ax |= args.r11 & mask;
	}

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the instruction length on success or -EIO on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in   = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

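/*
 * Illustrative flow, continuing the exit-qualification example near the
 * top of the file: exit_qual == 0x03f80008 is a non-string, one-byte IN
 * from port 0x3f8, so handle_io() calls handle_in(regs, 1, 0x3f8) and,
 * on success, returns ve_instr_len(ve) so the caller can skip past the
 * emulated instruction.
 */
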
/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. Returns false on failure.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_args args = {};

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdcall(TDG_VP_VEINFO_GET, &args);

	/* Transfer the output parameters */
	ve->exit_reason = args.rcx;
	ve->exit_qual   = args.rdx;
	ve->gla         = args.r8;
	ve->gpa         = args.r9;
	ve->instr_len   = lower_32_bits(args.r10);
	ve->instr_info  = upper_32_bits(args.r10);
}

/*
 * Handle the user initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}

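/*
 * Example (illustrative): with cc_mask == BIT_ULL(51), cc_mkenc() clears
 * bit 51. A GPA of 0x8001000000 (bit 51 clear) is unchanged by cc_mkenc()
 * and is therefore private; a GPA with bit 51 set is shared.
 */
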
/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

/*
 * Notify the VMM about page mapping conversion. More info about ABI
 * can be found in TDX Guest-Host-Communication Interface (GHCI),
 * section "TDG.VP.VMCALL<MapGPA>".
 */
static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc)
{
	/* Retrying the hypercall a second time should succeed; use 3 just in case */
	const int max_retries_per_page = 3;
	int retry_count = 0;

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end   |= cc_mkdec(0);
	}

	while (retry_count < max_retries_per_page) {
		struct tdx_module_args args = {
			.r10 = TDX_HYPERCALL_STANDARD,
			.r11 = TDVMCALL_MAP_GPA,
			.r12 = start,
			.r13 = end - start };

		u64 map_fail_paddr;
		u64 ret = __tdx_hypercall(&args);

		if (ret != TDVMCALL_STATUS_RETRY)
			return !ret;
		/*
		 * The guest must retry the operation for the pages in the
		 * region starting at the GPA specified in R11. R11 comes
		 * from the untrusted VMM. Sanity check it.
		 */
		map_fail_paddr = args.r11;
		if (map_fail_paddr < start || map_fail_paddr >= end)
			return false;

		/* "Consume" a retry without forward progress */
		if (map_fail_paddr == start) {
			retry_count++;
			continue;
		}

		start = map_fail_paddr;
	}

	return false;
}

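/*
 * Illustrative retry flow: converting [0x10000000, 0x10400000) may come
 * back with TDVMCALL_STATUS_RETRY and args.r11 == 0x10200000. Since that
 * GPA made forward progress past 'start', the loop resumes from
 * 0x10200000 without consuming one of the three attempts; only a retry
 * that reports the same GPA as 'start' burns a retry.
 */
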
/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end   = __pa(vaddr + numpages * PAGE_SIZE);

	if (!tdx_map_gpa(start, end, enc))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}

static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					  bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);

	return true;
}

static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);

	return true;
}

void __init tdx_early_init(void)
{
	struct tdx_module_args args = {
		.rdx = TDCS_NOTIFY_ENABLES,
		.r9 = -1ULL,
	};
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	/* TSC is the only reliable clock in TDX guest */
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

	cc_vendor = CC_VENDOR_INTEL;
	tdx_parse_tdinfo(&cc_mask);
	cc_set_mask(cc_mask);

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdcall(TDG_VM_WR, &args);

	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;
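	/*
	 * Example (illustrative): with cc_mask == BIT_ULL(51), the line above
	 * keeps GPA bits 0-50 in physical_mask (cc_mask - 1 == GENMASK_ULL(50, 0)),
	 * so the shared bit and everything above it are never treated as
	 * address bits.
	 */
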
	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches. Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE. But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required  = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required    = tdx_tlb_flush_required;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	pr_info("Guest detected\n");
}