// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)     "tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
/* TDX module Call Leaf IDs */
#define TDX_GET_INFO			1
#define TDX_GET_VEINFO			3
#define TDX_GET_REPORT			4
#define TDX_ACCEPT_PAGE			6

/* TDX hypercall Leaf IDs */
#define TDVMCALL_MAP_GPA		0x10001
/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1
/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))
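/*
 * TD attribute bit (TDG.VP.INFO RDX): when set, Secure-EPT violations on
 * private memory cause a TD exit instead of delivering a #VE to the guest.
 */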
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100

#define TDREPORT_SUBTYPE_0	0
/*
 * Wrapper for standard use of __tdx_hypercall with no output aside from
 * the return code.
 */
static inline u64 _tdx_hypercall(u64 fn, u64 r12, u64 r13, u64 r14, u64 r15)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = fn,
		.r12 = r12,
		.r13 = r13,
		.r14 = r14,
		.r15 = r15,
	};

	return __tdx_hypercall(&args, 0);
}
/* Called from __tdx_hypercall() for unrecoverable failure */
void __tdx_hypercall_failed(void)
{
	panic("TDVMCALL failed. TDX module bug?");
}
/*
 * The TDG.VP.VMCALL-Instruction-execution sub-functions are defined
 * independently from but are currently matched 1:1 with VMX EXIT_REASONs.
 * Reusing the KVM EXIT_REASON macros makes it easier to connect the host and
 * guest sides of these calls.
 */
static u64 hcall_func(u64 exit_reason)
{
	return exit_reason;
}
#ifdef CONFIG_KVM_GUEST
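/*
 * Vendor-specific (KVM) TDVMCALL: r10 carries the KVM hypercall number
 * instead of TDX_HYPERCALL_STANDARD, and r11-r14 carry the arguments.
 */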
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_hypercall_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args, 0);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif
/*
 * Used for TDX guests to make calls directly to the TD module.  This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9,
				   struct tdx_module_output *out)
{
	if (__tdx_module_call(fn, rcx, rdx, r8, r9, out))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}
/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return: 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	u64 ret;

	ret = __tdx_module_call(TDX_GET_REPORT, virt_to_phys(tdreport),
				virt_to_phys(reportdata), TDREPORT_SUBTYPE_0,
				0, NULL);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
static void tdx_parse_tdinfo(u64 *cc_mask)
{
	struct tdx_module_output out;
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdx_module_call(TDX_GET_INFO, 0, 0, 0, 0, &out);
	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
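	/*
	 * For example, a TD with a 48-bit GPA width keeps the shared bit in
	 * bit 47, so cc_mask below becomes BIT_ULL(47).
	 */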
	gpa_width = out.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);
	/*
	 * The kernel can not handle #VE's when accessing normal kernel
	 * memory.  Ensure that no #VE will be delivered for accesses to
	 * TD-private memory.  Only VMM-shared memory (MMIO) will #VE.
	 */
	td_attr = out.rdx;
	if (!(td_attr & ATTR_SEPT_VE_DISABLE))
		panic("TD misconfiguration: SEPT_VE_DISABLE attribute must be set.\n");
}
/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instruction execution */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->instr_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}
static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
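	/*
	 * TDX_HCALL_ISSUE_STI makes the TDCALL path execute STI right before
	 * the TDCALL, mirroring the native "STI;HLT" safe-halt sequence.
	 */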
	return __tdx_hypercall(&args, do_sti ? TDX_HCALL_ISSUE_STI : 0);
}
static int handle_halt(struct ve_info *ve)
{
	/*
	 * Since non-safe halt is mainly used in CPU offlining and the guest
	 * will stay in the halt state, don't issue the STI instruction (set
	 * do_sti to false).
	 */
	const bool irq_disabled = irqs_disabled();
	const bool do_sti = false;

	if (__halt(irq_disabled, do_sti))
		return -EIO;

	return ve_instr_len(ve);
}
void __cpuidle tdx_safe_halt(void)
{
	/*
	 * For the do_sti=true case, __tdx_hypercall() enables interrupts
	 * using the STI instruction before the TDCALL. So set irq_disabled
	 * to false.
	 */
	const bool irq_disabled = false;
	const bool do_sti = true;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled, do_sti))
		WARN_ONCE(1, "HLT instruction emulation failed\n");

	/*
	 * The hypercall above returns with interrupts enabled (STI was
	 * issued before the TDCALL), so disable them again before returning.
	 * Use the raw_ variant: "__cpuidle" code must not call instrumented
	 * (traceable) helpers.
	 */
	raw_local_irq_disable();
}
static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}
static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args, 0))
		return -EIO;

	return ve_instr_len(ve);
}
static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. It matches CPU
	 * behaviour for unsupported leaves.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}
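/*
 * MMIO hypercall ABI (GHCI MMIO request, "TDG.VP.VMCALL<#VE.RequestMMIO>"):
 * r12 carries the access size, r13 the direction, r14 the guest physical
 * address and, for writes, r15 the value. Reads return the value in r11.
 */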
static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
		.r15 = *val,
	};

	if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
		return false;

	*val = args.r11;
	return true;
}
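/* MMIO writes need no output registers, so the plain hypercall wrapper suffices. */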
static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}
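/*
 * Emulate the MMIO instruction that triggered the EPT-violation #VE: fetch
 * and decode the instruction at regs->ip, forward the access to the VMM via
 * mmio_read()/mmio_write(), and return the instruction length so that RIP
 * can be advanced (or -errno on failure).
 */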
static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	struct insn insn = {};
	enum mmio_type mmio;
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case MMIO_READ:
	case MMIO_READ_ZERO_EXTEND:
	case MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case MMIO_MOVS:
	case MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered by the first switch() */
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}
static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}
static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}
/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the number of bytes to advance RIP on success, or -errno on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;
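	/* String I/O (INS/OUTS) is not supported by this emulation path */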
	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in   = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}
/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. If failed, return false.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}
void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_output out;

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling.  A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * a #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdx_module_call(TDX_GET_VEINFO, 0, 0, 0, 0, &out);

	/* Transfer the output parameters */
	ve->exit_reason = out.rcx;
	ve->exit_qual   = out.rdx;
	ve->gla         = out.r8;
	ve->gpa         = out.r9;
	ve->instr_len   = lower_32_bits(out.r10);
	ve->instr_info  = upper_32_bits(out.r10);
}
/*
 * Handle the user-initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}
/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}
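/*
 * Central #VE dispatcher: route the exception to the user or kernel handler
 * and, on success, advance RIP past the instruction that triggered it.
 */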
bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}
static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID.  Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}
static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}
static bool try_accept_one(phys_addr_t *start, unsigned long len,
			   enum pg_level pg_level)
{
	unsigned long accept_size = page_level_size(pg_level);
	u64 tdcall_rcx;
	u8 page_size;

	if (!IS_ALIGNED(*start, accept_size))
		return false;

	if (len < accept_size)
		return false;

	/*
	 * Pass the page physical address to the TDX module to accept the
	 * pending, private page.
	 *
	 * Bits 2:0 of RCX encode page size: 0 - 4K, 1 - 2M, 2 - 1G.
	 */
	switch (pg_level) {
	case PG_LEVEL_4K:
		page_size = 0;
		break;
	case PG_LEVEL_2M:
		page_size = 1;
		break;
	case PG_LEVEL_1G:
		page_size = 2;
		break;
	default:
		return false;
	}

	tdcall_rcx = *start | page_size;
	if (__tdx_module_call(TDX_ACCEPT_PAGE, tdcall_rcx, 0, 0, 0, NULL))
		return false;

	*start += accept_size;
	return true;
}
/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest.  The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end |= cc_mkdec(0);
	}

	/*
	 * Notify the VMM about page mapping conversion. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface (GHCI),
	 * section "TDG.VP.VMCALL<MapGPA>".
	 */
	if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0))
		return false;

	/* A private->shared conversion requires only the MapGPA call */
	if (!enc)
		return true;

	/*
	 * For shared->private conversion, accept the page using
	 * TDX_ACCEPT_PAGE TDX module call.
	 */
	while (start < end) {
		unsigned long len = end - start;

		/*
		 * Try larger accepts first. It gives a chance to the VMM to
		 * keep 1G/2M SEPT entries where possible and speeds up the
		 * process by cutting the number of hypercalls (if successful).
		 */
		if (try_accept_one(&start, len, PG_LEVEL_1G))
			continue;

		if (try_accept_one(&start, len, PG_LEVEL_2M))
			continue;

		if (!try_accept_one(&start, len, PG_LEVEL_4K))
			return false;
	}

	return true;
}
void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];

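	/*
	 * The TDX guest signature comes back in EBX/EDX/ECX order, hence the
	 * sig[0]/sig[2]/sig[1] swizzle that lays TDX_IDENT out contiguously.
	 */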
	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);
	cc_set_vendor(CC_VENDOR_INTEL);
	tdx_parse_tdinfo(&cc_mask);
	cc_set_mask(cc_mask);
	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
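	/*
	 * E.g. with a 48-bit GPA width, cc_mask - 1 keeps bits 0-46 and
	 * drops the shared bit from the physical address mask.
	 */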
	physical_mask &= cc_mask - 1;

	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;
	x86_platform.guest.enc_status_change_finish = tdx_enc_status_changed;

	pr_info("Guest detected\n");
}