1 /* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/errno.h>
#include <asm/enclu.h>

#include "extable.h"
10 /* Relative to %rbp. */
11 #define SGX_ENCLAVE_OFFSET_OF_RUN 16
13 /* The offsets relative to struct sgx_enclave_run. */
14 #define SGX_ENCLAVE_RUN_TCS 0
15 #define SGX_ENCLAVE_RUN_LEAF 8
16 #define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR 12
17 #define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE 14
18 #define SGX_ENCLAVE_RUN_EXCEPTION_ADDR 16
19 #define SGX_ENCLAVE_RUN_USER_HANDLER 24
20 #define SGX_ENCLAVE_RUN_USER_DATA 32 /* not used */
21 #define SGX_ENCLAVE_RUN_RESERVED_START 40
22 #define SGX_ENCLAVE_RUN_RESERVED_END 256
27 SYM_FUNC_START(__vdso_sgx_enter_enclave)
31 .cfi_adjust_cfa_offset 8
32 .cfi_rel_offset %rbp, 0
34 .cfi_def_cfa_register %rbp
36 .cfi_rel_offset %rbx, -8
40 /* EENTER <= function <= ERESUME */
46 mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx
48 /* Validate that the reserved area contains only zeros. */
49 mov $SGX_ENCLAVE_RUN_RESERVED_START, %rbx
54 cmpq $SGX_ENCLAVE_RUN_RESERVED_END, %rbx
57 /* Load TCS and AEP */
58 mov SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
59 lea .Lasync_exit_pointer(%rip), %rcx
61 /* Single ENCLU serving as both EENTER and AEP (ERESUME) */
63 .Lenclu_eenter_eresume:
66 /* EEXIT jumps here unless the enclave is doing something fancy. */
67 mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx
69 /* Set exit_reason. */
70 movl $EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)
72 /* Invoke userspace's exit handler if one was provided. */
74 cmpq $0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
75 jne .Linvoke_userspace_handler
77 /* Success, in the sense that ENCLU was attempted. */
86 /* The out-of-line code runs with the pre-leave stack frame. */
94 mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx
96 /* Set the exception info. */
97 mov %eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
98 mov %di, (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
99 mov %si, (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
100 mov %rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
103 .Linvoke_userspace_handler:
104 /* Pass the untrusted RSP (at exit) to the callback via %rcx. */
107 /* Save struct sgx_enclave_exception %rbx is about to be clobbered. */
110 /* Save the untrusted RSP offset in %rbx (non-volatile register). */
115 * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
116 * _after_ pushing the parameters on the stack, hence the bonus push.
121 /* Push struct sgx_enclave_exception as a param to the callback. */
124 /* Clear RFLAGS.DF per x86_64 ABI */
128 * Load the callback pointer to %rax and lfence for LVI (load value
129 * injection) protection before making the call.
131 mov SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
135 /* Undo the post-exit %rsp adjustment. */
136 lea 0x10(%rsp, %rbx), %rsp
139 * If the return from callback is zero or negative, return immediately,
140 * else re-execute ENCLU with the positive return value interpreted as
141 * the requested ENCLU function.
149 _ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)
151 SYM_FUNC_END(__vdso_sgx_enter_enclave)