/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/ptrace-abi.h>
/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
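/*
 * Illustration only (not used by this header): given a hypothetical 64-bit
 * function
 *
 *	long f(long a, long b, long c);
 *
 * a call site is assembled roughly as:
 *
 *	movq	$1, %rdi	# a
 *	movq	$2, %rsi	# b
 *	movq	$3, %rdx	# c
 *	call	f		# result returned in %rax
 *
 * where f may clobber %r10-%r11 freely but must preserve %rbx, %rbp and
 * %r12-%r15.
 */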
#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */
.macro PUSH_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq	\rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->bx */
	pushq	%rbp		/* pt_regs->bp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm
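/*
 * Layout sketch (illustration only): with save_ret=0 the pushes above fill
 * a struct pt_regs top-down, leaving %rsp pointing at pt_regs.r15, i.e. at
 * the start of the structure:
 *
 *	PUSH_REGS
 *	movq	%rsp, %rdi	# %rsp == &pt_regs, pass it to a C handler
 */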
.macro CLEAR_REGS
	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%esi,  %esi	/* nospec si  */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx,  %ebx	/* nospec rbx */
	xorl	%ebp,  %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */
.endm
.macro PUSH_AND_CLEAR_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
	PUSH_REGS rdx=\rdx, rcx=\rcx, rax=\rax, save_ret=\save_ret
	CLEAR_REGS
.endm
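/*
 * Usage sketch (illustration only, modeled on the syscall entry path):
 *
 *	PUSH_AND_CLEAR_REGS rax=$-ENOSYS
 *
 * saves the remaining registers into pt_regs (with pt_regs->ax preloaded
 * to -ENOSYS) and zeroes everything an attacker could feed into a
 * speculation gadget.
 */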
.macro POP_REGS pop_rdi=1
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbp
	popq	%rbx
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi
	.if \pop_rdi
	popq	%rdi
	.endif
.endm
#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK	(PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
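/*
 * Worked example (illustration only): with 4k pages PAGE_SHIFT is 12 and
 * X86_CR3_PTI_PCID_USER_BIT is 11, so PTI_USER_PGTABLE_AND_PCID_MASK is
 * (1 << 12) | (1 << 11) = 0x1800. For a kernel PGD at physical 0x4000,
 * the matching user CR3 value is 0x4000 | 0x1800 = 0x5800: bit 12 selects
 * the user half of the 8k PGD allocation, bit 11 the user PCID.
 */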
.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm
.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
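/*
 * Usage sketch (illustration only): entry code runs this before touching
 * kernel data, using any register whose user value is already saved:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 *
 * When X86_FEATURE_PTI is disabled, the default "jmp .Lend_\@" skips the
 * body entirely; with PTI enabled the ALTERNATIVE patches it to a nop and
 * the CR3 switch runs.
 */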
#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm
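/*
 * Design note: the _STACK variant spends a push/pop of %rax instead of
 * requiring a second free register, so it is only usable where the stack
 * is valid. Exit-path sketch (illustration only):
 *
 *	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
 *	swapgs
 *	iretq
 */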
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear CR3 already has the kernel page table
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
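/*
 * Pairing sketch (illustration only): entries that can interrupt the
 * kernel anywhere save the incoming CR3 and restore it verbatim, so a
 * nested entry that arrived on the user CR3 round-trips correctly:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...			# handler runs on kernel page tables
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */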
#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif /* CONFIG_PAGE_TABLE_ISOLATION */
/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm
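/*
 * Placement sketch (illustration only, labels hypothetical):
 *
 *	testb	$3, CS(%rsp)		# from user space?
 *	jz	.Lfrom_kernel
 *	swapgs
 *	FENCE_SWAPGS_USER_ENTRY		# fence the taken-swapgs path
 *	jmp	.Lcommon
 * .Lfrom_kernel:
 *	FENCE_SWAPGS_KERNEL_ENTRY	# fence the skipped-swapgs path
 * .Lcommon:
 */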
.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call	stackleak_erase
	POP_REGS
#endif
.endm
.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm
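/*
 * Usage sketch (illustration only, FSGSBASE-capable CPUs): paranoid entry
 * saves the interrupted context's GSBASE and installs this CPU's base, and
 * the exit path undoes it:
 *
 *	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
 *	...
 *	wrgsbase %rbx		# restore the saved GSBASE
 */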
#else /* CONFIG_X86_64 */
# undef	UNWIND_HINT_IRET_REGS
# define UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */
.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call	stackleak_erase
#endif
.endm
#ifdef CONFIG_SMP

/*
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm
/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously can not use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm
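/*
 * Worked example (illustration only): on CPU 3, LOAD_CPU_AND_NODE_SEG_LIMIT
 * reads the CPUNODE segment limit, whose low bits encode the CPU number;
 * masking with VDSO_CPUNODE_MASK leaves 3, and the indexed load then
 * fetches __per_cpu_offset[3], this CPU's per-CPU base address.
 */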
#else /* !CONFIG_SMP */
.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm
#endif /* CONFIG_SMP */