/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - SYM_FUNC_START/END: Define functions in the symbol table.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <asm/fsgsbase.h>
#include <linux/err.h>

.section .entry.text, "ax"
#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs				/* back to the user GS base */
	sysretq				/* return to user mode */
SYM_CODE_END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls. The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries. There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
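/*
 * For illustration only (not part of the entry code): a user-space
 * write(2) issued through the generic syscall(2) wrapper reaches this
 * entry point with the register mapping above. In C, roughly (fd, buf
 * and count are hypothetical variables):
 *
 *	// rax = __NR_write, rdi = fd, rsi = buf, rdx = count;
 *	// SYSCALL then stashes the return rip in rcx and rflags in r11.
 *	syscall(__NR_write, fd, buf, count);
 */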
SYM_CODE_START(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY

	swapgs				/* to the kernel GS base */
	/* tss.sp2 is scratch space. */
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	/* IRQs are off. */
	movq	%rax, %rdi		/* arg0: syscall number */
	movq	%rsp, %rsi		/* arg1: pt_regs pointer */
	call	do_syscall_64		/* returns with IRQs disabled */
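	/*
	 * Illustrative C-side counterpart of the call above (a sketch;
	 * the real definition for this kernel generation lives in
	 * arch/x86/entry/common.c):
	 *
	 *	__visible noinstr void do_syscall_64(unsigned long nr,
	 *					     struct pt_regs *regs);
	 */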
	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context. If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode
	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space. This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode
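	/*
	 * For illustration only: the shl/sar pair above is a
	 * two-instruction sign-extension form of this C sketch (shown
	 * for 48-bit virtual addresses; LA57 uses 57 bits):
	 *
	 *	static inline bool is_canonical(u64 addr)
	 *	{
	 *		// sign-extend from bit 47 and compare
	 *		return (u64)(((s64)(addr << 16)) >> 16) == addr;
	 *	}
	 */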
	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode
	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET. This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions. For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode
	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode
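	/*
	 * Illustrative summary of the opportunistic-SYSRET checks above
	 * (a C sketch, not the kernel's implementation):
	 *
	 *	bool can_sysret(struct pt_regs *r)
	 *	{
	 *		return r->cx == r->ip && is_canonical(r->ip) &&
	 *		       r->cs == __USER_CS && r->ss == __USER_DS &&
	 *		       r->flags == r->r11 &&
	 *		       !(r->r11 & (X86_EFLAGS_RF | X86_EFLAGS_TF));
	 *	}
	 */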
	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	POP_REGS pop_rdi=0 skip_r11rcx=1
	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack. All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi		/* restore user RDI */
	popq	%rsp		/* back to the user stack */
	USERGS_SYSRET64
SYM_CODE_END(entry_SYSCALL_64)
/*
 * %rdi: prev task
 * %rsi: next task
 */
.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
SYM_FUNC_END(__switch_to_asm)
.popsection
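/*
 * Illustrative C view of the stack switch above (a sketch; the saved
 * field is thread_struct::sp, accessed via the TASK_threadsp offset):
 *
 *	prev->thread.sp = rsp;	// rsp has prev's callee-saved regs pushed
 *	rsp = next->thread.sp;	// popping now yields next's registers
 */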
/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_exit_to_user_mode	/* returns with IRQs disabled */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling kernel_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection
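/*
 * Illustrative C equivalent of the dispatch above (a sketch):
 *
 *	schedule_tail(prev);			// prev arrives in rax
 *	if (kthread_fn) {			// rbx
 *		kthread_fn(kthread_arg);	// r12
 *		regs->ax = 0;			// returned: kernel_execve() succeeded
 *	}
 *	syscall_exit_to_user_mode(regs);	// then IRET to user mode
 */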
.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm
/**
 * idtentry_body - Macro to emit code calling the C function
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry_body cfunc has_error_code:req

	call	error_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi			/* pt_regs pointer into 1st argument */

	.if \has_error_code == 1
		movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
		movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	.endif

	call	\cfunc

	jmp	error_return
.endm
/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 *
 * The macro emits code to set up the kernel context for straightforward
 * and simple IDT entries. No IST stack, no paranoid entry checks.
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
	ASM_CLAC

	.if \has_error_code == 0
		pushq	$-1			/* ORIG_RAX: no syscall to restart */
	.endif

	.if \vector == X86_TRAP_BP
		/*
		 * If coming from kernel space, create a 6-word gap to allow the
		 * int3 handler to emulate a call instruction.
		 */
		testb	$3, CS-ORIG_RAX(%rsp)
		jnz	.Lfrom_usermode_no_gap_\@
		.rept	6
		pushq	5*8(%rsp)
		.endr
		UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif

	idtentry_body \cfunc \has_error_code

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
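/*
 * Usage example, for illustration only: asm/idtentry.h emits stubs
 * through this macro, e.g. for a simple exception without a hardware
 * error code such as divide error:
 *
 *	idtentry X86_TRAP_DE asm_exc_divide_error exc_divide_error has_error_code=0
 */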
/*
 * Interrupt entry/exit.
 *
 * The interrupt stubs push (vector) onto the stack, which is the error_code
 * position of idtentry exceptions, and jump to one of the two idtentry points
 * (common/spurious).
 *
 * common_interrupt is a hotpath, align it to a cache line
 */
.macro idtentry_irq vector cfunc
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	idtentry \vector asm_\cfunc \cfunc has_error_code=1
.endm
/*
 * System vectors which invoke their handlers directly and are not
 * going through the regular common device interrupt handling code.
 */
.macro idtentry_sysvec vector cfunc
	idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm
/**
 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #MC and #DB.
 *
 * If the entry comes from user space it uses the normal entry path
 * including the return to user space work and preemption checks on
 * exit.
 *
 * If it hits in kernel mode then it needs to go through the paranoid
 * entry as the exception can hit any random state. No preemption
 * check on exit to keep the paranoid path simple.
 */
.macro idtentry_mce_db vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC

	pushq	$-1			/* ORIG_RAX: no syscall to restart */

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry

	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer */

	call	\cfunc

	jmp	paranoid_exit

	/* Switch to the regular task stack and use the noist entry point */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body noist_\cfunc, has_error_code=0

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
/*
 * Double fault entry. Straight paranoid. No checks from which context
 * this comes because for the espfix induced #DF this would do the wrong
 * thing.
 */
.macro idtentry_df vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=8
	ASM_CLAC

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer into first argument */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	call	\cfunc

	jmp	paranoid_exit

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit and emit the __irqentry_text_* markers
 * so the stacktrace boundary checks work.
 */
	.align 16
	.globl __irqentry_text_start
__irqentry_text_start:

#include <asm/idtentry.h>

	.align 16
	.globl __irqentry_text_end
__irqentry_text_end:
SYM_CODE_START_LOCAL(common_interrupt_return)
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack. All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN
SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN
SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
	/*
	 * This may fault. Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in exc_double_fault.
	 * Other faults here are fatal.
	 */
	iretq
#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE. All GPRs contain their user
	 * values. We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code. We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP <-- RSP points here when we're done
	 * RAX <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */
	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */
	/*
	 * espfix_stack[31:16] == 0. The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X. That is, there are 65536 RO aliases of
	 * the same page. Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax
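	/*
	 * For illustration only, the address computation above in C:
	 *
	 *	// Bits 31:16 of the user RSP pick one of the 65536 RO
	 *	// aliases; the low 16 bits select the slot within it.
	 *	new_rsp = espfix_stack | (user_rsp & 0xffff0000UL);
	 */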
	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values. We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
SYM_CODE_END(common_interrupt_return)
_ASM_NOKPROBE(common_interrupt_return)
/*
 * Reload gs selector with exception handling
 * edi:  new selector
 *
 * Is in entry.text as it shouldn't be instrumented.
 */
SYM_FUNC_START(asm_load_gs_index)
	FRAME_BEGIN
	swapgs
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	swapgs
	FRAME_END
	ret
SYM_FUNC_END(asm_load_gs_index)
EXPORT_SYMBOL(asm_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
	swapgs					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
SYM_CODE_END(.Lbad_gs)
	.previous
/*
 * rdi: New stack pointer points to the top word of the stack
 * rsi: Function pointer
 * rdx: Function argument (can be NULL if none)
 */
SYM_FUNC_START(asm_call_on_stack)
	/*
	 * Save the frame pointer unconditionally. This allows the ORC
	 * unwinder to handle the stack switch.
	 */
	pushq	%rbp
	mov	%rsp, %rbp

	/*
	 * The unwinder relies on the word at the top of the new stack
	 * page linking back to the previous RSP.
	 */
	mov	%rsp, (%rdi)
	mov	%rdi, %rsp
	/* Move the argument to the right place */
	mov	%rdx, %rdi

1:
	.pushsection .discard.instr_begin
	.long 1b - .
	.popsection

	CALL_NOSPEC	rsi

2:
	.pushsection .discard.instr_end
	.long 2b - .
	.popsection

	/* Restore the previous stack pointer from RBP. */
	leaveq
	ret
SYM_FUNC_END(asm_call_on_stack)
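/*
 * Illustrative C-level view of this helper (a sketch of the calling
 * convention documented above, not a declaration from a header):
 *
 *	void asm_call_on_stack(void *stack,		// rdi
 *			       void (*func)(void *),	// rsi
 *			       void *arg);		// rdx
 */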
#ifdef CONFIG_XEN_PV
/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 *
 * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
 */
SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)

/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	call	xen_pv_evtchn_do_upcall

	jmp	error_return
SYM_CODE_END(exc_xen_hypervisor_callback)
/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
SYM_CODE_START(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	asm_exc_general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1 /* orig_ax = -1 => not a system call */
	PUSH_AND_CLEAR_REGS
	ENCODE_FRAME_POINTER
	jmp	error_return
SYM_CODE_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */
/*
 * Save all registers in pt_regs. Return GSBASE related information
 * in EBX depending on the availability of the FSGSBASE instructions:
 *
 * FSGSBASE	R/EBX
 *     N        0 -> SWAPGS on exit
 *              1 -> no SWAPGS on exit
 *
 *     Y        GSBASE value at entry, must be restored in paranoid_exit
 */
SYM_CODE_START_LOCAL(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	/*
	 * Always stash CR3 in %r14. This value will be restored,
	 * verbatim, at exit. Needed if paranoid_entry interrupted
	 * another entry that already switched to the user CR3 value
	 * but has not yet returned to userspace.
	 *
	 * This is also why CS (stashed in the "iret frame" by the
	 * hardware at entry) can not be used: this may be a return
	 * to kernel code, but with a user CR3 value.
	 *
	 * Switching CR3 does not depend on kernel GSBASE so it can
	 * be done before switching to the kernel GSBASE. This is
	 * required for FSGSBASE because the kernel GSBASE has to
	 * be retrieved from a kernel internal table.
	 */
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
	/*
	 * Handling GSBASE depends on the availability of FSGSBASE.
	 *
	 * Without FSGSBASE the kernel enforces that negative GSBASE
	 * values indicate kernel GSBASE. With FSGSBASE no assumptions
	 * can be made about the GSBASE value when entering from user
	 * space.
	 */
	ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE

	/*
	 * Read the current GSBASE and store it in %rbx unconditionally,
	 * retrieve and set the current CPUs kernel GSBASE. The stored value
	 * has to be restored in paranoid_exit unconditionally.
	 *
	 * The MSR write ensures that no subsequent load is based on a
	 * mispredicted GSBASE. No extra FENCE required.
	 */
	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
	ret
.Lparanoid_entry_checkgs:
	/* EBX = 1 -> kernel GSBASE active, no restore required */
	movl	$1, %ebx
	/*
	 * The kernel-enforced convention is a negative GSBASE indicates
	 * a kernel value. No SWAPGS needed on entry and exit.
	 */
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	jns	.Lparanoid_entry_swapgs
	ret

.Lparanoid_entry_swapgs:
	SWAPGS
	/*
	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
	 * unconditional CR3 write, even in the PTI case. So do an lfence
	 * to prevent GS speculation, regardless of whether PTI is enabled.
	 */
	FENCE_SWAPGS_KERNEL_ENTRY

	/* EBX = 0 -> SWAPGS required on exit */
	xorl	%ebx, %ebx
	ret
SYM_CODE_END(paranoid_entry)
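/*
 * Illustrative pseudocode for the EBX convention established above
 * (non-FSGSBASE case; a sketch, not kernel code):
 *
 *	if ((s64)rdmsr(MSR_GS_BASE) < 0) {
 *		ebx = 1;	// already kernel GSBASE; no SWAPGS on exit
 *	} else {
 *		swapgs();	// switch to kernel GSBASE
 *		ebx = 0;	// paranoid_exit must SWAPGS back
 *	}
 */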
878 * "Paranoid" exit path from exception stack. This is invoked
879 * only on return from non-NMI IST interrupts that came
882 * We may be returning to very strange contexts (e.g. very early
883 * in syscall entry), so checking for preemption here would
884 * be complicated. Fortunately, there's no good reason to try
885 * to handle preemption here.
887 * R/EBX contains the GSBASE related information depending on the
888 * availability of the FSGSBASE instructions:
891 * N 0 -> SWAPGS on exit
892 * 1 -> no SWAPGS on exit
894 * Y User space GSBASE, must be restored unconditionally
SYM_CODE_START_LOCAL(paranoid_exit)
	UNWIND_HINT_REGS
	/*
	 * The order of operations is important. RESTORE_CR3 requires
	 * kernel GSBASE.
	 *
	 * NB to anyone trying to optimize this code: this code does
	 * not execute at all for exceptions from user mode. Those
	 * exceptions go through error_exit instead.
	 */
	RESTORE_CR3	scratch_reg=%rax save_reg=%r14

	/* Handle the three GSBASE cases */
	ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE

	/* With FSGSBASE enabled, unconditionally restore GSBASE */
	wrgsbase	%rbx
	jmp	restore_regs_and_return_to_kernel

.Lparanoid_exit_checkgs:
	/* On non-FSGSBASE systems, conditionally do SWAPGS */
	testl	%ebx, %ebx
	jnz	restore_regs_and_return_to_kernel

	/* We are returning to a context with user GSBASE */
	SWAPGS_UNSAFE_STACK
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(paranoid_exit)
/*
 * Save all registers in pt_regs, and switch GS if needed.
 */
SYM_CODE_START_LOCAL(error_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/* We have user CR3. Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

.Lerror_entry_from_usermode_after_swapgs:
	/* Put us onto the real thread stack. */
	popq	%r12				/* save return addr in %r12 */
	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */
	ENCODE_FRAME_POINTER
	pushq	%r12
	ret
.Lerror_entry_done_lfence:
	FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here. B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done_lfence
	/*
	 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
	 * gsbase and proceed. We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	jmp	.Lerror_entry_done

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user
	 * gsbase and CR3. Switch to kernel gsbase and CR3:
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	jmp	.Lerror_entry_from_usermode_after_swapgs
SYM_CODE_END(error_entry)
SYM_CODE_START_LOCAL(error_return)
	UNWIND_HINT_REGS
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	testb	$3, CS(%rsp)
	jz	restore_regs_and_return_to_kernel
	jmp	swapgs_restore_regs_and_return_to_usermode
SYM_CODE_END(error_return)
/*
 * Runs on exception stack. Xen PV does not go through this path at all,
 * so we can use real assembly here.
 *
 * Registers:
 *	%r14: Used to save/restore the CR3 of the interrupted context
 *	      when PAGE_TABLE_ISOLATION is in use. Do not clobber.
 */
SYM_CODE_START(asm_exc_nmi)
	UNWIND_HINT_IRET_REGS
	/*
	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI. We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI. NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *  Check a special location on the stack that contains
	 *  a variable that is set when NMIs are executing.
	 *  The interrupted task's stack is also checked to see if it
	 *  is an NMI stack.
	 *  If the variable is not set and the stack is not the NMI
	 *  stack then:
	 *    o Set the special variable on the stack
	 *    o Copy the interrupt frame into an "outermost" location on the
	 *      stack
	 *    o Copy the interrupt frame into an "iret" location on the stack
	 *    o Continue processing the NMI
	 *  If the variable is set or the previous stack is the NMI stack:
	 *    o Modify the "iret" location to jump to the repeat_nmi
	 *    o return back to the first NMI
	 *
	 * Now on exit of the first NMI, we first clear the stack variable.
	 * The NMI stack will tell any nested NMIs at that point that it is
	 * nested. Then we pop the stack normally with iret, and if there was
	 * a nested NMI that updated the copy interrupt stack frame, a
	 * jump will be made to the repeat_nmi code that will handle the second
	 * NMI.
	 *
	 * However, espfix prevents us from directly returning to userspace
	 * with a single IRET instruction. Similarly, IRET to user mode
	 * can fault. We therefore handle NMIs from user space like
	 * other IST entries.
	 */
	ASM_CLAC

	/* Use %rdx as our temp variable throughout */
	pushq	%rdx

	testb	$3, CS-RIP+8(%rsp)
	jz	.Lnmi_from_kernel
	/*
	 * NMI from user mode. We need to run on the thread stack, but we
	 * can't go through the normal entry paths: NMIs are masked, and
	 * we don't want to enable interrupts, because then we'll end
	 * up in an awkward situation in which IRQs are on but NMIs
	 * are off.
	 *
	 * We also must not push anything to the stack before switching
	 * stacks lest we corrupt the "NMI executing" variable.
	 */
	swapgs
	cld
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
	movq	%rsp, %rdx
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT_IRET_REGS base=%rdx offset=8
	pushq	5*8(%rdx)	/* pt_regs->ss */
	pushq	4*8(%rdx)	/* pt_regs->rsp */
	pushq	3*8(%rdx)	/* pt_regs->flags */
	pushq	2*8(%rdx)	/* pt_regs->cs */
	pushq	1*8(%rdx)	/* pt_regs->rip */
	UNWIND_HINT_IRET_REGS
	pushq	$-1		/* pt_regs->orig_ax */
	PUSH_AND_CLEAR_REGS rdx=(%rdx)
	ENCODE_FRAME_POINTER
	/*
	 * At this point we no longer need to worry about stack damage
	 * due to nesting -- we're on the normal thread stack and we're
	 * done with the NMI stack.
	 */
	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	exc_nmi

	/*
	 * Return back to user mode. We must *not* do the normal exit
	 * work, because we don't want to enable interrupts.
	 */
	jmp	swapgs_restore_regs_and_return_to_usermode
.Lnmi_from_kernel:
	/*
	 * Here's what our stack frame will look like:
	 * +---------------------------------------------------------+
	 * | original SS                                             |
	 * | original Return RSP                                     |
	 * | original RFLAGS                                         |
	 * | original CS                                             |
	 * | original RIP                                            |
	 * +---------------------------------------------------------+
	 * | temp storage for rdx                                    |
	 * +---------------------------------------------------------+
	 * | "NMI executing" variable                                |
	 * +---------------------------------------------------------+
	 * | iret SS          } Copied from "outermost" frame        |
	 * | iret Return RSP  } on each loop iteration; overwritten  |
	 * | iret RFLAGS      } by a nested NMI to force another     |
	 * | iret CS          } iteration if needed.                 |
	 * | iret RIP         }                                      |
	 * +---------------------------------------------------------+
	 * | outermost SS          } initialized in first_nmi;       |
	 * | outermost Return RSP  } will not be changed before      |
	 * | outermost RFLAGS      } NMI processing is done.         |
	 * | outermost CS          } Copied to "iret" frame on each  |
	 * | outermost RIP         } iteration.                      |
	 * +---------------------------------------------------------+
	 * | pt_regs                                                 |
	 * +---------------------------------------------------------+
	 *
	 * The "original" frame is used by hardware. Before re-enabling
	 * NMIs, we need to be done with it, and we need to leave enough
	 * space for the asm code here.
	 *
	 * We return by executing IRET while RSP points to the "iret" frame.
	 * That will either return for real or it will loop back into NMI
	 * processing.
	 *
	 * The "outermost" frame is copied to the "iret" frame on each
	 * iteration of the loop, so each iteration starts with the "iret"
	 * frame pointing to the final return target.
	 */
	/*
	 * Determine whether we're a nested NMI.
	 *
	 * If we interrupted kernel code between repeat_nmi and
	 * end_repeat_nmi, then we are a nested NMI. We must not
	 * modify the "iret" frame because it's being written by
	 * the outer NMI. That's okay; the outer NMI handler is
	 * about to call exc_nmi() anyway, so we can just
	 * resume the outer NMI.
	 */
	movq	$repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	1f
	movq	$end_repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	nested_nmi_out
1:
	/*
	 * Now check "NMI executing". If it's set, then we're nested.
	 * This will not detect if we interrupted an outer NMI just
	 * before IRET.
	 */
	cmpl	$1, -8(%rsp)
	je	nested_nmi
	/*
	 * Now test if the previous stack was an NMI stack. This covers
	 * the case where we interrupt an outer NMI after it clears
	 * "NMI executing" but before IRET. We need to be careful, though:
	 * there is one case in which RSP could point to the NMI stack
	 * despite there being no NMI active: naughty userspace controls
	 * RSP at the very beginning of the SYSCALL targets. We can
	 * pull a fast one on naughty userspace, though: we program
	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
	 * if it controls the kernel's RSP. We set DF before we clear
	 * "NMI executing".
	 */
	lea	6*8(%rsp), %rdx
	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
	cmpq	%rdx, 4*8(%rsp)
	/* If the stack pointer is above the NMI stack, this is a normal NMI */
	ja	first_nmi

	subq	$EXCEPTION_STKSZ, %rdx
	cmpq	%rdx, 4*8(%rsp)
	/* If it is below the NMI stack, it is a normal NMI */
	jb	first_nmi

	/* Ah, it is within the NMI stack. */

	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
	jz	first_nmi	/* RSP was user controlled. */
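	/*
	 * Illustrative C form of the stack-range test above (a sketch;
	 * prev_rsp is the interrupted RSP, nmi_top is 6*8(%rsp)):
	 *
	 *	bool on_nmi_stack = prev_rsp <= nmi_top &&
	 *			    prev_rsp >= nmi_top - EXCEPTION_STKSZ;
	 */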
	/* This is a nested NMI. */

nested_nmi:
	/*
	 * Modify the "iret" frame to point to repeat_nmi, forcing another
	 * iteration of NMI handling.
	 */
	subq	$8, %rsp
	leaq	-10*8(%rsp), %rdx
	pushq	$__KERNEL_DS
	pushq	%rdx
	pushfq
	pushq	$__KERNEL_CS
	pushq	$repeat_nmi
	/* Put stack back */
	addq	$(6*8), %rsp

nested_nmi_out:
	popq	%rdx

	/* We are returning to kernel mode, so this cannot result in a fault. */
	iretq
first_nmi:
	/* Restore rdx. */
	movq	(%rsp), %rdx

	/* Make room for "NMI executing". */
	pushq	$0

	/* Leave room for the "iret" frame */
	subq	$(5*8), %rsp

	/* Copy the "original" frame to the "outermost" frame */
	.rept 5
	pushq	11*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS
	/* Everything up to here is safe from nested NMIs */

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * For ease of testing, unmask NMIs right away. Disabled by
	 * default because IRET is very expensive.
	 */
	pushq	$0		/* SS */
	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
	addq	$8, (%rsp)	/* Fix up RSP */
	pushfq			/* RFLAGS */
	pushq	$__KERNEL_CS	/* CS */
	pushq	$1f		/* RIP */
	iretq			/* continues at repeat_nmi below */
	UNWIND_HINT_IRET_REGS
1:
#endif
repeat_nmi:
	/*
	 * If there was a nested NMI, the first NMI's iret will return
	 * here. But NMIs are still enabled and we can take another
	 * nested NMI. The nested NMI checks the interrupted RIP to see
	 * if it is between repeat_nmi and end_repeat_nmi, and if so
	 * it will just return, as we are about to repeat an NMI anyway.
	 * This makes it safe to copy to the stack frame that a nested
	 * NMI will update.
	 *
	 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
	 * we're repeating an NMI, gsbase has the same value that it had on
	 * the first iteration. paranoid_entry will load the kernel
	 * gsbase if needed before we call exc_nmi(). "NMI executing"
	 * is zero.
	 */
	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */
	/*
	 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
	 * here must not modify the "iret" frame while we're writing to
	 * it or it will end up containing garbage.
	 */
	addq	$(10*8), %rsp
	.rept 5
	pushq	-6*8(%rsp)
	.endr
	subq	$(5*8), %rsp

end_repeat_nmi:
	/*
	 * Everything below this point can be preempted by a nested NMI.
	 * If this happens, then the inner NMI will change the "iret"
	 * frame to point back to repeat_nmi.
	 */
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	/*
	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context.
	 * Even with normal interrupts enabled. An NMI should not be
	 * setting NEED_RESCHED or anything that normal interrupts and
	 * exceptions might do.
	 */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	exc_nmi

	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
	/*
	 * The above invocation of paranoid_entry stored the GSBASE
	 * related information in R/EBX depending on the availability
	 * of FSGSBASE.
	 *
	 * If FSGSBASE is enabled, restore the saved GSBASE value
	 * unconditionally, otherwise take the conditional SWAPGS path.
	 */
	ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE

	wrgsbase	%rbx
	jmp	nmi_restore

nmi_no_fsgsbase:
	/* EBX == 0 -> invoke SWAPGS */
	testl	%ebx, %ebx
	jnz	nmi_restore
	swapgs

nmi_restore:
	POP_REGS
	/*
	 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
	 * frame.
	 */
	addq	$6*8, %rsp

	/*
	 * Clear "NMI executing". Set DF first so that we can easily
	 * distinguish the remaining code between here and IRET from
	 * the SYSCALL entry and exit paths.
	 *
	 * We arguably should just inspect RIP instead, but I (Andy) wrote
	 * this code when I had the misapprehension that Xen PV supported
	 * NMIs, and Xen PV would break that approach.
	 */
	std
	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
	/*
	 * iretq reads the "iret" frame and exits the NMI stack in a
	 * single instruction. We are returning to kernel mode, so this
	 * cannot result in a fault. Similarly, we don't need to worry
	 * about espfix64 on the way back to kernel mode.
	 */
	iretq
SYM_CODE_END(asm_exc_nmi)
#ifndef CONFIG_IA32_EMULATION
/*
 * This handles SYSCALL from 32-bit code. There is no way to program
 * MSRs to fully disable 32-bit SYSCALL.
 */
SYM_CODE_START(ignore_sysret)
	UNWIND_HINT_EMPTY
	mov	$-ENOSYS, %eax
	sysretl
SYM_CODE_END(ignore_sysret)
#endif
.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
	UNWIND_HINT_FUNC
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
	leaq	-PTREGS_SIZE(%rax), %rsp
	UNWIND_HINT_REGS

	call	do_exit
SYM_CODE_END(rewind_stack_do_exit)
.popsection