/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
 */
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

.section .entry.text, "ax"
/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka "iret")
 *	GET_CR0_INTO_EAX (aka "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
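/*
 * Editorial sketch (not from the original file): on native hardware a
 * clobber-annotated macro can be patched down to a single instruction,
 * while the paravirt form is an indirect call that may use the
 * registers named in the CLBR_* mask, roughly:
 *
 *	DISABLE_INTERRUPTS(CLBR_ANY)	# native:   cli
 *					# paravirt: call via pv_irq_ops.irq_disable
 */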
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel uses it only for the stack
 * canary, which gcc requires to be at %gs:20. Read the comment at the
 * top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
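/*
 * For reference, a sketch of what gcc emits around the canary on
 * 32-bit with -fstack-protector (editorial, not part of this file):
 *
 *	movl	%gs:20, %eax		# copy canary into the frame
 *	...
 *	xorl	%gs:20, %eax		# recheck it on function exit
 *	jne	__stack_chk_fail
 */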
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately, push/pop can't be no-ops */
	addl	$(4 + \pop), %esp
 /* all the rest are no-ops */
.macro REG_TO_PTGS reg
.macro SET_KERNEL_GS reg
#else	/* CONFIG_X86_32_LAZY_GS */

.pushsection .fixup, "ax"
	_ASM_EXTABLE(98b, 99b)

98:	mov	PT_GS(%esp), %gs

.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	_ASM_EXTABLE(98b, 99b)

.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg

#endif	/* CONFIG_X86_32_LAZY_GS */
	movl	$(__USER_DS), %edx
	movl	$(__KERNEL_PERCPU), %edx

.macro RESTORE_INT_REGS
.macro RESTORE_REGS pop=0
.pushsection .fixup, "ax"

	GET_THREAD_INFO(%ebp)
	pushl	$0x0202				# Reset kernel eflags

ENTRY(ret_from_kernel_thread)
	GET_THREAD_INFO(%ebp)
	pushl	$0x0202				# Reset kernel eflags
	movl	PT_EBP(%esp), %eax
	movl	$0, PT_EAX(%esp)
ENDPROC(ret_from_kernel_thread)
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	preempt_stop(CLBR_ANY)
	GET_THREAD_INFO(%ebp)
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax

	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
	jb	resume_kernel			# not returning to v8086 or userspace
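	/*
	 * Editorial note on the check above: a user CS carries RPL 3
	 * (== USER_RPL), so the compare is not below and we fall through
	 * to the userspace path; a kernel_thread() child still runs on a
	 * kernel CS with RPL 0, which compares below USER_RPL and takes
	 * the resume_kernel branch.
	 */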
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	call	prepare_exit_to_usermode
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path)?
	call	preempt_schedule_irq
/*
 * SYSENTER_RETURN points to after the SYSENTER instruction
 * in the vsyscall page. See vsyscall-sysentry.S, which defines
 * the code.
 */

	# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state has been set up for TRACE_IRQS_OFF to be
	 * called - and we immediately enable interrupts at that point
	 * anyway.
	 */
	orl	$X86_EFLAGS_IF, (%esp)
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
	 * is relative to thread_info, which is at the bottom of the
	 * kernel stack page. 4*4 means the 4 words pushed above;
	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
	 * and THREAD_SIZE takes us to the bottom.
	 */
	pushl	((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
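	/*
	 * Worked example with illustrative numbers (THREAD_SIZE = 8192,
	 * TOP_OF_KERNEL_STACK_PADDING = 8, TI_sysenter_return = 60; the
	 * values are editorial assumptions, not taken from this file):
	 * after the four pushes above, %esp + 4*4 + 8 is the very top of
	 * the stack page, - THREAD_SIZE lands on thread_info at the
	 * bottom, and + 60 addresses the sysenter_return field.
	 */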
	ENABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
	cmpl	$__PAGE_OFFSET-3, %ebp
	movl	%ebp, PT_EBP(%esp)
	_ASM_EXTABLE(1b, syscall_fault)
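	/*
	 * Editorial note on the __PAGE_OFFSET-3 bound: the 4-byte load
	 * through %ebp may start no higher than __PAGE_OFFSET-4, so any
	 * pointer at or above __PAGE_OFFSET-3 would let the access run
	 * into the kernel mapping and must be rejected.
	 */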
	GET_THREAD_INFO(%ebp)

	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	syscall_trace_entry
	cmpl	$(NR_syscalls), %eax
	call	*sys_call_table(, %eax, 4)
	movl	%eax, PT_EAX(%esp)

	DISABLE_INTERRUPTS(CLBR_ANY)
	movl	TI_flags(%ebp), %ecx
	testl	$_TIF_ALLWORK_MASK, %ecx
	jnz	syscall_exit_work_irqs_off
/* if something modifies registers it must also disable sysexit */
	movl	PT_EIP(%esp), %edx
	movl	PT_OLDESP(%esp), %ecx
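	/*
	 * Hardware convention worth noting here: SYSEXIT resumes
	 * userspace with %eip loaded from %edx and %esp loaded from
	 * %ecx, which is why the two loads above target exactly those
	 * registers.
	 */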
1:	mov	PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSEXIT

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
ENDPROC(entry_SYSENTER_32)
	# system call handler stub
ENTRY(entry_INT80_32)
	pushl	%eax				# save orig_eax

	GET_THREAD_INFO(%ebp)
						# system call tracing in operation / emulation
	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	syscall_trace_entry
	cmpl	$(NR_syscalls), %eax
	call	*sys_call_table(, %eax, 4)
	movl	%eax, PT_EAX(%esp)		# store the return value
	jmp	syscall_exit_work
#ifdef CONFIG_X86_ESPFIX32
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	ldt_ss				# returning to user-space with LDT SS
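	/*
	 * Editorial note on the mix above: %eax now holds EFLAGS.VM in
	 * bit 17, the SS TI (LDT) bit in the byte loaded from PT_OLDSS,
	 * and the CS RPL in bits 0..1, so the compare matches exactly
	 * the one case that needs ldt_ss: CPL 3, SS in the LDT, and not
	 * v8086 mode.
	 */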
	RESTORE_REGS 4				# skip orig_eax/error_code

.section .fixup, "ax"
	pushl	$0				# no error code
	_ASM_EXTABLE(irq_return, iret_exc)
#ifdef CONFIG_X86_ESPFIX32
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16 bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl	$0, pv_info+PARAVIRT_enabled
/*
 * Set up and switch to the ESPFIX stack.
 *
 * We're returning to userspace with a 16-bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up for the difference.
 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	%eax				/* new kernel esp */
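	/*
	 * Worked example (illustrative numbers only): with kernel
	 * %esp = 0xc1234f80 and userspace ESP = 0x5678abcd, %eax becomes
	 * 0x56784f80 and the offset 0xc1234f80 - 0x56784f80 = 0x6aab0000
	 * goes into the ESPFIX segment base, so base + %eax still
	 * addresses the kernel stack while the high word of ESP that
	 * userspace can observe reads 0x5678, matching its own stack.
	 */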
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
ENDPROC(entry_INT80_32)
	# perform syscall entry tracing
	movl	$-ENOSYS, PT_EAX(%esp)
	call	syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl	$(NR_syscalls), %eax
END(syscall_trace_entry)

	# perform syscall exit tracing
syscall_exit_work_irqs_off:
	ENABLE_INTERRUPTS(CLBR_ANY)
	call	syscall_return_slowpath
END(syscall_exit_work)

	GET_THREAD_INFO(%ebp)
	movl	$-EFAULT, PT_EAX(%esp)
	jmp	syscall_after_call
	jmp	sysenter_after_call
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	addl	%esp, %eax			/* the adjusted stack pointer */
	lss	(%esp), %esp			/* switch to the normal stack segment */
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	movl	$__KERNEL_DS, %eax
	/* switch to normal stack */
/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
END(irq_entries_start)
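/*
 * Arithmetic note (editorial): ~vector + 0x80 == 0x7f - vector, which
 * for vectors 0x20..0xff lies in -0x80..0x5f, so each pushl encodes as
 * a short push of a sign-extended immediate byte and the stub fits its
 * 8-byte block. common_interrupt below undoes the transform with
 * addl $-0x80, recovering ~vector, i.e. the vector number mapped into
 * the [-256, -1] range.
 */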
/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
ENDPROC(common_interrupt)
#define BUILD_INTERRUPT3(name, nr, fn)	\
#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
# define TRACE_BUILD_INTERRUPT(name, nr)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
	pushl	$do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
	pushl	$do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
END(device_not_available)
#ifdef CONFIG_PARAVIRT
	_ASM_EXTABLE(native_iret, iret_exc)

ENTRY(native_irq_enable_sysexit)
END(native_irq_enable_sysexit)
ENTRY(coprocessor_segment_overrun)
	pushl	$do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	pushl	$do_invalid_TSS

ENTRY(segment_not_present)
	pushl	$do_segment_not_present
END(segment_not_present)

	pushl	$do_stack_segment

ENTRY(alignment_check)
	pushl	$do_alignment_check

	pushl	$0				# no error code
	pushl	$do_divide_error

#ifdef CONFIG_X86_MCE
	pushl	machine_check_vector

ENTRY(spurious_interrupt_bug)
	pushl	$do_spurious_interrupt_bug
END(spurious_interrupt_bug)
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
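	/*
	 * Editorial note: the 5*4 below matches the five words of a full
	 * iret frame (ss, esp, eflags, cs, eip) as provided by Xen; the
	 * enumeration is an assumption, not taken from this file.
	 */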
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * the iret instruction's behaviour, where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	cmpl	$xen_iret_end_crit, %eax
	jmp	xen_iret_crit_fixup

	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
ENDPROC(xen_hypervisor_callback)
/*
 * The hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *	1. Fault while reloading DS, ES, FS or GS
 *	2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)

	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */

5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	jmp	ret_from_exception

.section .fixup, "ax"
ENDPROC(xen_failsafe_callback)
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

	pushl	$0				/* Pass NULL as regs pointer */
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

	addl	$4, %esp			/* skip NULL pointer */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ENTRY(ftrace_regs_caller)
	pushf					/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)
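	/*
	 * Editorial sketch of the word slots touched above (offsets from
	 * %esp after the pushes): slot 12 is regs->ip (the return ip
	 * saved earlier), slot 13 is regs->cs (held the pushed flags,
	 * now __KERNEL_CS), and slot 14 is regs->flags (held the return
	 * ip, now the saved flags).
	 */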
	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
#else /* ! CONFIG_DYNAMIC_FTRACE */

	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller

	/* taken from glibc */
	subl	$MCOUNT_INSN_SIZE, %eax
	call	*ftrace_trace_function

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
END(ftrace_graph_caller)

.globl return_to_handler
	call	ftrace_return_to_handler
#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	pushl	$trace_do_page_fault
END(trace_page_fault)
	/* the function address is in %gs's slot on the stack */
	movl	$(__KERNEL_PERCPU), %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	movl	$(__USER_DS), %ecx
	movl	%esp, %eax			# pt_regs pointer
	jmp	ret_from_exception
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw	$__KERNEL_CS, 4(%esp)
	movl	TSS_sysenter_sp0 + \offset(%esp), %esp
	pushl	$sysenter_past_esp
	cmpl	$entry_SYSENTER_32, (%esp)
	jne	debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl	$-1				# mark this as an int
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	jmp	ret_from_exception
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
#ifdef CONFIG_X86_ESPFIX32
	cmpw	$__ESPFIX_SS, %ax
	cmpl	$entry_SYSENTER_32, (%esp)

	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl	$(THREAD_SIZE-1), %eax
	cmpl	$(THREAD_SIZE-20), %eax
	jae	nmi_stack_correct
	cmpl	$entry_SYSENTER_32, 12(%esp)
	je	nmi_debug_stack_check
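	/*
	 * Editorial note on the "-20" above: the deepest probe on this
	 * path is the 4-byte word at 16(%esp), which ends at %esp + 20;
	 * requiring the offset of %esp within the stack page to be below
	 * THREAD_SIZE-20 keeps every probed word inside the page.
	 */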
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	jmp	restore_all_notrace

	FIX_STACK 12, nmi_stack_correct, 1
	jmp	nmi_stack_correct

nmi_debug_stack_check:
	cmpw	$__KERNEL_CS, 16(%esp)
	jne	nmi_stack_correct
	jb	nmi_stack_correct
	cmpl	$debug_esp_fix_insn, (%esp)
	ja	nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp	nmi_stack_correct
#ifdef CONFIG_X86_ESPFIX32
	/*
	 * Create the pointer for lss to switch back through.
	 */
	/* copy the iret frame of 12 bytes */
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	lss	12+4(%esp), %esp		# back to espfix stack
	pushl	$-1				# mark this as an int
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	jmp	ret_from_exception

ENTRY(general_protection)
	pushl	$do_general_protection
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	pushl	$do_async_page_fault
END(async_page_fault)