 * Copyright (C) 1991,1992 Linus Torvalds
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 * Stack layout in 'syscall_exit':
 * ptrace needs to have all registers on the stack.
 * If the order here is changed, it needs to be
 * updated in fork.c:copy_process(), signal.c:do_signal(),
 * ptrace.c and ptrace.h
 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
# define sysenter_audit		syscall_trace_entry
# define sysexit_audit		syscall_exit_work

	.section .entry.text, "ax"
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
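/*
 * Illustration (a sketch, not the actual patcher output): on native
 * hardware a DISABLE_INTERRUPTS site can be patched down to a bare
 * "cli", while a paravirtualized guest sees an indirect call with the
 * non-clobberable registers saved around it, roughly:
 *
 *	push %eax; push %ecx; push %edx
 *	call *pv_irq_ops.irq_disable
 *	pop %edx; pop %ecx; pop %eax
 *
 * Annotating the site with CLBR_EAX/EDX/ECX lets the patcher drop the
 * corresponding push/pop pairs.
 */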
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF

# define preempt_stop(clobbers)
# define resume_kernel		restore_all

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
 * User gs save/restore
 *
 * %gs is used for userland TLS, while the kernel uses it only for the
 * stack canary, which gcc requires to be at %gs:20. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
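/*
 * For reference, a rough sketch of what gcc's -fstack-protector emits
 * on i386 (exact registers and frame offsets vary), and why the canary
 * must live at %gs:20:
 *
 *	movl %gs:20, %eax		# prologue: copy the canary...
 *	movl %eax, 28(%esp)		# ...into the stack frame
 *	...
 *	movl 28(%esp), %eax		# epilogue: recheck it
 *	xorl %gs:20, %eax
 *	jne <branch that calls __stack_chk_fail>
 */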
#ifdef CONFIG_X86_32_LAZY_GS

	/* unfortunately push/pop can't be no-ops */
	addl $(4 + \pop), %esp

	/* all the rest are no-ops */

.macro REG_TO_PTGS reg
.macro SET_KERNEL_GS reg

#else	/* CONFIG_X86_32_LAZY_GS */

.pushsection .fixup, "ax"
	_ASM_EXTABLE(98b, 99b)

98:	mov PT_GS(%esp), %gs
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	_ASM_EXTABLE(98b, 99b)
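/*
 * The _ASM_EXTABLE(98b, 99b) entries pair the faulting %gs load at
 * local label 98 with the fixup at local label 99, which zeroes the
 * saved PT_GS slot and retries, so a bad selector costs the task its
 * %gs value rather than an oops.
 */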
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg

#endif	/* CONFIG_X86_32_LAZY_GS */

	movl $(__USER_DS), %edx
	movl $(__KERNEL_PERCPU), %edx

.macro RESTORE_INT_REGS
.macro RESTORE_REGS pop=0
.pushsection .fixup, "ax"

	GET_THREAD_INFO(%ebp)
	pushl $0x0202			# Reset kernel eflags

ENTRY(ret_from_kernel_thread)
	GET_THREAD_INFO(%ebp)
	pushl $0x0202			# Reset kernel eflags
	movl PT_EBP(%esp), %eax
	movl $0, PT_EAX(%esp)
ENDPROC(ret_from_kernel_thread)
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.

	# userspace resumption stub bypassing syscall exit tracing
	preempt_stop(CLBR_ANY)
	GET_THREAD_INFO(%ebp)

	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax

 * We can be coming here from a child spawned by kernel_thread().
	movl PT_CS(%esp), %eax
	andl $SEGMENT_RPL_MASK, %eax
	jb resume_kernel		# not returning to v8086 or userspace
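/*
 * Rough C equivalent of the checks above (the cmpl against USER_RPL is
 * elided in this excerpt): vm86 mode sets X86_EFLAGS_VM, and user
 * segments have RPL >= USER_RPL, so
 *
 *	if (((regs->flags & X86_EFLAGS_VM) |
 *	     (regs->cs & SEGMENT_RPL_MASK)) < USER_RPL)
 *		goto resume_kernel;	// in-kernel return
 */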
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0, PER_CPU_VAR(__preempt_count)
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	call preempt_schedule_irq
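/*
 * Sketch of the loop this fragment implements (labels and branches
 * elided in this excerpt):
 *
 *	while (need_resched()) {
 *		if (preempt_count() != 0)
 *			break;
 *		if (!(regs->flags & X86_EFLAGS_IF))
 *			break;		// interrupts were off, don't preempt
 *		preempt_schedule_irq();
 *	}
 */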
 * SYSENTER_RETURN points to after the SYSENTER instruction
 * in the vsyscall page. See vsyscall-sysenter.S, which defines
 * the offset.
# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
	movl TSS_sysenter_sp0(%esp), %esp

 * Interrupts are disabled here, but we can't trace that until we have
 * enough kernel state for TRACE_IRQS_OFF to be called - and we
 * immediately enable interrupts at that point anyway.

	orl $X86_EFLAGS_IF, (%esp)
 * Push current_thread_info()->sysenter_return to the stack.
 * A tiny bit of offset fixup is necessary: TI_sysenter_return
 * is relative to thread_info, which is at the bottom of the
 * kernel stack page. 4*4 means the 4 words pushed above;
 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
 * and THREAD_SIZE takes us to the bottom.

	pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
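/*
 * Sketch of that address computation: the four words pushed above plus
 * TOP_OF_KERNEL_STACK_PADDING put us at the top of the stack, and
 * thread_info lives THREAD_SIZE below that:
 *
 *	ti = (struct thread_info *)(esp + 4*4
 *		+ TOP_OF_KERNEL_STACK_PADDING - THREAD_SIZE);
 *	push(ti->sysenter_return);
 */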
	ENABLE_INTERRUPTS(CLBR_NONE)

 * Load the potential sixth argument from the user stack.
 * Careful about security.

	cmpl $__PAGE_OFFSET-3, %ebp
	movl %ebp, PT_EBP(%esp)
	_ASM_EXTABLE(1b, syscall_fault)
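/*
 * Rough C equivalent of the user-stack fetch above (the jae to
 * syscall_fault and the "1:" load itself are elided in this excerpt):
 *
 *	if ((unsigned long)ebp >= __PAGE_OFFSET - 3)
 *		goto syscall_fault;	// bytes ebp..ebp+3 must stay
 *					// below PAGE_OFFSET
 *	arg6 = *(u32 __user *)ebp;	// may fault -> extable entry
 */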
	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	cmpl $(NR_syscalls), %eax
	call *sys_call_table(, %eax, 4)
	movl %eax, PT_EAX(%esp)
	DISABLE_INTERRUPTS(CLBR_ANY)
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx

/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
1:	mov PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSEXIT
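/*
 * Note: sysexit resumes userspace at %edx with the user stack taken
 * from %ecx (cs/ss are derived from MSR_IA32_SYSENTER_CS), which is
 * why only PT_EIP and PT_OLDESP are reloaded above.
 */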
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
	jnz syscall_trace_entry
	/* movl PT_EAX(%esp), %eax	already set, syscall number: 1st arg to audit */
	movl PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
	/* movl PT_ECX(%esp), %ecx	already set, a1: 3rd arg to audit */
	pushl PT_ESI(%esp)		/* a3: 5th arg */
	pushl PT_EDX+4(%esp)		/* a2: 4th arg */
	call __audit_syscall_entry
	popl %ecx			/* get that remapped edx off the stack */
	popl %ecx			/* get that remapped esi off the stack */
	movl PT_EAX(%esp), %eax		/* reload syscall number */

	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jnz syscall_exit_work
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax, %edx			/* second arg, syscall return value */
	cmpl $-MAX_ERRNO, %eax		/* is it an error ? */
	setbe %al			/* 1 if not an error (success), 0 if it is */
	movzbl %al, %eax		/* zero-extend that */
	call __audit_syscall_exit
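/*
 * Rough C equivalent of the branchless flag computed above:
 *
 *	success = (unsigned long)ret <= (unsigned long)-MAX_ERRNO;
 *	__audit_syscall_exit(success, ret);	// args in %eax, %edx
 */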
	DISABLE_INTERRUPTS(CLBR_ANY)
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jnz syscall_exit_work
	movl PT_EAX(%esp), %eax		/* reload syscall return value */

.pushsection .fixup, "ax"
2:	movl $0, PT_FS(%esp)
ENDPROC(entry_SYSENTER_32)

	# system call handler stub
ENTRY(entry_INT80_32)
	pushl %eax			# save orig_eax
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(NR_syscalls), %eax
	call *sys_call_table(, %eax, 4)
	movl %eax, PT_EAX(%esp)		# store the return value

	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jnz syscall_exit_work

#ifdef CONFIG_X86_ESPFIX32
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
 * are returning to the kernel.
 * See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	je ldt_ss			# returning to user-space with LDT SS
	RESTORE_REGS 4			# skip orig_eax/error_code

.section .fixup, "ax"
	pushl $0			# no error code
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
#ifdef CONFIG_PARAVIRT
 * The kernel can't run on a non-flat stack if paravirt mode
 * is active. Rather than try to fix up the high bits of
 * ESP, bypass this code entirely. This may break DOSemu
 * and/or Wine support in a paravirt VM, although the option
 * is still available to implement the setting of the high
 * 16 bits in the INTERRUPT_RETURN paravirt-op.
	cmpl $0, pv_info+PARAVIRT_enabled
 * Set up and switch to the ESPFIX stack.
 *
 * We're returning to userspace with a 16-bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up for the difference.

#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)

	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl %eax			/* new kernel esp */

 * Disable interrupts, but do not irqtrace this section: we
 * will soon execute iret and the tracer was already set to
 * the irqstate after the IRET:

	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
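/*
 * Sketch of the arithmetic above (the shr $16 that isolates the high
 * word of the offset is elided in this excerpt):
 *
 *	eax  = (user_esp & 0xffff0000) | (kernel_esp & 0xffff);
 *	base = kernel_esp - eax;	// low word is zero
 *
 * With 'base' patched into the espfix GDT entry, the lss above leaves
 * the kernel running at linear address base + %esp == kernel_esp,
 * while the %esp register already carries userspace's high word, so
 * iret's failure to restore that word no longer matters.
 */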
ENDPROC(entry_INT80_32)

	# perform work that needs to be done immediately before resumption
	testb $_TIF_NEED_RESCHED, %cl

	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	testb $_TIF_NEED_RESCHED, %cl

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	ENABLE_INTERRUPTS(CLBR_NONE)
	call do_notify_resume
	# perform syscall entry tracing
	movl $-ENOSYS, PT_EAX(%esp)
	call syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl $(NR_syscalls), %eax
END(syscall_trace_entry)

	# perform syscall exit tracing
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	call syscall_trace_leave
END(syscall_exit_work)

	GET_THREAD_INFO(%ebp)
	movl $-EFAULT, PT_EAX(%esp)
	jmp syscall_after_call

	jmp sysenter_after_call
.macro FIXUP_ESPFIX_STACK
 * Switch back from the ESPFIX stack to the normal zero-based stack:
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack, adjusting ESP with the matching offset.
#ifdef CONFIG_X86_ESPFIX32
	/* fix up the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	addl %esp, %eax			/* the adjusted stack pointer */
	lss (%esp), %esp		/* switch to the normal stack segment */
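/*
 * Sketch of the reverse computation (the shl $16 that positions the
 * base bits is elided in this excerpt): GDT descriptor bytes 4 and 7
 * hold base[16..23] and base[24..31], so
 *
 *	base    = (byte4 << 16) | (byte7 << 24);	// low word is 0
 *	new_esp = base + esp;	// back to the flat kernel address
 */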
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	movl $__KERNEL_DS, %eax
	/* switch to normal stack */

 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.

ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl $(~vector+0x80)		/* Note: always in signed byte range */
END(irq_entries_start)
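/*
 * Sketch of the vector encoding: ~vector + 0x80 == 0x7f - vector fits
 * in a signed byte for every vector below 256, so each "pushl $imm8"
 * stub stays at 2 bytes. The addl $-0x80 in common_interrupt below
 * undoes the bias:
 *
 *	pushed  = 0x7f - vector;	// in [-0x80, 0x5f]
 *	orig_ax = pushed - 0x80;	// == ~vector, in [-256, -1]
 *	vector  = ~orig_ax;		// recovered in do_IRQ
 */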
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:

	.p2align CONFIG_X86_L1_CACHE_SHIFT
	addl $-0x80, (%esp)		/* Adjust vector into the [-256, -1] range */
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\

#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
# define TRACE_BUILD_INTERRUPT(name, nr)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	pushl $do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl $do_general_protection",	\
		    "pushl $do_simd_coprocessor_error",	\
	pushl $do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	pushl $-1			# mark this as an int
	pushl $do_device_not_available
END(device_not_available)

#ifdef CONFIG_PARAVIRT
	_ASM_EXTABLE(native_iret, iret_exc)

ENTRY(native_irq_enable_sysexit)
END(native_irq_enable_sysexit)
ENTRY(coprocessor_segment_overrun)
	pushl $do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	pushl $do_invalid_TSS

ENTRY(segment_not_present)
	pushl $do_segment_not_present
END(segment_not_present)

	pushl $do_stack_segment

ENTRY(alignment_check)
	pushl $do_alignment_check

	pushl $0			# no error code
	pushl $do_divide_error

#ifdef CONFIG_X86_MCE
	pushl machine_check_vector

ENTRY(spurious_interrupt_bug)
	pushl $do_spurious_interrupt_bug
END(spurious_interrupt_bug)

 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
ENTRY(xen_sysenter_target)
	addl $5*4, %esp			/* remove xen-provided frame */
	jmp sysenter_past_esp
ENTRY(xen_hypervisor_callback)
	pushl $-1			/* orig_ax = -1 => not a system call */

 * Check to see if we got the event in the critical
 * region in xen_iret_direct, after we've reenabled
 * events and checked for pending events. This simulates
 * the iret instruction's behaviour where it delivers a
 * pending interrupt when enabling interrupts:

	movl PT_EIP(%esp), %eax
	cmpl $xen_iret_start_crit, %eax
	cmpl $xen_iret_end_crit, %eax
	jmp xen_iret_crit_fixup

	call xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call xen_maybe_preempt_hcall
ENDPROC(xen_hypervisor_callback)
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
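/*
 * Sketch of the dispatch (the fixup paths that set %eax are elided in
 * this excerpt):
 *
 *	if (eax == 0)	-> label 5 below, then ret_from_exception
 *			   (category 1, bad segment)
 *	else		-> iret_exc / do_iret_error
 *			   (category 2, bad IRET)
 */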
5:	pushl $-1			/* orig_ax = -1 => not a system call */
	jmp ret_from_exception

.section .fixup, "ax"
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)
#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
	pushl $0			/* Pass NULL as regs pointer */
	movl function_trace_op, %ecx
	subl $MCOUNT_INSN_SIZE, %eax

	addl $4, %esp			/* skip NULL pointer */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call

ENTRY(ftrace_regs_caller)
	pushf				/* push flags before compare (in cs location) */
 * i386 does not save SS and ESP when coming from kernel.
 * Instead, to get sp, &regs->sp is used (see ptrace.h).
 * Unfortunately, that means eflags must be at the same location
 * as the current return ip is. We move the return ip into the
 * ip location, and move flags into the return ip location.

	pushl 4(%esp)			/* save return ip into ip slot */
	pushl $0			/* Load 0 into orig_ax */

	movl 13*4(%esp), %eax		/* Get the saved flags */
	movl %eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
					/* clobbering return ip */
	movl $__KERNEL_CS, 13*4(%esp)

	movl 12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl $MCOUNT_INSN_SIZE, %eax	/* Adjust ip */
	movl 0x4(%ebp), %edx		/* Load parent ip (2nd parameter) */
	movl function_trace_op, %ecx	/* Pass function_trace_op as 3rd parameter */
	pushl %esp			/* Save pt_regs as 4th parameter */
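/*
 * The patched site (ftrace_regs_call below) invokes the tracer with,
 * roughly, this C signature - regparm(3) plus one stack argument:
 *
 *	void handler(unsigned long ip, unsigned long parent_ip,
 *		     struct ftrace_ops *op, struct pt_regs *regs);
 */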
GLOBAL(ftrace_regs_call)

	addl $4, %esp			/* Skip pt_regs */
	movl 14*4(%esp), %eax		/* Move flags back into cs */
	movl %eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl 12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl %eax, 14*4(%esp)		/* Put return ip back for ret */

	addl $8, %esp			/* Skip orig_ax and ip */
	popf				/* Pop flags at end (no addl to corrupt flags) */

#else /* ! CONFIG_DYNAMIC_FTRACE */

	cmpl $__PAGE_OFFSET, %esp
	jb ftrace_stub			/* Paging not enabled yet? */

	cmpl $ftrace_stub, ftrace_trace_function
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller

	/* taken from glibc */
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	movl 0xc(%esp), %eax
	subl $MCOUNT_INSN_SIZE, %eax
	call prepare_ftrace_return
END(ftrace_graph_caller)

.globl return_to_handler
	call ftrace_return_to_handler

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	pushl $trace_do_page_fault
END(trace_page_fault)

	pushl $do_page_fault

	/* the function address is in %gs's slot on the stack */
	movl $(__KERNEL_PERCPU), %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	movl $(__USER_DS), %ecx
	movl %esp, %eax			# pt_regs pointer
	jmp ret_from_exception
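/*
 * Sketch of the dispatch (the indirect call through %edi is elided in
 * this excerpt): with regparm(3) it amounts to
 *
 *	handler(regs, error_code);	// regs in %eax, code in %edx
 */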
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.

.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	pushl $sysenter_past_esp

	cmpl $entry_SYSENTER_32, (%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
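/*
 * In other words: if the debug trap's saved cs:eip shows we were
 * sitting on the sysenter instruction (still on the tiny sysenter
 * stack), FIX_STACK switches to the real sp0 stack and re-pushes
 * eflags/cs/eip - with eip pointing past sysenter - before the trap
 * is handled.
 */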
debug_stack_correct:
	pushl $-1			# mark this as an int
	xorl %edx, %edx			# error code 0
	movl %esp, %eax			# pt_regs pointer
	jmp ret_from_exception

 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.

#ifdef CONFIG_X86_ESPFIX32
	cmpw $__ESPFIX_SS, %ax
	cmpl $entry_SYSENTER_32, (%esp)

 * Do not access memory above the end of our stack page,
 * it might not exist.

	andl $(THREAD_SIZE-1), %eax
	cmpl $(THREAD_SIZE-20), %eax
	jae nmi_stack_correct
	cmpl $entry_SYSENTER_32, 12(%esp)
	je nmi_debug_stack_check
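/*
 * Rough C equivalent of the bounds check above: reading the word at
 * 12(%esp) is only safe while it stays inside our THREAD_SIZE-aligned
 * stack:
 *
 *	if ((esp & (THREAD_SIZE - 1)) >= THREAD_SIZE - 20)
 *		goto nmi_stack_correct;	// 12(%esp) could be off-page
 */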
	xorl %edx, %edx			# zero error code
	movl %esp, %eax			# pt_regs pointer
	jmp restore_all_notrace

	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	cmpw $__KERNEL_CS, 16(%esp)
	jne nmi_stack_correct
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn, (%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct
#ifdef CONFIG_X86_ESPFIX32
 * Create the pointer used by lss to get back to the espfix stack.
	/* copy the iret frame of 12 bytes */
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx, %edx			# zero error code
	lss 12+4(%esp), %esp		# back to espfix stack

	pushl $-1			# mark this as an int
	xorl %edx, %edx			# zero error code
	movl %esp, %eax			# pt_regs pointer
	jmp ret_from_exception

ENTRY(general_protection)
	pushl $do_general_protection
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	pushl $do_async_page_fault
END(async_page_fault)