/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
 */
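/*
 * For reference (added; field names per the 32-bit struct pt_regs in
 * asm/ptrace.h, which the SAVE_ALL macro below fills in):
 *
 *	struct pt_regs {
 *		long ebx, ecx, edx, esi, edi, ebp, eax;
 *		int  xds, xes, xfs, xgs;
 *		long orig_eax, eip;
 *		int  xcs;
 *		long eflags, esp;
 *		int  xss;
 *	};
 *
 * so %gs lands at (hex) offset 28, matching the note above.
 */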
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

	.section .entry.text, "ax"
/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
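/*
 * Illustration (added; assumes CONFIG_PARAVIRT=n): without paravirt
 * these macros collapse to the bare instructions named above, e.g.
 *
 *	DISABLE_INTERRUPTS(CLBR_ANY)	->	cli
 *	ENABLE_INTERRUPTS(CLBR_ANY)	->	sti
 *	INTERRUPT_RETURN		->	iret
 *
 * With paravirt enabled they become patchable call sites instead.
 */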
#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel only uses it for the stack
 * canary, which gcc requires to be at %gs:20.  Read the comment at the
 * top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
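/*
 * Background (added): with -fstack-protector on 32-bit x86, gcc emits
 * canary accesses along the lines of
 *
 *	movl	%gs:20, %eax		# load the canary in the prologue
 *	...
 *	xorl	%gs:20, %eax		# recheck it in the epilogue
 *
 * so %gs must point at a segment whose offset 20 holds the canary
 * whenever kernel C code runs.
 */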
#ifdef CONFIG_X86_32_LAZY_GS

	/* unfortunately push/pop can't be no-ops */
	addl	$(4 + \pop), %esp

	/* all the rest are no-ops */
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.pushsection .fixup, "ax"
	_ASM_EXTABLE(98b, 99b)

98:	mov	PT_GS(%esp), %gs

.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	_ASM_EXTABLE(98b, 99b)

.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */
.macro SAVE_ALL pt_regs_ax=%eax
	movl	$(__USER_DS), %edx
	movl	$(__KERNEL_PERCPU), %edx

.macro RESTORE_INT_REGS

.macro RESTORE_REGS pop=0
.pushsection .fixup, "ax"

ENTRY(ret_from_fork)
	GET_THREAD_INFO(%ebp)
	pushl	$0x0202				# Reset kernel eflags

	/* When we fork, we trace the syscall return in the child, too. */
	call	syscall_return_slowpath
ENTRY(ret_from_kernel_thread)
	GET_THREAD_INFO(%ebp)
	pushl	$0x0202				# Reset kernel eflags
	movl	PT_EBP(%esp), %eax
	movl	$0, PT_EAX(%esp)

	/*
	 * Kernel threads return to userspace as if returning from a syscall.
	 * We should check whether anything actually uses this path and, if
	 * so, consider switching it over to ret_from_fork.
	 */
	call	syscall_return_slowpath
ENDPROC(ret_from_kernel_thread)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */
	# userspace resumption stub bypassing syscall exit tracing
	preempt_stop(CLBR_ANY)
	GET_THREAD_INFO(%ebp)
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
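	/*
	 * Added note: after the three instructions above, %eax is nonzero
	 * iff the interrupted context was vm86 mode (VM set in EFLAGS) or
	 * had a user RPL in CS, so the single compare against USER_RPL
	 * below routes both cases away from resume_kernel.
	 */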
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	movl	%esp, %eax
	call	prepare_exit_to_usermode
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	call	preempt_schedule_irq
#endif
/*
 * SYSENTER_RETURN points to after the SYSENTER instruction
 * in the vsyscall page.  See vsyscall-sysenter.S, which defines
 * the macro.
 */

	# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state has been set up for TRACE_IRQS_OFF to be
	 * called - and we immediately enable interrupts at that point anyway.
	 */
	orl	$X86_EFLAGS_IF, (%esp)
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
	 * is relative to thread_info, which is at the bottom of the
	 * kernel stack page.  4*4 means the 4 words pushed above;
	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
	 * and THREAD_SIZE takes us to the bottom.
	 */
	pushl	((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
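	/*
	 * Added walk-through of the offset above: adding 4*4 undoes the
	 * four words pushed so far, getting back to the sp0 value loaded
	 * at entry; TOP_OF_KERNEL_STACK_PADDING moves from there to the
	 * true top of the stack page; subtracting THREAD_SIZE lands on
	 * thread_info at the bottom of the page; and TI_sysenter_return
	 * indexes the field inside it.
	 */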
	ENABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * Load the potential sixth argument from the user stack.
	 * Careful about security.
	 */
	cmpl	$__PAGE_OFFSET-3, %ebp
1:	movl	(%ebp), %ebp
	movl	%ebp, PT_EBP(%esp)
	_ASM_EXTABLE(1b, syscall_fault)
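	/*
	 * Added note: _ASM_EXTABLE(1b, syscall_fault) records an
	 * exception-table entry pairing the user-memory load at label 1
	 * with a fixup address, so a fault on that one instruction
	 * resumes at syscall_fault instead of oopsing; the cmpl above
	 * additionally rejects pointers that reach into kernel space.
	 */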
	GET_THREAD_INFO(%ebp)

	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	syscall_trace_entry
	cmpl	$(NR_syscalls), %eax
	jae	sysenter_badsys
	call	*sys_call_table(, %eax, 4)
sysenter_after_call:
	movl	%eax, PT_EAX(%esp)
	DISABLE_INTERRUPTS(CLBR_ANY)
	movl	TI_flags(%ebp), %ecx
	testl	$_TIF_ALLWORK_MASK, %ecx
	jnz	syscall_exit_work_irqs_off

/* if something modifies registers it must also disable sysexit */
	movl	PT_EIP(%esp), %edx
	movl	PT_OLDESP(%esp), %ecx
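	/*
	 * Added reminder: SYSEXIT resumes userspace with %edx as the new
	 * EIP and %ecx as the new ESP, which is why exactly those two
	 * registers are loaded from pt_regs here.
	 */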
1:	mov	PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSEXIT

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
ENDPROC(entry_SYSENTER_32)
	# system call handler stub
ENTRY(entry_INT80_32)
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, load -ENOSYS into ax */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	call	do_int80_syscall_32

restore_all:
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	ldt_ss				# returning to user-space with LDT SS
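	/*
	 * Added note on the compare: %eax now packs EFLAGS.VM in its high
	 * bits, the low byte of the saved SS (including the TI bit) in
	 * %ah and the CS RPL in %al.  Matching (SEGMENT_LDT << 8) |
	 * USER_RPL therefore means: not vm86, SS taken from the LDT, and
	 * returning to ring 3 - exactly the 16-bit-stack case the ESPFIX
	 * path has to handle.
	 */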
	RESTORE_REGS 4				# skip orig_eax/error_code

.section .fixup, "ax"
	pushl	$0				# no error code
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16 bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl	$0, pv_info+PARAVIRT_enabled
	/*
	 * Set up and switch to the ESPFIX stack.
	 *
	 * We're returning to userspace with a 16-bit stack.  The CPU will not
	 * restore the high word of ESP for us on executing iret...  This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy.  We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that accounts for the difference.
	 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
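	/*
	 * Worked example (added, illustrative values only): kernel esp
	 * 0xc1234abc, userspace esp 0x12345678:
	 *
	 *	%eax = 0x12344abc	user esp with the kernel low word
	 *	%edx = 0xaeef0000	kernel esp - %eax, low word zero
	 *
	 * %edx becomes the ESPFIX segment base, so base + %eax still
	 * addresses the kernel stack, while the high word of ESP now
	 * matches the userspace ESP that the 16-bit iret won't restore.
	 */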
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
ENDPROC(entry_INT80_32)
	# perform syscall entry tracing
syscall_trace_entry:
	movl	$-ENOSYS, PT_EAX(%esp)
	call	syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl	$(NR_syscalls), %eax
END(syscall_trace_entry)

	# perform syscall exit tracing
syscall_exit_work_irqs_off:
	ENABLE_INTERRUPTS(CLBR_ANY)
	call	syscall_return_slowpath
END(syscall_exit_work)

syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl	$-EFAULT, PT_EAX(%esp)

sysenter_badsys:
	jmp	sysenter_after_call
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack:
 *
 * We can't call C functions using the ESPFIX stack.  This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fix up the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
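	/*
	 * GDT layout reminder (added): in an 8-byte descriptor, base[15:0]
	 * lives in bytes 2-3, base[23:16] in byte 4 and base[31:24] in
	 * byte 7, so the two loads above recover exactly the high word of
	 * the base that the ESPFIX setup code stored earlier.
	 */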
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm

.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	movl	$__KERNEL_DS, %eax
	/* switch to normal stack */
/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
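	/*
	 * Encoding note (added): each stub pushes ~vector + 0x80, which
	 * always fits in a signed byte and therefore in a two-byte pushl.
	 * Subtracting 0x80 here leaves ~vector, i.e. -(vector + 1), in
	 * orig_eax.  Example: vector 0x20 is pushed as ~0x20 + 0x80 = 0x5f
	 * and adjusted to 0x5f - 0x80 = -0x21 = ~0x20, so the handler can
	 * recover the vector as ~orig_eax.
	 */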
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
	pushl	$do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
END(simd_coprocessor_error)

ENTRY(device_not_available)
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
END(device_not_available)

#ifdef CONFIG_PARAVIRT
	_ASM_EXTABLE(native_iret, iret_exc)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif
ENTRY(coprocessor_segment_overrun)
	pushl	$do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	pushl	$do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	pushl	$do_segment_not_present
END(segment_not_present)

ENTRY(stack_segment)
	pushl	$do_stack_segment
END(stack_segment)

ENTRY(alignment_check)
	pushl	$do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	pushl	$0				# no error code
	pushl	$do_divide_error
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	pushl	machine_check_vector
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	pushl	$do_spurious_interrupt_bug
END(spurious_interrupt_bug)
#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp

ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * the iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	cmpl	$xen_iret_end_crit, %eax
	jmp	xen_iret_crit_fixup

	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
ENDPROC(xen_hypervisor_callback)
/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error.  We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	jmp	ret_from_exception

.section .fixup, "ax"
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(ftrace_caller)
	pushl	$0				/* Pass NULL as regs pointer */
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

	addl	$4, %esp			/* skip NULL pointer */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
#endif

ENTRY(ftrace_regs_caller)
	pushf	/* push flags before compare (in cs location) */
	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is.  We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
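	/*
	 * Added sketch of the shuffle: on entry the stack is [ret-ip];
	 * after the pushf it is [flags][ret-ip]; after the "pushl 4(%esp)"
	 * below it is [ret-ip][flags][ret-ip].  The top copy becomes
	 * pt_regs->ip, the flags temporarily sit in the pt_regs->cs slot,
	 * and the fixups below move them into pt_regs->flags and drop
	 * __KERNEL_CS into cs before the tracer is called.
	 */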
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
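	/*
	 * Added note: MCOUNT_INSN_SIZE is the length of the call that got
	 * us here (5 bytes on x86-32), so this subtraction rewinds the
	 * saved return address to the call site itself - the address
	 * ftrace reports and patches.
	 */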
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif

	/* taken from glibc */
	subl	$MCOUNT_INSN_SIZE, %eax
	call	*ftrace_trace_function

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	call	ftrace_return_to_handler
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	pushl	$trace_do_page_fault
END(trace_page_fault)
#endif
error_code:
	/* the function address is in %gs's slot on the stack */
	movl	$(__KERNEL_PERCPU), %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	movl	$(__USER_DS), %ecx
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
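/*
 * Added illustration: if the trap hits on the sysenter instruction,
 * %esp still points into the TSS and the handler's hardware frame
 *
 *	[eflags][cs][eip]		<- 3 words, 12 bytes
 *
 * has already been pushed there, so the sp0 field now sits 12 bytes
 * further from %esp - hence FIX_STACK's offset argument of 12 (24 for
 * the nested NMI-during-debug case handled below).
 */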
.macro FIX_STACK offset ok label
	cmpw	$__KERNEL_CS, 4(%esp)
	jne	\ok
\label:
	movl	TSS_sysenter_sp0 + \offset(%esp), %esp
	pushfl
	pushl	$__KERNEL_CS
	pushl	$sysenter_past_esp
.endm

ENTRY(debug)
	cmpl	$entry_SYSENTER_32, (%esp)
	jne	debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl	$-1				# mark this as an int
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	call	do_debug
	jmp	ret_from_exception
/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
#ifdef CONFIG_X86_ESPFIX32
	cmpw	$__ESPFIX_SS, %ax
#endif
	cmpl	$entry_SYSENTER_32, (%esp)
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
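	/*
	 * Added note on the margin below: the andl/cmpl pair measures how
	 * close %esp is to the top of its THREAD_SIZE-aligned stack area;
	 * the 20-byte cushion is assumed here to guarantee that peeking
	 * at 12(%esp) for the sysenter-eip check stays inside the page.
	 */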
	andl	$(THREAD_SIZE-1), %eax
	cmpl	$(THREAD_SIZE-20), %eax
	jae	nmi_stack_correct
	cmpl	$entry_SYSENTER_32, 12(%esp)
	je	nmi_debug_stack_check
nmi_stack_correct:
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_nmi
	jmp	restore_all_notrace

nmi_stack_fixup:
	FIX_STACK 12, nmi_stack_correct, 1
	jmp	nmi_stack_correct

nmi_debug_stack_check:
	cmpw	$__KERNEL_CS, 16(%esp)
	jne	nmi_stack_correct
	cmpl	$debug, (%esp)
	jb	nmi_stack_correct
	cmpl	$debug_esp_fix_insn, (%esp)
	ja	nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp	nmi_stack_correct
#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
	/*
	 * Create the ss:esp far pointer that the final lss uses to
	 * switch back.
	 */
	/* copy the iret frame of 12 bytes */
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	lss	12+4(%esp), %esp		# back to espfix stack
#endif
ENTRY(int3)
	pushl	$-1				# mark this as an int
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	pushl	$do_async_page_fault
END(async_page_fault)
#endif