// SPDX-License-Identifier: GPL-2.0-only
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#ifdef CONFIG_XEN_PV
#include <xen/xen-ops.h>
#include <xen/events.h>
#endif

#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/syscall.h>
#include <asm/irq_stack.h>

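/*
 * Defining CREATE_TRACE_POINTS before including the trace header
 * instantiates the sys_enter/sys_exit tracepoints used below; every
 * other user of <trace/events/syscalls.h> gets only the declarations.
 */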
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* Check that the stack and regs on entry from user mode are sane. */
static noinstr void check_user_regs(struct pt_regs *regs)
{
        if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
                /*
                 * Make sure that the entry code gave us a sensible EFLAGS
                 * register. Native because we want to check the actual CPU
                 * state, not the interrupt state as imagined by Xen.
                 */
                unsigned long flags = native_save_fl();
                WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
                                      X86_EFLAGS_NT));

                /* We think we came from user mode. Make sure pt_regs agrees. */
                WARN_ON_ONCE(!user_mode(regs));

                /*
                 * All entries from user mode (except #DF) should be on the
                 * normal thread stack and should have user pt_regs in the
                 * correct location.
                 */
                WARN_ON_ONCE(!on_thread_stack());
                WARN_ON_ONCE(regs != task_pt_regs(current));
        }
}

#ifdef CONFIG_CONTEXT_TRACKING
/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall entry disables interrupts, but user mode is traced as interrupts
 * enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 */
static noinstr void enter_from_user_mode(struct pt_regs *regs)
{
        enum ctx_state state = ct_state();

        check_user_regs(regs);
        lockdep_hardirqs_off(CALLER_ADDR0);
        user_exit_irqoff();

        instrumentation_begin();
        CT_WARN_ON(state != CONTEXT_USER);
        trace_hardirqs_off_finish();
        instrumentation_end();
}
#else
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
        check_user_regs(regs);
        lockdep_hardirqs_off(CALLER_ADDR0);
        instrumentation_begin();
        trace_hardirqs_off_finish();
        instrumentation_end();
}
#endif

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall exit enables interrupts, but the kernel state is interrupts
 * disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Clear CPU buffers if CPU is affected by MDS and the mitigation is on.
 * 4) Tell lockdep that interrupts are enabled
 */
static __always_inline void exit_to_user_mode(void)
{
        instrumentation_begin();
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        instrumentation_end();

        user_enter_irqoff();
        mds_user_clear_cpu_buffers();
        lockdep_hardirqs_on(CALLER_ADDR0);
}

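/*
 * Syscall arguments live in different registers per ABI: the x86-64
 * convention uses di, si, dx, r10, r8, r9, while the ia32 convention
 * uses bx, cx, dx, si, di, bp. Audit records only the first four.
 */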
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
        if (arch == AUDIT_ARCH_X86_64) {
                audit_syscall_entry(regs->orig_ax, regs->di,
                                    regs->si, regs->dx, regs->r10);
        } else
#endif
        {
                audit_syscall_entry(regs->orig_ax, regs->bx,
                                    regs->cx, regs->dx, regs->si);
        }
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
        u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

        struct thread_info *ti = current_thread_info();
        unsigned long ret = 0;
        u32 work;

        work = READ_ONCE(ti->flags);

        if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
                ret = tracehook_report_syscall_entry(regs);
                if (ret || (work & _TIF_SYSCALL_EMU))
                        return -1L;
        }

#ifdef CONFIG_SECCOMP
        /*
         * Do seccomp after ptrace, to catch any tracer changes.
         */
        if (work & _TIF_SECCOMP) {
                struct seccomp_data sd;

                sd.arch = arch;
                sd.nr = regs->orig_ax;
                sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
                if (arch == AUDIT_ARCH_X86_64) {
                        sd.args[0] = regs->di;
                        sd.args[1] = regs->si;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->r10;
                        sd.args[4] = regs->r8;
                        sd.args[5] = regs->r9;
                } else
#endif
                {
                        sd.args[0] = regs->bx;
                        sd.args[1] = regs->cx;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->si;
                        sd.args[4] = regs->di;
                        sd.args[5] = regs->bp;
                }

                ret = __secure_computing(&sd);
                if (ret == -1)
                        return ret;
        }
#endif

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);

        do_audit_syscall_entry(regs, arch);
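
        /*
         * GNU "a ?: b" extension: return ret if it is nonzero (-1 means
         * skip the syscall), otherwise the syscall number from orig_ax.
         */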
        return ret ?: regs->orig_ax;
}

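/*
 * Work items that must all be processed, with IRQs off, before
 * returning to user mode; handled in exit_to_usermode_loop() below.
 */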
#define EXIT_TO_USERMODE_LOOP_FLAGS                             \
        (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |  \
         _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
        /*
         * In order to return to user mode, we need to have IRQs off with
         * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
         * can be set at any time on preemptible kernels if we have IRQs on,
         * so we need to loop. Disabling preemption wouldn't help: doing the
         * work to clear some of the flags can sleep.
         */
        while (true) {
                /* We have work to do. */
                local_irq_enable();

                if (cached_flags & _TIF_NEED_RESCHED)
                        schedule();

                if (cached_flags & _TIF_UPROBE)
                        uprobe_notify_resume(regs);

                if (cached_flags & _TIF_PATCH_PENDING)
                        klp_update_patch_state(current);

                /* deal with pending signal delivery */
                if (cached_flags & _TIF_SIGPENDING)
                        do_signal(regs);

                if (cached_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
                        rseq_handle_notify_resume(NULL, regs);
                }

                if (cached_flags & _TIF_USER_RETURN_NOTIFY)
                        fire_user_return_notifiers();

                /* Disable IRQs and retry */
                local_irq_disable();

                cached_flags = READ_ONCE(current_thread_info()->flags);

                if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                        break;
        }
}

static void __prepare_exit_to_usermode(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        u32 cached_flags;

        addr_limit_user_check();

        lockdep_assert_irqs_disabled();
        lockdep_sys_exit();

        cached_flags = READ_ONCE(ti->flags);

        if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                exit_to_usermode_loop(regs, cached_flags);

        /* Reload ti->flags; we may have rescheduled above. */
        cached_flags = READ_ONCE(ti->flags);

        if (unlikely(cached_flags & _TIF_IO_BITMAP))
                tss_update_io_bitmap();

        fpregs_assert_state_consistent();
        if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
                switch_fpu_return();

#ifdef CONFIG_COMPAT
        /*
         * Compat syscalls set TS_COMPAT. Make sure we clear it before
         * returning to user mode. We need to clear it *after* signal
         * handling, because syscall restart has a fixup for compat
         * syscalls. The fixup is exercised by the ptrace_syscall_32
         * selftest.
         *
         * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
         * special case only applies after poking regs and before the
         * very next return to user mode.
         */
        ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif
}

static noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
{
        instrumentation_begin();
        __prepare_exit_to_usermode(regs);
        instrumentation_end();
        exit_to_user_mode();
}

#define SYSCALL_EXIT_WORK_FLAGS                         \
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |      \
         _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
        bool step;

        audit_syscall_exit(regs);

        if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
                trace_sys_exit(regs, regs->ax);

        /*
         * If TIF_SYSCALL_EMU is set, we only get here because of
         * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
         * We already reported this syscall instruction in
         * syscall_trace_enter().
         */
        step = unlikely(
                (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
                == _TIF_SINGLESTEP);
        if (step || cached_flags & _TIF_SYSCALL_TRACE)
                tracehook_report_syscall_exit(regs, step);
}

static void __syscall_return_slowpath(struct pt_regs *regs)
{
        struct thread_info *ti = current_thread_info();
        u32 cached_flags = READ_ONCE(ti->flags);

        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

        if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
            WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
                local_irq_enable();

        rseq_syscall(regs);

        /*
         * First do one-time work. If these work items are enabled, we
         * want to run them exactly once per syscall exit with IRQs on.
         */
        if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
                syscall_slow_exit_work(regs, cached_flags);

        local_irq_disable();
        __prepare_exit_to_usermode(regs);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible noinstr void syscall_return_slowpath(struct pt_regs *regs)
{
        instrumentation_begin();
        __syscall_return_slowpath(regs);
        instrumentation_end();
        exit_to_user_mode();
}

static noinstr long syscall_enter(struct pt_regs *regs, unsigned long nr)
{
        struct thread_info *ti;

        enter_from_user_mode(regs);
        instrumentation_begin();

        local_irq_enable();
        ti = current_thread_info();
        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
                nr = syscall_trace_enter(regs);

        instrumentation_end();
        return nr;
}

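/*
 * array_index_nospec() clamps the already bounds-checked syscall number
 * even under speculative execution, so a mispredicted range check cannot
 * index past the syscall table (Spectre v1). x32 syscall numbers carry
 * __X32_SYSCALL_BIT (bit 30) and index a separate table.
 */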
#ifdef CONFIG_X86_64
__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
        nr = syscall_enter(regs, nr);

        instrumentation_begin();
        if (likely(nr < NR_syscalls)) {
                nr = array_index_nospec(nr, NR_syscalls);
                regs->ax = sys_call_table[nr](regs);
#ifdef CONFIG_X86_X32_ABI
        } else if (likely((nr & __X32_SYSCALL_BIT) &&
                          (nr & ~__X32_SYSCALL_BIT) < X32_NR_syscalls)) {
                nr = array_index_nospec(nr & ~__X32_SYSCALL_BIT,
                                        X32_NR_syscalls);
                regs->ax = x32_sys_call_table[nr](regs);
#endif
        }
        instrumentation_end();
        syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
{
        if (IS_ENABLED(CONFIG_IA32_EMULATION))
                current_thread_info()->status |= TS_COMPAT;

        /*
         * Subtlety here: if ptrace pokes something larger than 2^32-1 into
         * orig_ax, the unsigned int return value truncates it. This may
         * or may not be necessary, but it matches the old asm behavior.
         */
        return syscall_enter(regs, (unsigned int)regs->orig_ax);
}

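/*
 * Illustration of the truncation above (hypothetical value): a tracer
 * that pokes 0x100000001 into orig_ax enters the 32-bit syscall path
 * with nr == 1, matching the old asm behavior.
 */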
/*
 * Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
                                                  unsigned int nr)
{
        if (likely(nr < IA32_NR_syscalls)) {
                instrumentation_begin();
                nr = array_index_nospec(nr, IA32_NR_syscalls);
                regs->ax = ia32_sys_call_table[nr](regs);
                instrumentation_end();
        }
}

/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
        unsigned int nr = syscall_32_enter(regs);

        do_syscall_32_irqs_on(regs, nr);
        syscall_return_slowpath(regs);
}

static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
        unsigned int nr = syscall_32_enter(regs);
        int res;

        instrumentation_begin();
        /* Fetch EBP from where the vDSO stashed it. */
        if (IS_ENABLED(CONFIG_X86_64)) {
                /*
                 * Micro-optimization: the pointer we're following is
                 * explicitly 32 bits, so it can't be out of range.
                 */
                res = __get_user(*(u32 *)&regs->bp,
                         (u32 __user __force *)(unsigned long)(u32)regs->sp);
        } else {
                res = get_user(*(u32 *)&regs->bp,
                       (u32 __user __force *)(unsigned long)(u32)regs->sp);
        }
        instrumentation_end();

        if (res) {
                /* User code screwed up. */
                regs->ax = -EFAULT;
                syscall_return_slowpath(regs);
                return false;
        }

        /* Now this is just like a normal syscall. */
        do_syscall_32_irqs_on(regs, nr);
        syscall_return_slowpath(regs);
        return true;
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
{
        /*
         * Called using the internal vDSO SYSENTER/SYSCALL32 calling
         * convention. Adjust regs so it looks like we entered using int80.
         */
        unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
                                        vdso_image_32.sym_int80_landing_pad;

        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
         * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
         * Fix it up.
         */
        regs->ip = landing_pad;

        /* Invoke the syscall. If it failed, keep it simple: use IRET. */
        if (!__do_fast_syscall_32(regs))
                return 0;

#ifdef CONFIG_X86_64
        /*
         * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
         * SYSRETL is available on all 64-bit CPUs, so we don't need to
         * bother with SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         */
        return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
        /*
         * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         *
         * We don't allow syscalls at all from VM86 mode, but we still
         * need to check VM, because we might be returning from sys_vm86.
         */
        return static_cpu_has(X86_FEATURE_SEP) &&
                regs->cs == __USER_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible noinstr long do_SYSENTER_32(struct pt_regs *regs)
{
        /* SYSENTER loses RSP, but the vDSO saved it in RBP. */
        regs->sp = regs->bp;

        /* SYSENTER clobbers EFLAGS.IF. Assume it was set in usermode. */
        regs->flags |= X86_EFLAGS_IF;

        return do_fast_syscall_32(regs);
}
#endif

SYSCALL_DEFINE0(ni_syscall)
{
        return -ENOSYS;
}

/**
 * idtentry_enter - Handle state tracking on ordinary idtentries
 * @regs:	Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * As a precondition, this requires that the entry came from user mode,
 * idle, or a kernel context in which RCU is watching.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
 * invoked on entry and rcu_irq_exit() on exit.
 *
 * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking rcu_irq_enter() without undoing it.
 *
 * For user mode entries enter_from_user_mode() must be invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: An opaque object that must be passed to idtentry_exit()
 *
 * The return value must be fed into the state argument of
 * idtentry_exit().
 */
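/*
 * Sketch of typical usage (do_foo() stands in for any exception handler
 * body; it is not a real function in this file):
 *
 *	idtentry_state_t state = idtentry_enter(regs);
 *
 *	instrumentation_begin();
 *	do_foo(regs);
 *	instrumentation_end();
 *
 *	idtentry_exit(regs, state);
 */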
idtentry_state_t noinstr idtentry_enter(struct pt_regs *regs)
{
        idtentry_state_t ret = {
                .exit_rcu = false,
        };

        if (user_mode(regs)) {
                enter_from_user_mode(regs);
                return ret;
        }

        /*
         * If this entry hit the idle task invoke rcu_irq_enter() whether
         * RCU is watching or not.
         *
         * Interrupts can nest when the first interrupt invokes softirq
         * processing on return which enables interrupts.
         *
         * Scheduler ticks in the idle task can mark quiescent state and
         * terminate a grace period, if and only if the timer interrupt is
         * not nested into another interrupt.
         *
         * Checking for __rcu_is_watching() here would prevent the nesting
         * interrupt from invoking rcu_irq_enter(). If that nested interrupt
         * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
         * assume that it is the first interrupt and eventually claim
         * quiescent state and end grace periods prematurely.
         *
         * Unconditionally invoke rcu_irq_enter() so RCU state stays
         * consistent.
         *
         * TINY_RCU does not support EQS, so let the compiler eliminate
         * this part when enabled.
         */
        if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
                /*
                 * If RCU is not watching then the same careful
                 * sequence vs. lockdep and tracing is required
                 * as in enter_from_user_mode().
                 */
                lockdep_hardirqs_off(CALLER_ADDR0);
                rcu_irq_enter();
                instrumentation_begin();
                trace_hardirqs_off_finish();
                instrumentation_end();

                ret.exit_rcu = true;
                return ret;
        }

        /*
         * If RCU is watching then RCU only wants to check whether it needs
         * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
         * already contains a warning when RCU is not watching, so no point
         * in having another one here.
         */
        instrumentation_begin();
        rcu_irq_enter_check_tick();
        /* Use the combo lockdep/tracing function */
        trace_hardirqs_off();
        instrumentation_end();

        return ret;
}

static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
{
        if (may_sched && !preempt_count()) {
                /* Sanity check RCU and thread stack */
                rcu_irq_exit_check_preempt();
                if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                        WARN_ON_ONCE(!on_thread_stack());
                if (need_resched())
                        preempt_schedule_irq();
        }
        /* Covers both tracing and lockdep */
        trace_hardirqs_on();
}

/**
 * idtentry_exit - Handle return from exception that used idtentry_enter()
 * @regs:	Pointer to pt_regs (exception entry regs)
 * @state:	Return value from matching call to idtentry_enter()
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to idtentry_enter(). The return value of the entry
 * function must be fed into the @state argument.
 */
void noinstr idtentry_exit(struct pt_regs *regs, idtentry_state_t state)
{
        lockdep_assert_irqs_disabled();

        /* Check whether this returns to user mode */
        if (user_mode(regs)) {
                prepare_exit_to_usermode(regs);
        } else if (regs->flags & X86_EFLAGS_IF) {
                /*
                 * If RCU was not watching on entry this needs to be done
                 * carefully and needs the same ordering of lockdep/tracing
                 * and RCU as the return to user mode path.
                 */
                if (state.exit_rcu) {
                        instrumentation_begin();
                        /* Tell the tracer that IRET will enable interrupts */
                        trace_hardirqs_on_prepare();
                        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
                        instrumentation_end();
                        rcu_irq_exit();
                        lockdep_hardirqs_on(CALLER_ADDR0);
                        return;
                }

                instrumentation_begin();
                idtentry_exit_cond_resched(regs, IS_ENABLED(CONFIG_PREEMPTION));
                instrumentation_end();
        } else {
                /*
                 * IRQ flags state is correct already. Just tell RCU if it
                 * was not watching on entry.
                 */
                if (state.exit_rcu)
                        rcu_irq_exit();
        }
}

/**
 * idtentry_enter_user - Handle state tracking on idtentry from user mode
 * @regs:	Pointer to pt_regs of interrupted context
 *
 * Invokes enter_from_user_mode() to establish the proper context for
 * NOHZ_FULL. Otherwise scheduling on exit would not be possible.
 */
void noinstr idtentry_enter_user(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
}

/**
 * idtentry_exit_user - Handle return from exception to user mode
 * @regs:	Pointer to pt_regs (exception entry regs)
 *
 * Runs the necessary preemption and work checks and returns to the caller
 * with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to idtentry_enter_user().
 */
void noinstr idtentry_exit_user(struct pt_regs *regs)
{
        lockdep_assert_irqs_disabled();

        prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
 * Some hypercalls issued by the toolstack can take many 10s of
 * seconds. Allow tasks running hypercalls via the privcmd driver to
 * be voluntarily preempted even if full kernel preemption is
 * disabled.
 *
 * Such preemptible hypercalls are bracketed by
 * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
 * calls.
 */
DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);

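/*
 * Illustrative only: a privcmd-style caller makes a long-running
 * hypercall preemptible by bracketing it like this, which is what arms
 * the voluntary resched path in xen_pv_evtchn_do_upcall() below:
 *
 *	xen_preemptible_hcall_begin();
 *	ret = privcmd_call(hypercall, ...);	// the actual hypercall
 *	xen_preemptible_hcall_end();
 */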
/*
 * In case of scheduling the flag must be cleared and restored after
 * returning from schedule as the task might move to a different CPU.
 */
static __always_inline bool get_and_clear_inhcall(void)
{
        bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);

        __this_cpu_write(xen_in_preemptible_hcall, false);
        return inhcall;
}

static __always_inline void restore_inhcall(bool inhcall)
{
        __this_cpu_write(xen_in_preemptible_hcall, inhcall);
}
#else
static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
#endif

static void __xen_pv_evtchn_do_upcall(void)
{
        irq_enter_rcu();
        inc_irq_stat(irq_hv_callback_count);

        xen_hvm_evtchn_do_upcall();

        irq_exit_rcu();
}

__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        bool inhcall;
        idtentry_state_t state;

        state = idtentry_enter(regs);
        old_regs = set_irq_regs(regs);

        instrumentation_begin();
        run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
        instrumentation_end();

        set_irq_regs(old_regs);

        inhcall = get_and_clear_inhcall();
        if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
                instrumentation_begin();
                idtentry_exit_cond_resched(regs, true);
                instrumentation_end();
                restore_inhcall(inhcall);
        } else {
                idtentry_exit(regs, state);
        }
}
#endif /* CONFIG_XEN_PV */