// SPDX-License-Identifier: GPL-2.0-only
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#ifdef CONFIG_XEN_PV
#include <xen/xen-ops.h>
#include <xen/events.h>
#endif

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/syscall.h>
#include <asm/irq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall entry disables interrupts, but user mode is traced as interrupts
 * enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 */
static noinstr void enter_from_user_mode(void)
{
	enum ctx_state state = ct_state();

	lockdep_hardirqs_off(CALLER_ADDR0);
	user_exit_irqoff();

	instrumentation_begin();
	CT_WARN_ON(state != CONTEXT_USER);
	trace_hardirqs_off_finish();
	instrumentation_end();
}
#else
static __always_inline void enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}
#endif

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall exit enables interrupts, but the kernel state is interrupts
 * disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Clear CPU buffers if CPU is affected by MDS and the mitigation is on.
 * 4) Tell lockdep that interrupts are enabled
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	mds_user_clear_cpu_buffers();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

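/*
 * For illustration, the syscall paths below pair these helpers around the
 * instrumentable body, roughly:
 *
 *	enter_from_user_mode();
 *	instrumentation_begin();
 *	... syscall work, may enable interrupts ...
 *	instrumentation_end();
 *	exit_to_user_mode();
 *
 * See do_syscall_64() and do_int80_syscall_32() for the real sequences.
 */
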
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

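/*
 * Note that audit_syscall_entry() records only the first four arguments,
 * taken from the per-ABI argument registers (di/si/dx/r10 for 64-bit,
 * bx/cx/dx/si for 32-bit); seccomp below captures all six.
 */
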
/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = READ_ONCE(ti->flags);

	if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
		ret = tracehook_report_syscall_entry(regs);
		if (ret || (work & _TIF_SYSCALL_EMU))
			return -1L;
	}

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}

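/*
 * For illustration, do_syscall_64() below consumes the return value as
 *
 *	nr = syscall_trace_enter(regs);
 *
 * where a return of -1 fails the nr < NR_syscalls bounds check, so the
 * dispatch is skipped and regs->ax keeps whatever the tracer or seccomp
 * stored there.
 */
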
#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

static void __prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

	/* Reload ti->flags; we may have rescheduled above. */
	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & _TIF_IO_BITMAP))
		tss_update_io_bitmap();

	fpregs_assert_state_consistent();
	if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif
}

__visible noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
{
	instrumentation_begin();
	__prepare_exit_to_usermode(regs);
	instrumentation_end();
	exit_to_user_mode();
}

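/*
 * noinstr functions must not call instrumentable code outside an
 * instrumentation_begin()/instrumentation_end() pair; hence these thin
 * wrappers bracketing the instrumentable __-prefixed helpers.
 */
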
#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

static void __syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	rseq_syscall(regs);

	/*
	 * First do one-time work. If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	__prepare_exit_to_usermode(regs);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible noinstr void syscall_return_slowpath(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_return_slowpath(regs);
	instrumentation_end();
	exit_to_user_mode();
}

#ifdef CONFIG_X86_64
__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	struct thread_info *ti;

	enter_from_user_mode();
	instrumentation_begin();

	local_irq_enable();
	ti = current_thread_info();
	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
#ifdef CONFIG_X86_X32_ABI
	} else if (likely((nr & __X32_SYSCALL_BIT) &&
			  (nr & ~__X32_SYSCALL_BIT) < X32_NR_syscalls)) {
		nr = array_index_nospec(nr & ~__X32_SYSCALL_BIT,
					X32_NR_syscalls);
		regs->ax = x32_sys_call_table[nr](regs);
#endif
	}
	__syscall_return_slowpath(regs);

	instrumentation_end();
	exit_to_user_mode();
}
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
 * all entry and exit work and returns with IRQs off. This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it. This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
		regs->ax = ia32_sys_call_table[nr](regs);
	}

	__syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	instrumentation_begin();

	local_irq_enable();
	do_syscall_32_irqs_on(regs);

	instrumentation_end();
	exit_to_user_mode();
}

static bool __do_fast_syscall_32(struct pt_regs *regs)
{
	int res;

	/* Fetch EBP from where the vDSO stashed it. */
	if (IS_ENABLED(CONFIG_X86_64)) {
		/*
		 * Micro-optimization: the pointer we're following is
		 * explicitly 32 bits, so it can't be out of range.
		 */
		res = __get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp);
	} else {
		res = get_user(*(u32 *)&regs->bp,
		       (u32 __user __force *)(unsigned long)(u32)regs->sp);
	}

	if (res) {
		/* User code screwed up. */
		regs->ax = -EFAULT;

		local_irq_disable();
		__prepare_exit_to_usermode(regs);
		return false;
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);
	return true;
}

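/*
 * Background: in the 32-bit convention EBP carries the sixth syscall
 * argument (see the seccomp args[5] assignment above), but the vDSO's
 * fast-syscall sequence needs EBP itself, so it stashes the real value
 * on the user stack at the address now held in regs->sp.
 */
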
/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention. Adjust regs so it looks like we entered using int80.
	 */
	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
					vdso_image_32.sym_int80_landing_pad;
	bool success;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();
	instrumentation_begin();

	local_irq_enable();
	success = __do_fast_syscall_32(regs);

	instrumentation_end();
	exit_to_user_mode();

	/* If it failed, keep it simple: use IRET. */
	if (!success)
		return 0;

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF |
				X86_EFLAGS_VM)) == 0;
#endif
}
#endif

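/*
 * For illustration, the entry assembly treats the return value as a
 * boolean, roughly:
 *
 *	call	do_fast_syscall_32
 *	testl	%eax, %eax
 *	jz	.Lsyscall_32_done	# 0 -> return with IRET
 *
 * (sketch only; see entry_32.S and entry_64_compat.S for the real code).
 */
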
SYSCALL_DEFINE0(ni_syscall)
{
	return -ENOSYS;
}

/**
 * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
 *			     RCU handling
 * @regs:	Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
 * invoked on entry and rcu_irq_exit() on exit.
 *
 * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking rcu_irq_enter() without undoing it.
 *
 * For user mode entries enter_from_user_mode() must be invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: True if RCU has been adjusted on a kernel entry
 *
 * The return value must be fed into the rcu_exit argument of
 * idtentry_exit_cond_rcu().
 */
bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		enter_from_user_mode();
		return false;
	}

	/*
	 * If this entry hit the idle task invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for __rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking rcu_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		return true;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	/* Use the combo lockdep/tracing function */
	trace_hardirqs_off();
	instrumentation_end();

	return false;
}

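/*
 * For illustration, a typical caller pairs these as (mirroring
 * xen_pv_evtchn_do_upcall() below):
 *
 *	bool rcu_exit = idtentry_enter_cond_rcu(regs);
 *
 *	instrumentation_begin();
 *	... exception handler body ...
 *	instrumentation_end();
 *
 *	idtentry_exit_cond_rcu(regs, rcu_exit);
 */
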
static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
{
	if (may_sched && !preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
	/* Covers both tracing and lockdep */
	trace_hardirqs_on();
}

/**
 * idtentry_exit_cond_rcu - Handle return from exception with conditional RCU
 *			    handling
 * @regs:	Pointer to pt_regs (exception entry regs)
 * @rcu_exit:	Invoke rcu_irq_exit() if true
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to idtentry_enter_cond_rcu(). The return value of the entry
 * function must be fed into the @rcu_exit argument.
 */
void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		prepare_exit_to_usermode(regs);
	} else if (regs->flags & X86_EFLAGS_IF) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (rcu_exit) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		idtentry_exit_cond_resched(regs, IS_ENABLED(CONFIG_PREEMPTION));
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (rcu_exit)
			rcu_irq_exit();
	}
}

/**
 * idtentry_enter_user - Handle state tracking on idtentry from user mode
 * @regs:	Pointer to pt_regs of interrupted context
 *
 * Invokes enter_from_user_mode() to establish the proper context for
 * NOHZ_FULL. Otherwise scheduling on exit would not be possible.
 */
void noinstr idtentry_enter_user(struct pt_regs *regs)
{
	enter_from_user_mode();
}

/**
 * idtentry_exit_user - Handle return from exception to user mode
 * @regs:	Pointer to pt_regs (exception entry regs)
 *
 * Runs the necessary preemption and work checks and returns to the caller
 * with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to idtentry_enter_user().
 */
void noinstr idtentry_exit_user(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
 * Some hypercalls issued by the toolstack can take many tens of
 * seconds. Allow tasks running hypercalls via the privcmd driver to
 * be voluntarily preempted even if full kernel preemption is
 * disabled.
 *
 * Such preemptible hypercalls are bracketed by
 * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
 * calls.
 */
DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);

/*
 * In case of scheduling the flag must be cleared and restored after
 * returning from schedule as the task might move to a different CPU.
 */
static __always_inline bool get_and_clear_inhcall(void)
{
	bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);

	__this_cpu_write(xen_in_preemptible_hcall, false);
	return inhcall;
}

static __always_inline void restore_inhcall(bool inhcall)
{
	__this_cpu_write(xen_in_preemptible_hcall, inhcall);
}
#else
static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
#endif

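/*
 * For illustration, a preemptible hypercall in the privcmd driver is
 * bracketed roughly as (see include/xen/xen-ops.h for the real helpers,
 * which set and clear this per-CPU flag):
 *
 *	xen_preemptible_hcall_begin();
 *	ret = HYPERVISOR_dm_op(...);	(a long-running hypercall, sketch)
 *	xen_preemptible_hcall_end();
 */
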
static void __xen_pv_evtchn_do_upcall(void)
{
	irq_enter_rcu();
	inc_irq_stat(irq_hv_callback_count);

	xen_hvm_evtchn_do_upcall();

	irq_exit_rcu();
}

__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	bool inhcall, rcu_exit;

	rcu_exit = idtentry_enter_cond_rcu(regs);
	old_regs = set_irq_regs(regs);

	instrumentation_begin();
	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
	instrumentation_end();

	set_irq_regs(old_regs);

	inhcall = get_and_clear_inhcall();
	if (inhcall && !WARN_ON_ONCE(rcu_exit)) {
		instrumentation_begin();
		idtentry_exit_cond_resched(regs, true);
		instrumentation_end();
		restore_inhcall(inhcall);
	} else {
		idtentry_exit_cond_rcu(regs, rcu_exit);
	}
}
#endif /* CONFIG_XEN_PV */