/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H

#include <linux/static_call_types.h>
#include <linux/ptrace.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
#include <linux/context_tracking.h>
#include <linux/livepatch.h>
#include <linux/resume_user_mode.h>
#include <linux/tick.h>
#include <linux/kmsan.h>

#include <asm/entry-common.h>
/*
 * Define dummy _TIF work flags if not defined by the architecture or for
 * disabled functionality.
 */
#ifndef _TIF_PATCH_PENDING
# define _TIF_PATCH_PENDING	(0)
#endif

#ifndef _TIF_UPROBE
# define _TIF_UPROBE		(0)
#endif
/*
 * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_ENTER
# define ARCH_SYSCALL_WORK_ENTER	(0)
#endif
/*
 * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_EXIT
# define ARCH_SYSCALL_WORK_EXIT		(0)
#endif
#define SYSCALL_WORK_ENTER	(SYSCALL_WORK_SECCOMP |			\
				 SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_EMU |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 ARCH_SYSCALL_WORK_ENTER)
#define SYSCALL_WORK_EXIT	(SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 SYSCALL_WORK_SYSCALL_EXIT_TRAP |	\
				 ARCH_SYSCALL_WORK_EXIT)
/*
 * TIF flags handled in exit_to_user_mode_loop()
 */
#ifndef ARCH_EXIT_TO_USER_MODE_WORK
# define ARCH_EXIT_TO_USER_MODE_WORK	(0)
#endif
#define EXIT_TO_USER_MODE_WORK						\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
	 ARCH_EXIT_TO_USER_MODE_WORK)
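/*
 * Illustrative sketch (not taken from any particular architecture): an
 * architecture can fold its own TIF bits into the exit-to-user work mask by
 * defining ARCH_EXIT_TO_USER_MODE_WORK in its asm/entry-common.h, which is
 * included above before these definitions are evaluated. The flag and
 * helper names below are purely hypothetical:
 *
 *	// arch/foo/include/asm/entry-common.h
 *	#define ARCH_EXIT_TO_USER_MODE_WORK	(_TIF_FOO_NOTIFY)
 *
 *	static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
 *						       unsigned long ti_work)
 *	{
 *		if (ti_work & _TIF_FOO_NOTIFY)
 *			foo_handle_notify(regs);	// hypothetical helper
 *	}
 *	#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work
 */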
/**
 * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
 * @regs:	Pointer to current's pt_regs
 *
 * Defaults to an empty implementation. Can be replaced by architecture
 * specific code.
 *
 * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
 * section. Use __always_inline so the compiler cannot push it out of line
 * and make it instrumentable.
 */
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);

#ifndef arch_enter_from_user_mode
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
#endif
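/*
 * A minimal sketch of how an architecture replaces the empty default, kept
 * here as an illustration only. The check itself is made up; the important
 * parts are the __always_inline marking and the self-named #define which
 * disables the fallback above:
 *
 *	// arch/foo/include/asm/entry-common.h
 *	static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
 *	{
 *		// Hypothetical sanity check: regs must describe user mode.
 *		if (unlikely(!user_mode(regs)))
 *			BUG();
 *	}
 *	#define arch_enter_from_user_mode arch_enter_from_user_mode
 */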
/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct and interrupts are still
 * disabled. The subsequent functions can be instrumented.
 *
 * This is invoked when there is architecture specific functionality to be
 * done between establishing state and enabling interrupts. The caller must
 * enable interrupts before invoking syscall_enter_from_user_mode_work().
 */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	arch_enter_from_user_mode(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(__ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	instrumentation_end();
}
/**
 * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This handles lockdep, RCU (context tracking) and tracing state, i.e.
 * the functionality provided by enter_from_user_mode().
 *
 * This is invoked when there is extra architecture specific functionality
 * to be done between establishing state and handling user mode entry work.
 */
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
long syscall_trace_enter(struct pt_regs *regs, long syscall,
			 unsigned long work);
/**
 * syscall_enter_from_user_mode_work - Check and handle work before invoking
 *				       a syscall
 * @regs:	Pointer to current's pt_regs
 * @syscall:	The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
 * architecture specific work.
 *
 * Returns: The original or a modified syscall number
 *
 * If the returned syscall number is -1 then the syscall should be
 * skipped. In this case the caller may invoke syscall_set_error() or
 * syscall_set_return_value() first. If neither of those are called and -1
 * is returned, then the syscall will fail with ENOSYS.
 *
 * It handles the following work items:
 *
 *  1) syscall_work flag dependent invocations of
 *     ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
 *  2) Invocation of audit_syscall_entry()
 */
static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (work & SYSCALL_WORK_ENTER)
		syscall = syscall_trace_enter(regs, syscall, work);

	return syscall;
}
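/*
 * Illustrative sketch of the split entry variant (not lifted from a real
 * architecture; the arch helper name is made up). Interrupts are already
 * enabled when syscall_enter_from_user_mode_prepare() returns, so the work
 * call runs with interrupts on:
 *
 *	syscall_enter_from_user_mode_prepare(regs);
 *	arch_do_extra_entry_work(regs);		// hypothetical arch specific step
 *	nr = syscall_enter_from_user_mode_work(regs, nr);
 *	// dispatch nr as sketched after syscall_enter_from_user_mode() below
 */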
/**
 * syscall_enter_from_user_mode - Establish state and check and handle work
 *				  before invoking a syscall
 * @regs:	Pointer to current's pt_regs
 * @syscall:	The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This is a combination of syscall_enter_from_user_mode_prepare() and
 * syscall_enter_from_user_mode_work().
 *
 * Returns: The original or a modified syscall number. See
 * syscall_enter_from_user_mode_work() for further explanation.
 */
static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = syscall_enter_from_user_mode_work(regs, syscall);
	instrumentation_end();

	return ret;
}
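/*
 * A minimal sketch of the common case, assuming a hypothetical architecture
 * syscall entry path; the register names and the dispatch details are made
 * up, only the ordering matters. Note the -1 convention: when the work
 * functions return -1 the dispatch is skipped and the preset -ENOSYS return
 * value is what user space sees, unless the tracer set something else:
 *
 *	static void noinstr do_foo_syscall(struct pt_regs *regs)
 *	{
 *		long nr = syscall_enter_from_user_mode(regs, regs->syscall_nr);
 *
 *		instrumentation_begin();
 *		regs->retval = -ENOSYS;		// default when the syscall is skipped
 *		if (nr >= 0 && nr < NR_syscalls)
 *			regs->retval = sys_call_table[nr](regs);
 *		instrumentation_end();
 *
 *		syscall_exit_to_user_mode(regs);
 *	}
 */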
/**
 * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Defaults to local_irq_enable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_enable_exit_to_user(unsigned long ti_work);

#ifndef local_irq_enable_exit_to_user
static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
{
	local_irq_enable();
}
#endif
/**
 * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
 *
 * Defaults to local_irq_disable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_disable_exit_to_user(void);

#ifndef local_irq_disable_exit_to_user
static inline void local_irq_disable_exit_to_user(void)
{
	local_irq_disable();
}
#endif
/**
 * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
 *				 to user mode
 * @regs:	Pointer to current's pt_regs
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_loop() with interrupts enabled.
 *
 * Defaults to NOOP. Can be supplied by architecture specific code.
 */
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work);

#ifndef arch_exit_to_user_mode_work
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work)
{
}
#endif
/**
 * arch_exit_to_user_mode_prepare - Architecture specific preparation for
 *				    exit to user mode
 * @regs:	Pointer to current's pt_regs
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the
 * last function before return. Defaults to NOOP.
 */
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work);

#ifndef arch_exit_to_user_mode_prepare
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
}
#endif
/**
 * arch_exit_to_user_mode - Architecture specific final work before
 *			    exit to user mode
 *
 * Invoked from exit_to_user_mode() with interrupts disabled as the last
 * function before return. Defaults to NOOP.
 *
 * This needs to be __always_inline because it is non-instrumentable code
 * invoked after context tracking switched to user mode.
 *
 * An architecture implementation must not do anything complex, no locking
 * etc. The main purpose is for speculation mitigations.
 */
static __always_inline void arch_exit_to_user_mode(void);

#ifndef arch_exit_to_user_mode
static __always_inline void arch_exit_to_user_mode(void) { }
#endif
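/*
 * Purely hypothetical sketch of an override, only to illustrate the
 * constraints above: __always_inline, no locking, nothing instrumentable.
 * The mitigation helper name is invented:
 *
 *	// arch/foo/include/asm/entry-common.h
 *	static __always_inline void arch_exit_to_user_mode(void)
 *	{
 *		foo_flush_predictor_state();	// hypothetical, must be noinstr safe
 *	}
 *	#define arch_exit_to_user_mode arch_exit_to_user_mode
 */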
/**
 * arch_do_signal_or_restart - Architecture specific signal delivery function
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from exit_to_user_mode_loop().
 */
void arch_do_signal_or_restart(struct pt_regs *regs);
/**
 * exit_to_user_mode_loop - do any pending work before leaving to user space
 * @regs:	Pointer to pt_regs on entry stack
 * @ti_work:	TIF work flags as read by the caller
 */
unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
				     unsigned long ti_work);
/**
 * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
 * @regs:	Pointer to pt_regs on entry stack
 *
 * 1) check that interrupts are disabled
 * 2) call tick_nohz_user_enter_prepare()
 * 3) call exit_to_user_mode_loop() if any flags from
 *    EXIT_TO_USER_MODE_WORK are set
 * 4) check that interrupts are still disabled
 */
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work;

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	ti_work = read_thread_flags();
	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that kernel state is sane for a return to userspace */
	lockdep_assert_irqs_disabled();
}
/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.: arch_exit_to_user_mode()
 * 4) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code when syscall_exit_to_user_mode()
 * is not suitable as the last step before returning to userspace. Must be
 * invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke syscall_exit_to_user_mode_work() before this.
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
/**
 * syscall_exit_to_user_mode_work - Handle work before returning to user mode
 * @regs:	Pointer to current's pt_regs
 *
 * Same as step 1 and 2 of syscall_exit_to_user_mode() but without calling
 * exit_to_user_mode() to perform the final transition to user mode.
 *
 * Calling convention is the same as for syscall_exit_to_user_mode() and it
 * returns with all work handled and interrupts disabled. The caller must
 * invoke exit_to_user_mode() before actually switching to user mode to
 * make the final state transitions. Interrupts must stay disabled between
 * return from this function and the invocation of exit_to_user_mode().
 */
void syscall_exit_to_user_mode_work(struct pt_regs *regs);
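/*
 * Illustrative sketch of the split exit sequence (not taken from a real
 * architecture; the arch helper is made up). The work function is called
 * with interrupts enabled and returns with them disabled; per the rules
 * above they must stay disabled until exit_to_user_mode() has run:
 *
 *	syscall_exit_to_user_mode_work(regs);
 *	arch_prepare_return_to_user(regs);	// hypothetical, must not enable interrupts
 *	exit_to_user_mode();
 *	// low level ASM restores registers and returns to user space
 */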
/**
 * syscall_exit_to_user_mode - Handle work before returning to user mode
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked with interrupts enabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific syscall and ret
 * from fork code.
 *
 * The call order is:
 *  1) One-time syscall exit work:
 *	- rseq syscall exit
 *	- audit
 *	- syscall tracing
 *	- ptrace (single stepping)
 *
 *  2) Preparatory work
 *	- Exit to user mode loop (common TIF handling). Invokes
 *	  arch_exit_to_user_mode_work() for architecture specific TIF work
 *	- Architecture specific one time work arch_exit_to_user_mode_prepare()
 *	- Address limit and lockdep checks
 *
 *  3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
 *     functionality in exit_to_user_mode().
 *
 * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
 * exit_to_user_mode(). This function is preferred unless there is a
 * compelling architectural reason to use the separate functions.
 */
void syscall_exit_to_user_mode(struct pt_regs *regs);
/**
 * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from architecture specific entry code with interrupts disabled.
 * Can only be called when the interrupt entry came from user mode. The
 * calling code must be non-instrumentable. When the function returns all
 * state is correct and the subsequent functions can be instrumented.
 *
 * The function establishes state (lockdep, RCU (context tracking), tracing).
 */
void irqentry_enter_from_user_mode(struct pt_regs *regs);
/**
 * irqentry_exit_to_user_mode - Interrupt exit work
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked with interrupts disabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific interrupt
 * handling code.
 *
 * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
 * Interrupt exit is not invoking #1 which is the syscall specific one time
 * work.
 */
void irqentry_exit_to_user_mode(struct pt_regs *regs);
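/*
 * Minimal sketch of an architecture interrupt path that is known to have
 * come from user mode, assuming a hypothetical wrapper name; real entry
 * code is considerably more involved:
 *
 *	static void noinstr foo_handle_irq_from_user(struct pt_regs *regs)
 *	{
 *		irqentry_enter_from_user_mode(regs);
 *
 *		instrumentation_begin();
 *		generic_handle_arch_irq(regs);	// or an arch specific dispatcher
 *		instrumentation_end();
 *
 *		irqentry_exit_to_user_mode(regs);
 *	}
 */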
#ifndef irqentry_state
/**
 * struct irqentry_state - Opaque object for exception state storage
 * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
 *            exit path has to invoke ct_irq_exit().
 * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
 *           lockdep state is restored correctly on exit from nmi.
 *
 * This opaque object is filled in by the irqentry_*_enter() functions and
 * must be passed back into the corresponding irqentry_*_exit() functions
 * when the exception is complete.
 *
 * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
 * and all members private. Descriptions of the members are provided to aid in
 * the maintenance of the irqentry_*() functions.
 */
typedef struct irqentry_state {
	union {
		bool	exit_rcu;
		bool	lockdep;
	};
} irqentry_state_t;
#endif
/**
 * irqentry_enter - Handle state tracking on ordinary interrupt entries
 * @regs:	Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * As a precondition, this requires that the entry came from user mode,
 * idle, or a kernel context in which RCU is watching.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then ct_irq_enter() has to be
 * invoked on entry and ct_irq_exit() on exit.
 *
 * Avoiding the ct_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking ct_irq_enter() without undoing it.
 *
 * For user mode entries irqentry_enter_from_user_mode() is invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: An opaque object that must be passed to irqentry_exit()
 */
irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
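/*
 * Illustrative sketch of how an exception handler brackets its work with
 * irqentry_enter()/irqentry_exit(); the handler name and the fault helper
 * are made up, but the handling of the opaque state object is the required
 * pattern:
 *
 *	static void noinstr foo_handle_page_fault(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		foo_do_page_fault(regs);	// hypothetical instrumentable C handler
 *		instrumentation_end();
 *
 *		irqentry_exit(regs, state);
 *	}
 */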
/**
 * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
 *
 * Conditional reschedule with additional sanity checks.
 */
void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define irqentry_exit_cond_resched_dynamic_enabled	raw_irqentry_exit_cond_resched
#define irqentry_exit_cond_resched_dynamic_disabled	NULL
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#define irqentry_exit_cond_resched()	static_call(irqentry_exit_cond_resched)()
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void);
#define irqentry_exit_cond_resched()	dynamic_irqentry_exit_cond_resched()
#endif
#else /* CONFIG_PREEMPT_DYNAMIC */
#define irqentry_exit_cond_resched()	raw_irqentry_exit_cond_resched()
#endif /* CONFIG_PREEMPT_DYNAMIC */
/**
 * irqentry_exit - Handle return from exception that used irqentry_enter()
 * @regs:	Pointer to pt_regs (exception entry regs)
 * @state:	Return value from matching call to irqentry_enter()
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to irqentry_enter().
 */
void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
/**
 * irqentry_nmi_enter - Handle NMI entry
 * @regs:	Pointer to current's pt_regs
 *
 * Similar to irqentry_enter() but taking care of the NMI constraints.
 */
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
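/*
 * Sketch of the corresponding NMI bracketing, with a hypothetical handler
 * name; the state returned by irqentry_nmi_enter() must be handed back to
 * irqentry_nmi_exit(), declared below:
 *
 *	static void noinstr foo_handle_nmi(struct pt_regs *regs)
 *	{
 *		irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 *
 *		instrumentation_begin();
 *		foo_do_nmi(regs);		// hypothetical NMI work
 *		instrumentation_end();
 *
 *		irqentry_nmi_exit(regs, irq_state);
 *	}
 */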
/**
 * irqentry_nmi_exit - Handle return from NMI handling
 * @regs:	Pointer to pt_regs (NMI entry regs)
 * @irq_state:	Return value from matching call to irqentry_nmi_enter()
 *
 * Last action before returning to the low level assembly code.
 *
 * Counterpart to irqentry_nmi_enter().
 */
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);

#endif