// SPDX-License-Identifier: GPL-2.0-only
/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

DEFINE_STATIC_KEY_FALSE(context_tracking_key);
EXPORT_SYMBOL_GPL(context_tracking_key);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);
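
/*
 * Re-entrancy guard: a per-CPU recursion counter prevents nested updates to
 * the context tracking state, for example when an exception fires in the
 * middle of a state transition. Only the outermost caller proceeds; nested
 * callers back out, and an imbalanced (< 1) count triggers a one-time warning.
 */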
static noinstr bool context_tracking_recursion_enter(void)
{
        int recursion;

        recursion = __this_cpu_inc_return(context_tracking.recursion);
        if (recursion == 1)
                return true;

        WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
        __this_cpu_dec(context_tracking.recursion);
        return false;
}

static __always_inline void context_tracking_recursion_exit(void)
{
        __this_cpu_dec(context_tracking.recursion);
}

/**
 * context_tracking_enter - Inform the context tracking that the CPU is going
 *                          to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed the remaining kernel
 * instructions to execute won't use any RCU read side critical section
 * because this function sets RCU in an extended quiescent state.
 */
void noinstr __context_tracking_enter(enum ctx_state state)
{
        /* Kernel threads aren't supposed to go to userspace */
        WARN_ON_ONCE(!current->mm);

        if (!context_tracking_recursion_enter())
                return;

        if (__this_cpu_read(context_tracking.state) != state) {
                if (__this_cpu_read(context_tracking.active)) {
                        /*
                         * At this stage, only low level arch entry code remains and
                         * then we'll run in userspace. We can assume there won't be
                         * any RCU read-side critical section until the next call to
                         * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
                         * on the tick.
                         */
                        if (state == CONTEXT_USER) {
                                instrumentation_begin();
                                trace_user_enter(0);
                                vtime_user_enter(current);
                                instrumentation_end();
                        }
                        rcu_user_enter();
                }
                /*
                 * Even if context tracking is disabled on this CPU, because it's outside
                 * the full dynticks mask for example, we still have to keep track of the
                 * context transitions and states to prevent inconsistency on those of
                 * other CPUs.
                 * If a task triggers an exception in userspace, sleeps in the exception
                 * handler and then migrates to another CPU, that new CPU must know where
                 * the exception returns by the time we call exception_exit().
                 * This information can only be provided by the previous CPU when it called
                 * exception_enter().
                 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
                 * is false because we know that CPU is not tickless.
                 */
                __this_cpu_write(context_tracking.state, state);
        }
        context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__context_tracking_enter);
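
/*
 * Illustrative sketch only, assuming a typical arch return-to-user path: the
 * architecture's entry code calls into context tracking with IRQs disabled
 * right before the final switch to user mode, e.g.:
 *
 *	noinstr void exit_to_user_mode(void)
 *	{
 *		...
 *		user_enter_irqoff();
 *		arch_exit_to_user_mode();
 *	}
 *
 * user_enter_irqoff() ends up in __context_tracking_enter(CONTEXT_USER) when
 * context tracking is enabled. The helper names above are examples and vary
 * across kernel versions and architectures.
 */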

/*
 * This function should be noinstr but the below local_irq_restore() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __context_tracking_enter() through user_enter_irqoff()
 * or context_tracking_guest_enter(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void context_tracking_enter(enum ctx_state state)
{
        unsigned long flags;

        /*
         * Some contexts may involve an exception occurring in an irq,
         * leading to that nesting:
         * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
         * This would mess up the dyntick_nesting count though. And rcu_irq_*()
         * helpers are enough to protect RCU uses inside the exception. So
         * just return immediately if we detect we are in an IRQ.
         */
        if (in_interrupt())
                return;

        local_irq_save(flags);
        __context_tracking_enter(state);
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
EXPORT_SYMBOL_GPL(context_tracking_enter);

/*
 * This function should be noinstr but it unsafely calls local_irq_restore(),
 * involving illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call user_enter_irqoff(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void context_tracking_user_enter(void)
{
        user_enter();
}
NOKPROBE_SYMBOL(context_tracking_user_enter);

/**
 * context_tracking_exit - Inform the context tracking that the CPU is
 *                         exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from user or
 * guest space before any use of RCU read side critical section. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void noinstr __context_tracking_exit(enum ctx_state state)
{
        if (!context_tracking_recursion_enter())
                return;

        if (__this_cpu_read(context_tracking.state) == state) {
                if (__this_cpu_read(context_tracking.active)) {
                        /*
                         * We are going to run code that may use RCU. Inform
                         * RCU core about that (ie: we may need the tick again).
                         */
                        rcu_user_exit();
                        if (state == CONTEXT_USER) {
                                instrumentation_begin();
                                vtime_user_exit(current);
                                trace_user_exit(0);
                                instrumentation_end();
                        }
                }
                __this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
        }
        context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__context_tracking_exit);
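
/*
 * Illustrative sketch only, assuming a typical arch kernel-entry path: on
 * entry from user space (syscall or exception), the transition is reported
 * with IRQs disabled before any RCU read-side usage, e.g.:
 *
 *	noinstr void enter_from_user_mode(void)
 *	{
 *		user_exit_irqoff();
 *		...
 *	}
 *
 * user_exit_irqoff() ends up in __context_tracking_exit(CONTEXT_USER) when
 * context tracking is enabled. Older/generic code paths instead pair
 * exception_enter() with exception_exit(), which go through the
 * context_tracking_exit()/context_tracking_enter() wrappers. Helper names
 * are examples and vary across kernel versions and architectures.
 */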

/*
 * This function should be noinstr but the below local_irq_save() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __context_tracking_exit() through user_exit_irqoff()
 * or context_tracking_guest_exit(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void context_tracking_exit(enum ctx_state state)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);
        __context_tracking_exit(state);
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
EXPORT_SYMBOL_GPL(context_tracking_exit);

/*
 * This function should be noinstr but it unsafely calls local_irq_save(),
 * involving illegal RCU uses through tracing and lockdep. This is unlikely
 * to be fixed as this function is obsolete. The preferred way is to call
 * user_exit_irqoff(). It should be the arch entry code's responsibility to
 * call into context tracking with IRQs disabled.
 */
void context_tracking_user_exit(void)
{
        user_exit();
}
NOKPROBE_SYMBOL(context_tracking_user_exit);
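
/*
 * Guest transitions use the same state machine: virtualization code is
 * expected to enter CONTEXT_GUEST around guest execution through the
 * context_tracking_guest_enter()/context_tracking_guest_exit() style helpers
 * mentioned above, so time spent in the guest counts as an RCU extended
 * quiescent state too. Descriptive note; exact helpers depend on the kernel
 * version.
 */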

void __init context_tracking_cpu_set(int cpu)
{
        static __initdata bool initialized = false;

        if (!per_cpu(context_tracking.active, cpu)) {
                per_cpu(context_tracking.active, cpu) = true;
                static_branch_inc(&context_tracking_key);
        }

        if (initialized)
                return;

#ifdef CONFIG_HAVE_TIF_NOHZ
        /*
         * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
         * This assumes that init is the only task at this early boot stage.
         */
        set_tsk_thread_flag(&init_task, TIF_NOHZ);
#endif
        WARN_ON_ONCE(!tasklist_empty());

        initialized = true;
}
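
/*
 * Illustrative note: with nohz_full, early boot code is expected to call
 * context_tracking_cpu_set() for each CPU in the nohz_full set, which flips
 * the static key above so context_tracking_enabled() reports true. The exact
 * call site depends on the kernel version (typically the NO_HZ_FULL
 * initialization path in the tick code).
 */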

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                context_tracking_cpu_set(cpu);
}
#endif