context_tracking: Take IRQ eqs entrypoints over RCU
kernel/context_tracking.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 *  Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>


#ifdef CONFIG_CONTEXT_TRACKING_IDLE
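/*
 * Idle and IRQ extended quiescent state (eqs) entrypoints, taken over
 * from RCU: context tracking now owns these hooks and simply forwards
 * to the RCU implementation.
 */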
noinstr void ct_idle_enter(void)
{
        rcu_idle_enter();
}
EXPORT_SYMBOL_GPL(ct_idle_enter);

void ct_idle_exit(void)
{
        rcu_idle_exit();
}
EXPORT_SYMBOL_GPL(ct_idle_exit);

noinstr void ct_irq_enter(void)
{
        rcu_irq_enter();
}

noinstr void ct_irq_exit(void)
{
        rcu_irq_exit();
}

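/*
 * Variants of the above that may be called with IRQs enabled: the
 * rcu_irq_*_irqson() helpers disable and restore IRQs themselves
 * around the state transition.
 */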
void ct_irq_enter_irqson(void)
{
        rcu_irq_enter_irqson();
}

void ct_irq_exit_irqson(void)
{
        rcu_irq_exit_irqson();
}
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */

#ifdef CONFIG_CONTEXT_TRACKING_USER

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

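/*
 * Static key, false by default: flipped on once the first CPU enables
 * user context tracking (see ct_cpu_track_user() below), so the probes
 * cost nothing on kernels where the feature is unused.
 */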
DEFINE_STATIC_KEY_FALSE(context_tracking_key);
EXPORT_SYMBOL_GPL(context_tracking_key);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

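/*
 * Recursion guard: these hooks may recurse through instrumentation
 * (tracing, vtime), so only the outermost call on a CPU proceeds and
 * nested calls bail out early.
 */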
static noinstr bool context_tracking_recursion_enter(void)
{
        int recursion;

        recursion = __this_cpu_inc_return(context_tracking.recursion);
        if (recursion == 1)
                return true;

        WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
        __this_cpu_dec(context_tracking.recursion);

        return false;
}

static __always_inline void context_tracking_recursion_exit(void)
{
        __this_cpu_dec(context_tracking.recursion);
}

/**
 * __ct_user_enter - Inform the context tracking that the CPU is going
 *                   to enter user or guest space mode.
 * @state: the new context tracking state (CONTEXT_USER or CONTEXT_GUEST)
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it is guaranteed that the remaining kernel
 * instructions to execute won't use any RCU read side critical section,
 * because this function sets RCU in extended quiescent state.
 */
void noinstr __ct_user_enter(enum ctx_state state)
{
        /* Kernel threads aren't supposed to go to userspace */
        WARN_ON_ONCE(!current->mm);

        if (!context_tracking_recursion_enter())
                return;

        if (__this_cpu_read(context_tracking.state) != state) {
                if (__this_cpu_read(context_tracking.active)) {
                        /*
                         * At this stage, only low level arch entry code remains and
                         * then we'll run in userspace. We can assume there won't be
                         * any RCU read-side critical section until the next call to
                         * user_exit() or ct_irq_enter(). Let's remove RCU's dependency
                         * on the tick.
                         */
                        if (state == CONTEXT_USER) {
                                instrumentation_begin();
                                trace_user_enter(0);
                                vtime_user_enter(current);
                                instrumentation_end();
                        }
                        rcu_user_enter();
                }
                /*
                 * Even if context tracking is disabled on this CPU, because it's outside
                 * the full dynticks mask for example, we still have to keep track of the
                 * context transitions and states to prevent inconsistency on those of
                 * other CPUs.
                 * If a task triggers an exception in userspace, sleeps in the exception
                 * handler and then migrates to another CPU, that new CPU must know where
                 * the exception returns by the time we call exception_exit().
                 * This information can only be provided by the previous CPU when it called
                 * exception_enter().
                 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
                 * is false because we know that CPU is not tickless.
                 */
                __this_cpu_write(context_tracking.state, state);
        }
        context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_enter);
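
/*
 * A minimal usage sketch, as a hypothetical arch exit-to-user path (the
 * helper name below is illustrative; user_enter_irqoff() is the real
 * entry point, called with IRQs disabled):
 *
 *      noinstr void arch_exit_to_user_mode(void)
 *      {
 *              lockdep_assert_irqs_disabled();
 *              user_enter_irqoff();    // resolves to __ct_user_enter(CONTEXT_USER)
 *      }
 */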

/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_restore() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __ct_user_enter() through user_enter_irqoff()
 * or context_tracking_guest_enter(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_enter(enum ctx_state state)
{
        unsigned long flags;

        /*
         * Some contexts may involve an exception occurring in an irq,
         * leading to that nesting:
         * ct_irq_enter() rcu_user_exit() rcu_user_enter() ct_irq_exit()
         * This would mess up the dyntick_nesting count though. And rcu_irq_*()
         * helpers are enough to protect RCU uses inside the exception. So
         * just return immediately if we detect we are in an IRQ.
         */
        if (in_interrupt())
                return;

        local_irq_save(flags);
        __ct_user_enter(state);
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_enter);
EXPORT_SYMBOL_GPL(ct_user_enter);

/**
 * user_enter_callable() - Unfortunate ASM callable version of user_enter() for
 *                         archs that didn't manage to check the context tracking
 *                         static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls
 * local_irq_restore(), involving illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call user_enter_irqoff(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void user_enter_callable(void)
{
        user_enter();
}
NOKPROBE_SYMBOL(user_enter_callable);

/**
 * __ct_user_exit - Inform the context tracking that the CPU is
 *                  exiting user or guest mode and entering the kernel.
 * @state: the context tracking state we are exiting (CONTEXT_USER or
 *         CONTEXT_GUEST)
 *
 * This function must be called after we entered the kernel from user or
 * guest space, before any use of RCU read side critical sections. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void noinstr __ct_user_exit(enum ctx_state state)
{
        if (!context_tracking_recursion_enter())
                return;

        if (__this_cpu_read(context_tracking.state) == state) {
                if (__this_cpu_read(context_tracking.active)) {
                        /*
                         * We are going to run code that may use RCU. Inform
                         * RCU core about that (ie: we may need the tick again).
                         */
                        rcu_user_exit();
                        if (state == CONTEXT_USER) {
                                instrumentation_begin();
                                vtime_user_exit(current);
                                trace_user_exit(0);
                                instrumentation_end();
                        }
                }
                __this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
        }
        context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_exit);
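
/*
 * A minimal usage sketch, as a hypothetical arch kernel-entry path (the
 * helper name below is illustrative; user_exit_irqoff() is the real
 * entry point, called with IRQs disabled before any RCU usage):
 *
 *      noinstr void arch_enter_from_user_mode(void)
 *      {
 *              lockdep_assert_irqs_disabled();
 *              user_exit_irqoff();     // resolves to __ct_user_exit(CONTEXT_USER)
 *      }
 */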

/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_save() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __ct_user_exit() through user_exit_irqoff()
 * or context_tracking_guest_exit(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_exit(enum ctx_state state)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);
        __ct_user_exit(state);
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_exit);
EXPORT_SYMBOL_GPL(ct_user_exit);

/**
 * user_exit_callable() - Unfortunate ASM callable version of user_exit() for
 *                        archs that didn't manage to check the context tracking
 *                        static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls local_irq_save(),
 * involving illegal RCU uses through tracing and lockdep. This is unlikely
 * to be fixed as this function is obsolete. The preferred way is to call
 * user_exit_irqoff(). It should be the arch entry code's responsibility to
 * call into context tracking with IRQs disabled.
 */
void user_exit_callable(void)
{
        user_exit();
}
NOKPROBE_SYMBOL(user_exit_callable);

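/*
 * Enable user context tracking for a CPU: mark it active and bump the
 * static key so the context tracking probes get enabled. The one-time
 * init part propagates TIF_NOHZ from init to every future task through
 * fork, on architectures that still rely on that flag.
 */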
void __init ct_cpu_track_user(int cpu)
{
        static __initdata bool initialized = false;

        if (!per_cpu(context_tracking.active, cpu)) {
                per_cpu(context_tracking.active, cpu) = true;
                static_branch_inc(&context_tracking_key);
        }

        if (initialized)
                return;

#ifdef CONFIG_HAVE_TIF_NOHZ
        /*
         * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork.
         * This assumes that init is the only task at this early boot stage.
         */
        set_tsk_thread_flag(&init_task, TIF_NOHZ);
#endif
        WARN_ON_ONCE(!tasklist_empty());

        initialized = true;
}

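/*
 * With CONFIG_CONTEXT_TRACKING_USER_FORCE, user context tracking is
 * enabled on every possible CPU at boot rather than only on nohz_full
 * CPUs. This is mainly meant to stress-test the user tracking paths.
 */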
#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
void __init context_tracking_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                ct_cpu_track_user(cpu);
}
#endif

#endif /* #ifdef CONFIG_CONTEXT_TRACKING_USER */