/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>
/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00100000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1
#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)
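
/*
 * Example (illustrative only, not used by the kernel itself): decoding
 * a raw preempt_count value into its component fields with the masks
 * and shifts above:
 *
 *	unsigned int pc      = preempt_count();
 *	unsigned int depth   = (pc & PREEMPT_MASK) >> PREEMPT_SHIFT;
 *	unsigned int softirq = (pc & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT;
 *	unsigned int hardirq = (pc & HARDIRQ_MASK) >> HARDIRQ_SHIFT;
 *	unsigned int nmi     = (pc & NMI_MASK)     >> NMI_SHIFT;
 */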
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
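
/*
 * A sketch of how the scheduler checks this invariant (modelled on
 * finish_task_switch(); not a verbatim copy):
 *
 *	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
 *		      "corrupted preempt_count: %s/%d/0x%x\n",
 *		      current->comm, current->pid, preempt_count()))
 *		preempt_count_set(FORK_PREEMPT_COUNT);
 */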
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))
/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()       - We're in (hard) IRQ context
 * in_softirq()   - We have BH disabled, or are processing softirqs
 * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
 * in_serving_softirq() - We're in softirq context
 * in_nmi()       - We're in NMI context
 * in_task()	  - We're in task context
 *
 * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
 *       should not be used in new code.
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#define in_nmi()		(preempt_count() & NMI_MASK)
#define in_task()		(!(preempt_count() & \
				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
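
/*
 * Illustrative use (a sketch, not part of this API): code that can run
 * in either context sometimes keys allocation flags off these
 * predicates, although callers are normally expected to know their
 * context and pass the right flags explicitly:
 *
 *	gfp_t gfp = in_task() ? GFP_KERNEL : GFP_ATOMIC;
 *	buf = kmalloc(size, gfp);
 */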
/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif
/*
 * The preempt_count offset after spin_lock()
 */
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
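
/*
 * For illustration, roughly what the spin_lock_bh() path does (a
 * sketch, not a verbatim copy of the locking code): the lock side adds
 * the whole offset at once and the split unlock sequence removes it in
 * two steps:
 *
 *	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 *	...critical section...
 *	spin_unlock(&lock);	// drops PREEMPT_LOCK_OFFSET
 *	local_bh_enable();	// drops SOFTIRQ_DISABLE_OFFSET
 */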
/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)
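
/*
 * Pitfall illustration ('lock' is a hypothetical spinlock): on
 * !CONFIG_PREEMPT_COUNT kernels spin_lock() does not touch
 * preempt_count, so inside the critical section:
 *
 *	spin_lock(&lock);
 *	if (!in_atomic())	// true here, the count is still 0,
 *		msleep(10);	// yet sleeping under a spinlock is a bug
 *	spin_unlock(&lock);
 */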
/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)
#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched()		do { } while (0)
#endif /* CONFIG_PREEMPT */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */
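
/*
 * Typical usage (a minimal sketch; 'last_cpu' is a hypothetical
 * per-CPU variable): pin the task to its CPU while touching per-CPU
 * state. This works on all configurations because the macros are at
 * least compiler barriers:
 *
 *	preempt_disable();
 *	cpu = smp_processor_id();	// stable: we cannot migrate
 *	__this_cpu_write(last_cpu, cpu);
 *	preempt_enable();
 */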
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};
/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};
void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
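
/*
 * Typical usage (a sketch; 'my_vcpu', 'my_ops' and the callbacks are
 * hypothetical, modelled on how KVM embeds a notifier in its vcpu
 * structure and recovers it with container_of()):
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(pn, struct my_vcpu, pn);
 *		// reload per-cpu state for 'v'
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&vcpu->pn, &my_ops);
 *	preempt_notifier_register(&vcpu->pn);
 */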
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

#endif
#endif /* __LINUX_PREEMPT_H */