genirq: Provide irq_enter/exit_rcu()
[linux-2.6-microblaze.git] / include/linux/hardirq.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

#ifdef CONFIG_NO_HZ_FULL
void __rcu_irq_enter_check_tick(void);
#else
static inline void __rcu_irq_enter_check_tick(void) { }
#endif

static __always_inline void rcu_irq_enter_check_tick(void)
{
	if (context_tracking_enabled())
		__rcu_irq_enter_check_tick();
}

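/*
 * Illustrative sketch, not part of the original header: entry code that
 * takes an interrupt while RCU is already watching can call
 * rcu_irq_enter_check_tick() so that NO_HZ_FULL can restart the tick when
 * RCU needs it.  The function and handler names below are hypothetical.
 */
static __always_inline void example_irq_from_kernel(void (*handler)(void))
{
	rcu_irq_enter_check_tick();	/* no-op unless context tracking is enabled */
	handler();			/* the actual interrupt work */
}
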
/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
void irq_enter(void);
/*
 * Like irq_enter(), but RCU is already watching.
 */
void irq_enter_rcu(void);

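/*
 * Illustrative sketch, not part of the original header: choosing between
 * the two entry helpers.  irq_enter() also performs the RCU/tick entry
 * work itself, whereas irq_enter_rcu() assumes the caller (typically
 * architecture entry code) has already made RCU watch this CPU.  The
 * function name below is hypothetical.
 */
static inline void example_mark_hardirq_entry(int rcu_already_watching)
{
	if (rcu_already_watching)
		irq_enter_rcu();	/* RCU entry already handled by the caller */
	else
		irq_enter();		/* hardirq accounting plus RCU/tick entry */
}
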
/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		lockdep_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

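/*
 * Illustrative sketch, not part of the original header: the double
 * underscore variants do only the preempt-count, accounting and lockdep
 * bookkeeping.  A caller that handles RCU and the tick itself and must
 * not run softirqs on exit could bracket work like this.  The names
 * below are hypothetical.
 */
static inline void example_raw_hardirq_section(void (*fn)(void))
{
	__irq_enter();	/* add HARDIRQ_OFFSET, account time, tell lockdep */
	fn();		/* runs with in_irq() true */
	__irq_exit();	/* undo the above; pending softirqs are NOT run here */
}
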
/*
 * Exit irq context and process softirqs if needed:
 */
void irq_exit(void);

/*
 * Like irq_exit(), but return with RCU watching.
 */
void irq_exit_rcu(void);

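/*
 * Illustrative sketch, not part of the original header: a handler path
 * whose entry code has already entered RCU (e.g. the x86 idtentry code)
 * brackets the device handler with the _rcu variants; a path that has
 * not done so uses plain irq_enter()/irq_exit().  The names below are
 * hypothetical.
 */
static inline void example_sysvec_body(void (*handler)(void))
{
	irq_enter_rcu();	/* RCU entry already done by the caller */
	handler();		/* device/IPI work in hardirq context */
	irq_exit_rcu();		/* may run softirqs; returns with RCU watching */
}
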
#ifndef arch_nmi_enter
#define arch_nmi_enter()	do { } while (0)
#define arch_nmi_exit()		do { } while (0)
#endif

#ifdef CONFIG_TINY_RCU
static inline void rcu_nmi_enter(void) { }
static inline void rcu_nmi_exit(void) { }
#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * NMI vs Tracing
 * --------------
 *
 * We must not land in a tracer until (or after) we've changed preempt_count
 * such that in_nmi() becomes true. To that effect all NMI C entry points must
 * be marked 'notrace' and call nmi_enter() as soon as possible.
 */

/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 */
#define nmi_enter()						\
	do {							\
		arch_nmi_enter();				\
		printk_nmi_enter();				\
		lockdep_off();					\
		BUG_ON(in_nmi() == NMI_MASK);			\
		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		lockdep_hardirq_enter();			\
		instrumentation_begin();			\
		ftrace_nmi_enter();				\
		instrumentation_end();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		instrumentation_begin();			\
		ftrace_nmi_exit();				\
		instrumentation_end();				\
		lockdep_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		lockdep_on();					\
		printk_nmi_exit();				\
		arch_nmi_exit();				\
	} while (0)

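/*
 * Illustrative sketch, not part of the original header: per the "NMI vs
 * Tracing" comment above, an NMI C entry point must be 'notrace' and call
 * nmi_enter() before any code a tracer could hook.  The names below are
 * hypothetical.
 */
static notrace void example_do_nmi(void (*handle_nmi_event)(void))
{
	nmi_enter();		/* in_nmi() becomes true before traceable code runs */
	handle_nmi_event();	/* NMI work runs with RCU watching and lockdep off */
	nmi_exit();
}
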
#endif /* LINUX_HARDIRQ_H */