/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

#ifdef CONFIG_NO_HZ_FULL
void __rcu_irq_enter_check_tick(void);
#else
static inline void __rcu_irq_enter_check_tick(void) { }
#endif

static __always_inline void rcu_irq_enter_check_tick(void)
{
        if (context_tracking_enabled())
                __rcu_irq_enter_check_tick();
}
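
/*
 * Illustrative sketch (not part of this header): interrupt entry code is
 * expected to call rcu_irq_enter_check_tick() once RCU is known to be
 * watching, so a NO_HZ_FULL CPU can re-evaluate whether it needs the tick.
 * The names arch_irq_entry() and run_irq_handlers() below are hypothetical:
 *
 *      void arch_irq_entry(struct pt_regs *regs)
 *      {
 *              irq_enter_rcu();                (declared below)
 *              rcu_irq_enter_check_tick();
 *              run_irq_handlers(regs);
 *              irq_exit_rcu();
 *      }
 */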

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()                                   \
        do {                                            \
                account_irq_enter_time(current);        \
                preempt_count_add(HARDIRQ_OFFSET);      \
                lockdep_hardirq_enter();                \
        } while (0)
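
/*
 * Illustrative sketch (not part of this header): __irq_enter() is the
 * building block for irq_enter_rcu() in kernel/softirq.c. Paraphrased
 * and simplified from that implementation, the shape is roughly:
 *
 *      void irq_enter_rcu(void)
 *      {
 *              if (is_idle_task(current) && !in_interrupt())
 *                      tick_irq_enter();
 *              __irq_enter();
 *      }
 */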

/*
 * Like __irq_enter() without time accounting for fast
 * interrupts, e.g. reschedule IPI where time accounting
 * is more expensive than the actual interrupt.
 */
#define __irq_enter_raw()                               \
        do {                                            \
                preempt_count_add(HARDIRQ_OFFSET);      \
                lockdep_hardirq_enter();                \
        } while (0)
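
/*
 * Illustrative sketch (not part of this header): a minimal fast path
 * that skips time accounting, paired with __irq_exit_raw() defined
 * below; handle_ipi() is a placeholder name:
 *
 *      __irq_enter_raw();
 *      handle_ipi();
 *      __irq_exit_raw();
 */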

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
void irq_enter(void);
/*
 * Like irq_enter(), but RCU is already watching.
 */
void irq_enter_rcu(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()                                    \
        do {                                            \
                lockdep_hardirq_exit();                 \
                account_irq_exit_time(current);         \
                preempt_count_sub(HARDIRQ_OFFSET);      \
        } while (0)

/*
 * Like __irq_exit() without time accounting
 */
#define __irq_exit_raw()                                \
        do {                                            \
                lockdep_hardirq_exit();                 \
                preempt_count_sub(HARDIRQ_OFFSET);      \
        } while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
void irq_exit(void);

/*
 * Like irq_exit(), but return with RCU watching.
 */
void irq_exit_rcu(void);
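
/*
 * Illustrative sketch (not part of this header): the canonical pairing
 * as an architecture's low-level interrupt handler might use it; do_IRQ()
 * and handle_one_irq() are placeholder names. irq_enter() puts us in
 * hardirq context and makes RCU watch; irq_exit() leaves hardirq context
 * and runs pending softirqs if this was the outermost interrupt:
 *
 *      void do_IRQ(struct pt_regs *regs, unsigned int irq)
 *      {
 *              irq_enter();
 *              handle_one_irq(irq);
 *              irq_exit();
 *      }
 */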

#ifndef arch_nmi_enter
#define arch_nmi_enter()        do { } while (0)
#define arch_nmi_exit()         do { } while (0)
#endif

#ifdef CONFIG_TINY_RCU
static inline void rcu_nmi_enter(void) { }
static inline void rcu_nmi_exit(void) { }
#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * NMI vs Tracing
 * --------------
 *
 * We must not land in a tracer until (or after) we've changed preempt_count
 * such that in_nmi() becomes true. To that effect all NMI C entry points must
 * be marked 'notrace' and call nmi_enter() as soon as possible.
 */
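
/*
 * Illustrative sketch (not part of this header): following the rule
 * above, an NMI C entry point is marked notrace and brackets all of its
 * work, including anything traceable, with nmi_enter()/nmi_exit() (both
 * defined below); arch_handle_nmi() and do_nmi_work() are placeholder
 * names:
 *
 *      notrace void arch_handle_nmi(struct pt_regs *regs)
 *      {
 *              nmi_enter();
 *              do_nmi_work(regs);
 *              nmi_exit();
 *      }
 */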

/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 */
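/*
 * Worked example (informational, assuming the NMI_BITS == 4 layout in
 * <linux/preempt.h>): in_nmi() reads the NMI field of preempt_count(),
 * and each __nmi_enter() adds NMI_OFFSET to it. After 15 nested entries
 * the field is saturated and in_nmi() == NMI_MASK, so the BUG_ON() below
 * fires before a 16th entry could overflow the field.
 */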
#define __nmi_enter()                                           \
        do {                                                    \
                lockdep_off();                                  \
                arch_nmi_enter();                               \
                printk_nmi_enter();                             \
                BUG_ON(in_nmi() == NMI_MASK);                   \
                __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);       \
        } while (0)

#define nmi_enter()                                             \
        do {                                                    \
                __nmi_enter();                                  \
                lockdep_hardirq_enter();                        \
                rcu_nmi_enter();                                \
                instrumentation_begin();                        \
                ftrace_nmi_enter();                             \
                instrumentation_end();                          \
        } while (0)

#define __nmi_exit()                                            \
        do {                                                    \
                BUG_ON(!in_nmi());                              \
                __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);       \
                printk_nmi_exit();                              \
                arch_nmi_exit();                                \
                lockdep_on();                                   \
        } while (0)

#define nmi_exit()                                              \
        do {                                                    \
                instrumentation_begin();                        \
                ftrace_nmi_exit();                              \
                instrumentation_end();                          \
                rcu_nmi_exit();                                 \
                lockdep_hardirq_exit();                         \
                __nmi_exit();                                   \
        } while (0)

#endif /* LINUX_HARDIRQ_H */