powerpc: move NMI entry/exit code into wrapper
arch/powerpc/include/asm/interrupt.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>

struct interrupt_state {
#ifdef CONFIG_PPC_BOOK3E_64
	enum ctx_state ctx_state;
#endif
};

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	/*
	 * Book3E reconciles irq soft mask in asm
	 */
#ifdef CONFIG_PPC_BOOK3S_64
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != 0x700)
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
	}
#endif

#ifdef CONFIG_PPC_BOOK3E_64
	state->ctx_state = exception_enter();
	if (user_mode(regs))
		account_cpu_user_entry();
#endif
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3E_64
	exception_exit(state->ctx_state);
#endif

	/*
	 * Book3S exits to user via interrupt_exit_user_prepare(), which
	 * does the context tracking there. That is a cleaner way to handle
	 * PREEMPT=y (it avoids context entry/exit in e.g.
	 * preempt_schedule_irq()) and is likely to be where the core code
	 * wants to end up.
	 *
	 * The above is why we can't do the
	 *
	 *     if (user_mode(regs))
	 *         user_exit_irqoff();
	 *
	 * sequence here.
	 */
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	interrupt_enter_prepare(regs, state);
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	irq_exit();
	interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 ftrace_enabled;
#endif
};

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

#ifdef CONFIG_PPC64
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);
#endif
}

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) are not
 * advisable either; they may be possible in a pinch, but the trace will
 * look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
__visible noinstr long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
									\
static __always_inline long ____##func(struct pt_regs *regs)

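/*
 * Usage sketch (illustrative only; the handler name is hypothetical,
 * though do_slb_fault declared below is a real _RAW user). The macro
 * supplies everything up to the function body, so a handler is written
 * by appending the body in curly brackets:
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(example_raw_handler)
 *	{
 *		return 0;
 *	}
 *
 * The return value is passed back to the asm caller. The body must
 * respect the restrictions above: no enabling or disabling interrupts,
 * no scheduling, and preferably no tracing.
 */
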
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
__visible noinstr void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
}									\
									\
static __always_inline void ____##func(struct pt_regs *regs)

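/*
 * Usage sketch (illustrative only; the handler name is hypothetical):
 *
 *	DEFINE_INTERRUPT_HANDLER(example_exception)
 *	{
 *		... handle the exception using regs ...
 *	}
 *
 * The out-of-line func() wrapper runs interrupt_enter_prepare(), the
 * always-inlined ____example_exception() body, then
 * interrupt_exit_prepare(), so the body needs no entry/exit boilerplate
 * of its own.
 */
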
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
__visible noinstr long func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
	long ret;							\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs, &state);				\
									\
	return ret;							\
}									\
									\
static __always_inline long ____##func(struct pt_regs *regs)

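/*
 * Usage sketch (illustrative only): the _RET variant is identical to
 * DEFINE_INTERRUPT_HANDLER except that the body returns a long which
 * the wrapper hands back to the asm caller (do_page_fault, declared
 * below, is one such handler):
 *
 *	DEFINE_INTERRUPT_HANDLER_RET(example_ret_handler)
 *	{
 *		return 0;
 *	}
 */
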
/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
__visible noinstr void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_async_enter_prepare(regs, &state);			\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs, &state);			\
}									\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
__visible noinstr long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
									\
static __always_inline long ____##func(struct pt_regs *regs)
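
/*
 * Usage sketch (illustrative only; system_reset_exception declared
 * below is a real _NMI user). The wrapper brackets the body with
 * interrupt_nmi_enter_prepare() / interrupt_nmi_exit_prepare(), which
 * save and restore the per-CPU ftrace-enabled state and call
 * nmi_enter()/nmi_exit() where that is safe (see the real-mode caveat
 * above):
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(example_nmi_handler)
 *	{
 *		return 0;
 *	}
 */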

/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER(unrecoverable_exception);
DECLARE_INTERRUPT_HANDLER(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

#endif /* _ASM_POWERPC_INTERRUPT_H */