1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #ifndef _ASM_POWERPC_INTERRUPT_H
3 #define _ASM_POWERPC_INTERRUPT_H
5 #include <linux/context_tracking.h>
6 #include <linux/hardirq.h>
7 #include <asm/cputime.h>
8 #include <asm/ftrace.h>
/*
 * Per-interrupt state captured by interrupt_enter_prepare() and consumed
 * by interrupt_exit_prepare().
 * NOTE(review): the closing brace/#endif of this struct are not visible
 * in this chunk.
 */
10 struct interrupt_state {
11 #ifdef CONFIG_PPC_BOOK3E_64
/* Book3E-64: context-tracking state saved by exception_enter() at entry */
12 enum ctx_state ctx_state;
/*
 * interrupt_enter_prepare - common entry bookkeeping for synchronous
 * interrupts: reconcile the irq soft-mask (Book3S), sanity-check the
 * context-tracking state, and account user/stolen time.
 * NOTE(review): interior lines of this function are missing from this
 * view; comments describe only the visible statements.
 */
16 static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
19 * Book3E reconciles irq soft mask in asm
21 #ifdef CONFIG_PPC_BOOK3S_64
/* Mask everything; remember whether irqs were soft-enabled on entry */
22 if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
24 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
26 if (user_mode(regs)) {
/* Came from userspace: context tracking must report CONTEXT_USER */
27 CT_WARN_ON(ct_state() != CONTEXT_USER);
30 account_cpu_user_entry();
31 account_stolen_time();
34 * CT_WARN_ON comes here via program_check_exception,
/* 0x700 (program check) is exempt from the kernel-context warning */
37 if (TRAP(regs) != 0x700)
38 CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
42 #ifdef CONFIG_PPC_BOOK3E_64
/* Book3E uses generic context tracking; state restored at exit */
43 state->ctx_state = exception_enter();
45 account_cpu_user_entry();
50 * Care should be taken to note that interrupt_exit_prepare and
51 * interrupt_async_exit_prepare do not necessarily return immediately to
52 * regs context (e.g., if regs is usermode, we don't necessarily return to
53 * user mode). Other interrupts might be taken between here and return,
54 * context switch / preemption may occur in the exit path after this, or a
55 * signal may be delivered, etc.
57 * The real interrupt exit code is platform specific, e.g.,
58 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
60 * However interrupt_nmi_exit_prepare does return directly to regs, because
61 * NMIs do not do "exit work" or replay soft-masked interrupts.
/*
 * interrupt_exit_prepare - common exit bookkeeping; on Book3E this
 * restores the context-tracking state captured at entry. See the block
 * comment above for why this does not necessarily return straight to
 * the regs context.
 */
63 static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
65 #ifdef CONFIG_PPC_BOOK3E_64
66 exception_exit(state->ctx_state);
70 * Book3S exits to user via interrupt_exit_user_prepare(), which does
71 * context tracking, which is a cleaner way to handle PREEMPT=y
72 * and avoid context entry/exit in e.g., preempt_schedule_irq(),
73 * which is likely to be where the core code wants to end up.
75 * The above comment explains why we can't do the
77 * if (user_mode(regs))
/*
 * interrupt_async_enter_prepare - entry bookkeeping for asynchronous
 * (maskable) interrupts; delegates the common work to
 * interrupt_enter_prepare().
 */
84 static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
86 interrupt_enter_prepare(regs, state);
/*
 * interrupt_async_exit_prepare - exit counterpart of
 * interrupt_async_enter_prepare(); delegates to interrupt_exit_prepare().
 */
90 static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
93 interrupt_exit_prepare(regs, state);
96 struct interrupt_nmi_state {
/*
 * interrupt_nmi_enter_prepare - NMI entry: disable ftrace for true NMIs
 * (soft-NMI DEC/PMI stay traceable), and avoid nmi_enter() for a
 * pseries hash guest taking a real-mode NMI.
 * NOTE(review): interior lines are missing from this view.
 */
102 static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
105 /* Allow DEC and PMI to be traced when they are soft-NMI */
/* 0x900 = decrementer, 0xf00 = performance monitor; 0x260 presumably the soft-NMI watchdog vector -- TODO confirm */
106 if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
107 state->ftrace_enabled = this_cpu_get_ftrace_enabled();
108 this_cpu_set_ftrace_enabled(0);
113 * Do not use nmi_enter() for pseries hash guest taking a real-mode
114 * NMI because not everything it touches is within the RMA limit.
/* Safe cases: not Book3S-64, not LPAR, radix MMU, or already in virtual mode (MSR_DR) */
116 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
117 !firmware_has_feature(FW_FEATURE_LPAR) ||
118 radix_enabled() || (mfmsr() & MSR_DR))
/*
 * interrupt_nmi_exit_prepare - NMI exit: mirror of the enter path.
 * Both conditions below must stay exactly in sync with
 * interrupt_nmi_enter_prepare(), otherwise state would be restored
 * that was never saved (or vice versa).
 */
122 static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
124 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
125 !firmware_has_feature(FW_FEATURE_LPAR) ||
126 radix_enabled() || (mfmsr() & MSR_DR))
/* Restore ftrace only for the true-NMI vectors that disabled it at entry */
130 if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
131 this_cpu_set_ftrace_enabled(state->ftrace_enabled);
136 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
137 * @func: Function name of the entry point
138 * @returns: Returns a value back to asm caller
/* Declaration only: asm entry code calls func(regs) and consumes the long return */
140 #define DECLARE_INTERRUPT_HANDLER_RAW(func) \
141 __visible long func(struct pt_regs *regs)
144 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
145 * @func: Function name of the entry point
146 * @returns: Returns a value back to asm caller
148 * @func is called from ASM entry code.
150 * This is a plain function which does no tracing, reconciling, etc.
151 * The macro is written so it acts as function definition. Append the
152 * body with a pair of curly brackets.
154 * raw interrupt handlers must not enable or disable interrupts, or
155 * schedule, tracing and instrumentation (ftrace, lockdep, etc) would
156 * not be advisable either, although may be possible in a pinch, the
157 * trace will look odd at least.
159 * A raw handler may call one of the other interrupt handler functions
160 * to be converted into that interrupt context without these restrictions.
162 * On PPC64, _RAW handlers may return with fast_interrupt_return.
164 * Specific handlers may have additional restrictions.
/*
 * Expands to a noinstr wrapper func() that calls the __always_inline
 * ____func() whose body the user appends in curly brackets.
 * NOTE(review): some backslash-continued lines of this macro are not
 * visible in this chunk; no comments are inserted inside the macro so
 * as not to break line continuations.
 */
166 #define DEFINE_INTERRUPT_HANDLER_RAW(func) \
167 static __always_inline long ____##func(struct pt_regs *regs); \
169 __visible noinstr long func(struct pt_regs *regs) \
173 ret = ____##func (regs); \
178 static __always_inline long ____##func(struct pt_regs *regs)
181 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
182 * @func: Function name of the entry point
/* Declaration only: synchronous handlers return void to the asm caller */
184 #define DECLARE_INTERRUPT_HANDLER(func) \
185 __visible void func(struct pt_regs *regs)
188 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
189 * @func: Function name of the entry point
191 * @func is called from ASM entry code.
193 * The macro is written so it acts as function definition. Append the
194 * body with a pair of curly brackets.
/*
 * Wrapper brackets the user-supplied ____func() body with
 * interrupt_enter_prepare()/interrupt_exit_prepare().
 * NOTE(review): some continuation lines are not visible in this chunk.
 */
196 #define DEFINE_INTERRUPT_HANDLER(func) \
197 static __always_inline void ____##func(struct pt_regs *regs); \
199 __visible noinstr void func(struct pt_regs *regs) \
201 struct interrupt_state state; \
203 interrupt_enter_prepare(regs, &state); \
207 interrupt_exit_prepare(regs, &state); \
210 static __always_inline void ____##func(struct pt_regs *regs)
213 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
214 * @func: Function name of the entry point
215 * @returns: Returns a value back to asm caller
/* Like DECLARE_INTERRUPT_HANDLER but the handler returns a long to asm */
217 #define DECLARE_INTERRUPT_HANDLER_RET(func) \
218 __visible long func(struct pt_regs *regs)
221 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
222 * @func: Function name of the entry point
223 * @returns: Returns a value back to asm caller
225 * @func is called from ASM entry code.
227 * The macro is written so it acts as function definition. Append the
228 * body with a pair of curly brackets.
/*
 * Same enter/exit bracketing as DEFINE_INTERRUPT_HANDLER, but the
 * ____func() return value is propagated back to the asm caller.
 * NOTE(review): some continuation lines are not visible in this chunk.
 */
230 #define DEFINE_INTERRUPT_HANDLER_RET(func) \
231 static __always_inline long ____##func(struct pt_regs *regs); \
233 __visible noinstr long func(struct pt_regs *regs) \
235 struct interrupt_state state; \
238 interrupt_enter_prepare(regs, &state); \
240 ret = ____##func (regs); \
242 interrupt_exit_prepare(regs, &state); \
247 static __always_inline long ____##func(struct pt_regs *regs)
250 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
251 * @func: Function name of the entry point
/* Declaration only: async (maskable) handlers return void */
253 #define DECLARE_INTERRUPT_HANDLER_ASYNC(func) \
254 __visible void func(struct pt_regs *regs)
257 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
258 * @func: Function name of the entry point
260 * @func is called from ASM entry code.
262 * The macro is written so it acts as function definition. Append the
263 * body with a pair of curly brackets.
/*
 * Wrapper brackets the body with the async variants
 * interrupt_async_enter_prepare()/interrupt_async_exit_prepare().
 * NOTE(review): some continuation lines are not visible in this chunk.
 */
265 #define DEFINE_INTERRUPT_HANDLER_ASYNC(func) \
266 static __always_inline void ____##func(struct pt_regs *regs); \
268 __visible noinstr void func(struct pt_regs *regs) \
270 struct interrupt_state state; \
272 interrupt_async_enter_prepare(regs, &state); \
276 interrupt_async_exit_prepare(regs, &state); \
279 static __always_inline void ____##func(struct pt_regs *regs)
282 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
283 * @func: Function name of the entry point
284 * @returns: Returns a value back to asm caller
/* Declaration only: NMI handlers return a long to the asm caller */
286 #define DECLARE_INTERRUPT_HANDLER_NMI(func) \
287 __visible long func(struct pt_regs *regs)
290 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
291 * @func: Function name of the entry point
292 * @returns: Returns a value back to asm caller
294 * @func is called from ASM entry code.
296 * The macro is written so it acts as function definition. Append the
297 * body with a pair of curly brackets.
/*
 * Wrapper brackets the body with the NMI variants, using the dedicated
 * struct interrupt_nmi_state rather than struct interrupt_state.
 * NOTE(review): some continuation lines are not visible in this chunk.
 */
299 #define DEFINE_INTERRUPT_HANDLER_NMI(func) \
300 static __always_inline long ____##func(struct pt_regs *regs); \
302 __visible noinstr long func(struct pt_regs *regs) \
304 struct interrupt_nmi_state state; \
307 interrupt_nmi_enter_prepare(regs, &state); \
309 ret = ____##func (regs); \
311 interrupt_nmi_exit_prepare(regs, &state); \
316 static __always_inline long ____##func(struct pt_regs *regs)
319 /* Interrupt handlers */
321 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
322 #ifdef CONFIG_PPC_BOOK3S_64
/* Book3S-64 takes machine checks through the async entry point ... */
323 DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
/* ... other platforms treat it as a true NMI (the #else/#endif are not visible in this chunk) */
325 DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
327 DECLARE_INTERRUPT_HANDLER(SMIException);
328 DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
329 DECLARE_INTERRUPT_HANDLER(unknown_exception);
330 DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
331 DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
332 DECLARE_INTERRUPT_HANDLER(RunModeException);
333 DECLARE_INTERRUPT_HANDLER(single_step_exception);
334 DECLARE_INTERRUPT_HANDLER(program_check_exception);
335 DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
336 DECLARE_INTERRUPT_HANDLER(alignment_exception);
337 DECLARE_INTERRUPT_HANDLER(StackOverflow);
338 DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
/* Facility-unavailable family (FP/AltiVec/VSX, including TM variants) */
339 DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
340 DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
341 DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
342 DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
343 DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
344 DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
345 DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
/* The performance monitor interrupt has NMI, async and raw entry points */
346 DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
347 DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
348 DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
349 DECLARE_INTERRUPT_HANDLER(DebugException);
350 DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
351 DECLARE_INTERRUPT_HANDLER(CacheLockingException);
352 DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
353 DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
354 DECLARE_INTERRUPT_HANDLER(unrecoverable_exception);
355 DECLARE_INTERRUPT_HANDLER(WatchdogException);
356 DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
/* MMU/storage fault handlers (SLB, hash, generic page fault) */
359 DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
360 DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
363 DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
366 DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
367 DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);
370 DECLARE_INTERRUPT_HANDLER(do_break);
373 DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);
/* Early/real-mode entry points */
376 DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
377 DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
379 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
/* Replay helpers for system reset and soft-masked interrupts */
381 void replay_system_reset(void);
382 void replay_soft_interrupts(void);
/*
 * interrupt_cond_local_irq_enable - re-enable local irqs only if the
 * interrupted context (per regs' soft-mask state) had them enabled.
 * NOTE(review): the enable call in the body is not visible in this
 * chunk -- presumably local_irq_enable(); confirm against the full file.
 */
384 static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
386 if (!arch_irq_disabled_regs(regs))
390 #endif /* _ASM_POWERPC_INTERRUPT_H */