1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #ifndef _ASM_POWERPC_INTERRUPT_H
3 #define _ASM_POWERPC_INTERRUPT_H
5 #include <linux/context_tracking.h>
6 #include <linux/hardirq.h>
7 #include <asm/cputime.h>
8 #include <asm/ftrace.h>
9 #include <asm/runlatch.h>
/*
 * Per-interrupt bookkeeping handed from interrupt_enter_prepare() to
 * interrupt_exit_prepare() (and the _async_ variants) below.
 * NOTE(review): struct members beyond ctx_state are on elided lines here.
 */
11 struct interrupt_state {
12 #ifdef CONFIG_PPC_BOOK3E_64
/* Context-tracking state saved by exception_enter() — Book3E 64 only */
13 enum ctx_state ctx_state;
/*
 * Common C-level entry work for synchronous interrupt handlers:
 * reconcile the soft irq mask (Book3S), sanity-check context-tracking
 * state, and do user/stolen time accounting.
 * NOTE(review): several source lines are elided in this view; comments
 * only describe the statements that are visible.
 */
17 static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
20 * Book3E reconciles irq soft mask in asm
22 #ifdef CONFIG_PPC_BOOK3S_64
/* Soft-disable all irqs; record that hard irqs are disabled on entry */
23 if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
25 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
27 if (user_mode(regs)) {
/* Came from userspace: context tracking must report CONTEXT_USER */
28 CT_WARN_ON(ct_state() != CONTEXT_USER);
31 account_cpu_user_entry();
32 account_stolen_time();
35 * CT_WARN_ON comes here via program_check_exception,
/* 0x700 = program check vector; the warning itself can trap there */
38 if (TRAP(regs) != 0x700)
39 CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
43 #ifdef CONFIG_PPC_BOOK3E_64
/* Book3E does context tracking via exception_enter()/exception_exit() */
44 state->ctx_state = exception_enter();
46 account_cpu_user_entry();
51 * Care should be taken to note that interrupt_exit_prepare and
52 * interrupt_async_exit_prepare do not necessarily return immediately to
53 * regs context (e.g., if regs is usermode, we don't necessarily return to
54 * user mode). Other interrupts might be taken between here and return,
55 * context switch / preemption may occur in the exit path after this, or a
56 * signal may be delivered, etc.
58 * The real interrupt exit code is platform specific, e.g.,
59 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
61 * However interrupt_nmi_exit_prepare does return directly to regs, because
62 * NMIs do not do "exit work" or replay soft-masked interrupts.
/*
 * Counterpart of interrupt_enter_prepare(). May not return directly to
 * the regs context (see the block comment above this function).
 */
64 static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
66 #ifdef CONFIG_PPC_BOOK3E_64
/* Restore the context-tracking state saved at entry (Book3E 64 only) */
67 exception_exit(state->ctx_state);
71 * Book3S exits to user via interrupt_exit_user_prepare(), which does
72 * context tracking, which is a cleaner way to handle PREEMPT=y
73 * and avoid context entry/exit in e.g., preempt_schedule_irq()),
74 * which is likely to be where the core code wants to end up.
76 * The above comment explains why we can't do the
78 * if (user_mode(regs))
/*
 * Entry work for asynchronous interrupts (e.g. external, timer):
 * turn the runlatch on, then fall through to the common entry path.
 */
85 static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
87 #ifdef CONFIG_PPC_BOOK3S_64
/* Set the CTRL runlatch if the CPU has it and it is not already on */
88 if (cpu_has_feature(CPU_FTR_CTRL) &&
89 !test_thread_local_flags(_TLF_RUNLATCH))
90 __ppc64_runlatch_on();
93 interrupt_enter_prepare(regs, state);
/*
 * Exit work for asynchronous interrupts; defers to the common exit path.
 * NOTE(review): any async-specific exit work is on elided lines here.
 */
97 static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
100 interrupt_exit_prepare(regs, state);
/*
 * State snapshotted by interrupt_nmi_enter_prepare() and restored by
 * interrupt_nmi_exit_prepare() so an NMI leaves the soft-mask and
 * ftrace state exactly as it found them.
 * NOTE(review): the member declarations are on elided lines; the exit
 * path below uses irq_soft_mask, irq_happened and ftrace_enabled.
 */
103 struct interrupt_nmi_state {
105 #ifdef CONFIG_PPC_BOOK3S_64
/*
 * Entry work for NMI-class interrupts. Saves and force-disables the
 * soft irq mask without going through irq tracing, disables ftrace for
 * non-traceable NMIs, and enters NMI context when it is safe to do so.
 */
113 static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
116 #ifdef CONFIG_PPC_BOOK3S_64
/* Snapshot the irq mask state so NMI exit can restore it untouched */
117 state->irq_soft_mask = local_paca->irq_soft_mask;
118 state->irq_happened = local_paca->irq_happened;
121 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
122 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
123 * because that goes through irq tracing which we don't want in NMI.
125 local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
126 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
128 /* Don't do any per-CPU operations until interrupt state is fixed */
130 /* Allow DEC and PMI to be traced when they are soft-NMI */
131 if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
/* Any other NMI: save the ftrace state and turn tracing off */
132 state->ftrace_enabled = this_cpu_get_ftrace_enabled();
133 this_cpu_set_ftrace_enabled(0);
138 * Do not use nmi_enter() for pseries hash guest taking a real-mode
139 * NMI because not everything it touches is within the RMA limit.
/* nmi_enter() is safe: not a hash LPAR guest, or translation is on */
141 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
142 !firmware_has_feature(FW_FEATURE_LPAR) ||
143 radix_enabled() || (mfmsr() & MSR_DR))
/*
 * Exit work for NMI-class interrupts. Returns directly to the regs
 * context: restores the ftrace and soft-mask state saved at entry.
 * The conditions below deliberately mirror interrupt_nmi_enter_prepare().
 */
147 static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
/* Same condition as the nmi_enter() gate in the entry path */
149 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
150 !firmware_has_feature(FW_FEATURE_LPAR) ||
151 radix_enabled() || (mfmsr() & MSR_DR))
/* Re-enable ftrace unless this was a traceable soft-NMI (DEC/PMI) */
155 if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
156 this_cpu_set_ftrace_enabled(state->ftrace_enabled);
158 #ifdef CONFIG_PPC_BOOK3S_64
159 /* Check we didn't change the pending interrupt mask. */
160 WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
/* Restore the soft-mask state snapshotted at NMI entry */
161 local_paca->irq_happened = state->irq_happened;
162 local_paca->irq_soft_mask = state->irq_soft_mask;
/*
 * Handler wrapper macros.
 *
 * Each DEFINE_* macro emits the asm-visible noinstr entry point 'func',
 * which brackets a call to the __always_inline body '____func' with the
 * matching enter/exit prepare helpers defined above (none for _RAW,
 * interrupt_*_prepare for plain/_RET, interrupt_async_*_prepare for
 * _ASYNC, interrupt_nmi_*_prepare for _NMI). The macro ends at the
 * ____func prototype so the user's { } body attaches to it.
 *
 * NOTE(review): continuation lines of the macro bodies are elided in
 * this view; no comments are inserted inside the '\'-continued bodies.
 */
168 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
169 * @func: Function name of the entry point
170 * @returns: Returns a value back to asm caller
172 #define DECLARE_INTERRUPT_HANDLER_RAW(func) \
173 __visible long func(struct pt_regs *regs)
176 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
177 * @func: Function name of the entry point
178 * @returns: Returns a value back to asm caller
180 * @func is called from ASM entry code.
182 * This is a plain function which does no tracing, reconciling, etc.
183 * The macro is written so it acts as function definition. Append the
184 * body with a pair of curly brackets.
186 * raw interrupt handlers must not enable or disable interrupts, or
187 * schedule, tracing and instrumentation (ftrace, lockdep, etc) would
188 * not be advisable either, although may be possible in a pinch, the
189 * trace will look odd at least.
191 * A raw handler may call one of the other interrupt handler functions
192 * to be converted into that interrupt context without these restrictions.
194 * On PPC64, _RAW handlers may return with fast_interrupt_return.
196 * Specific handlers may have additional restrictions.
198 #define DEFINE_INTERRUPT_HANDLER_RAW(func) \
199 static __always_inline long ____##func(struct pt_regs *regs); \
201 __visible noinstr long func(struct pt_regs *regs) \
205 ret = ____##func (regs); \
210 static __always_inline long ____##func(struct pt_regs *regs)
213 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
214 * @func: Function name of the entry point
216 #define DECLARE_INTERRUPT_HANDLER(func) \
217 __visible void func(struct pt_regs *regs)
220 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
221 * @func: Function name of the entry point
223 * @func is called from ASM entry code.
225 * The macro is written so it acts as function definition. Append the
226 * body with a pair of curly brackets.
228 #define DEFINE_INTERRUPT_HANDLER(func) \
229 static __always_inline void ____##func(struct pt_regs *regs); \
231 __visible noinstr void func(struct pt_regs *regs) \
233 struct interrupt_state state; \
235 interrupt_enter_prepare(regs, &state); \
239 interrupt_exit_prepare(regs, &state); \
242 static __always_inline void ____##func(struct pt_regs *regs)
245 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
246 * @func: Function name of the entry point
247 * @returns: Returns a value back to asm caller
249 #define DECLARE_INTERRUPT_HANDLER_RET(func) \
250 __visible long func(struct pt_regs *regs)
253 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
254 * @func: Function name of the entry point
255 * @returns: Returns a value back to asm caller
257 * @func is called from ASM entry code.
259 * The macro is written so it acts as function definition. Append the
260 * body with a pair of curly brackets.
262 #define DEFINE_INTERRUPT_HANDLER_RET(func) \
263 static __always_inline long ____##func(struct pt_regs *regs); \
265 __visible noinstr long func(struct pt_regs *regs) \
267 struct interrupt_state state; \
270 interrupt_enter_prepare(regs, &state); \
272 ret = ____##func (regs); \
274 interrupt_exit_prepare(regs, &state); \
279 static __always_inline long ____##func(struct pt_regs *regs)
282 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
283 * @func: Function name of the entry point
285 #define DECLARE_INTERRUPT_HANDLER_ASYNC(func) \
286 __visible void func(struct pt_regs *regs)
289 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
290 * @func: Function name of the entry point
292 * @func is called from ASM entry code.
294 * The macro is written so it acts as function definition. Append the
295 * body with a pair of curly brackets.
297 #define DEFINE_INTERRUPT_HANDLER_ASYNC(func) \
298 static __always_inline void ____##func(struct pt_regs *regs); \
300 __visible noinstr void func(struct pt_regs *regs) \
302 struct interrupt_state state; \
304 interrupt_async_enter_prepare(regs, &state); \
308 interrupt_async_exit_prepare(regs, &state); \
311 static __always_inline void ____##func(struct pt_regs *regs)
314 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
315 * @func: Function name of the entry point
316 * @returns: Returns a value back to asm caller
318 #define DECLARE_INTERRUPT_HANDLER_NMI(func) \
319 __visible long func(struct pt_regs *regs)
322 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
323 * @func: Function name of the entry point
324 * @returns: Returns a value back to asm caller
326 * @func is called from ASM entry code.
328 * The macro is written so it acts as function definition. Append the
329 * body with a pair of curly brackets.
331 #define DEFINE_INTERRUPT_HANDLER_NMI(func) \
332 static __always_inline long ____##func(struct pt_regs *regs); \
334 __visible noinstr long func(struct pt_regs *regs) \
336 struct interrupt_nmi_state state; \
339 interrupt_nmi_enter_prepare(regs, &state); \
341 ret = ____##func (regs); \
343 interrupt_nmi_exit_prepare(regs, &state); \
348 static __always_inline long ____##func(struct pt_regs *regs)
351 /* Interrupt handlers */
/* Machine check is async on Book3S 64, NMI elsewhere */
353 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
354 #ifdef CONFIG_PPC_BOOK3S_64
355 DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
357 DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
359 DECLARE_INTERRUPT_HANDLER(SMIException);
360 DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
361 DECLARE_INTERRUPT_HANDLER(unknown_exception);
362 DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
363 DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
364 DECLARE_INTERRUPT_HANDLER(RunModeException);
365 DECLARE_INTERRUPT_HANDLER(single_step_exception);
366 DECLARE_INTERRUPT_HANDLER(program_check_exception);
367 DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
368 DECLARE_INTERRUPT_HANDLER(alignment_exception);
369 DECLARE_INTERRUPT_HANDLER(StackOverflow);
370 DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
/* Facility-unavailable family (FP/AltiVec/VSX, and their TM variants) */
371 DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
372 DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
373 DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
374 DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
375 DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
376 DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
377 DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
/* PMU interrupt has raw, NMI and async flavours */
378 DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
379 DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
380 DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
381 DECLARE_INTERRUPT_HANDLER(DebugException);
382 DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
383 DECLARE_INTERRUPT_HANDLER(CacheLockingException);
384 DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
385 DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
386 DECLARE_INTERRUPT_HANDLER(unrecoverable_exception);
387 DECLARE_INTERRUPT_HANDLER(WatchdogException);
388 DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
/* MMU fault handlers — _RAW variants run before state is reconciled */
391 DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
392 DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
395 DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
398 DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
399 DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);
402 DECLARE_INTERRUPT_HANDLER(do_break);
405 DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);
408 DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
409 DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
411 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
/* Soft-masked interrupt replay helpers (defined in platform irq code) */
413 void replay_system_reset(void);
414 void replay_soft_interrupts(void);
/*
 * Re-enable local irqs for a handler when the interrupted context had
 * them enabled (per the soft-mask state recorded in regs).
 * NOTE(review): the enable call in the body is on an elided line here.
 */
416 static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
418 if (!arch_irq_disabled_regs(regs))
422 #endif /* _ASM_POWERPC_INTERRUPT_H */