1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #ifndef _ASM_POWERPC_INTERRUPT_H
3 #define _ASM_POWERPC_INTERRUPT_H
5 #include <linux/context_tracking.h>
6 #include <linux/hardirq.h>
7 #include <asm/cputime.h>
8 #include <asm/ftrace.h>
9 #include <asm/kprobes.h>
10 #include <asm/runlatch.h>
/*
 * Per-interrupt bookkeeping passed from interrupt_enter_prepare() to
 * interrupt_exit_prepare().
 * NOTE(review): this chunk is truncated — the struct's closing brace and
 * the matching #endif are not visible here.
 */
12 struct interrupt_state {
13 #ifdef CONFIG_PPC_BOOK3E_64
	/* context-tracking state saved by exception_enter() on entry,
	 * handed back to exception_exit() on the exit path */
14 enum ctx_state ctx_state;
/*
 * Restore the debug control register for BookE-style debug facilities.
 * If the current thread uses internal debug mode (DBCR0_IDM, checked on
 * 32-bit only), reload this CPU's saved DBCR0 image into the SPR.
 */
18 static inline void booke_restore_dbcr0(void)
20 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
21 unsigned long dbcr0 = current->thread.debug.dbcr0;
23 if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
	/* per-CPU saved value, indexed by the current processor */
25 mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
/*
 * Common entry work for synchronous interrupt handlers: reconcile the
 * Book3S soft-irq mask state in the PACA, sanity-check context-tracking
 * state, and account user/stolen CPU time when arriving from user mode.
 */
30 static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
33 * Book3E reconciles irq soft mask in asm
35 #ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Mask everything; if the old mask shows we interrupted a
	 * soft-enabled context, record that hard irqs are now disabled.
	 */
36 if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
38 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
40 if (user_mode(regs)) {
41 CT_WARN_ON(ct_state() != CONTEXT_USER);
44 account_cpu_user_entry();
45 account_stolen_time();
48 * CT_WARN_ON comes here via program_check_exception,
	/* trap 0x700 is program check — skip the CT warning for it */
51 if (TRAP(regs) != 0x700)
52 CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
56 #ifdef CONFIG_PPC_BOOK3E_64
	/* Book3E does context tracking here rather than in asm */
57 state->ctx_state = exception_enter();
59 account_cpu_user_entry();
64 * Care should be taken to note that interrupt_exit_prepare and
65 * interrupt_async_exit_prepare do not necessarily return immediately to
66 * regs context (e.g., if regs is usermode, we don't necessarily return to
67 * user mode). Other interrupts might be taken between here and return,
68 * context switch / preemption may occur in the exit path after this, or a
69 * signal may be delivered, etc.
71 * The real interrupt exit code is platform specific, e.g.,
72 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
74 * However interrupt_nmi_exit_prepare does return directly to regs, because
75 * NMIs do not do "exit work" or replay soft-masked interrupts.
/*
 * Common exit work mirroring interrupt_enter_prepare(). See the comment
 * block above: this does not necessarily return straight to the context
 * described by regs.
 */
77 static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
79 #ifdef CONFIG_PPC_BOOK3E_64
	/* undo the exception_enter() done in interrupt_enter_prepare() */
80 exception_exit(state->ctx_state);
84 * Book3S exits to user via interrupt_exit_user_prepare(), which does
85 * context tracking, which is a cleaner way to handle PREEMPT=y
86 * and avoid context entry/exit in e.g., preempt_schedule_irq()),
87 * which is likely to be where the core code wants to end up.
89 * The above comment explains why we can't do the
91 * if (user_mode(regs))
/*
 * Entry work for asynchronous interrupts: turn the runlatch on for
 * Book3S 64 CPUs that have the CTRL facility, then do the common
 * synchronous entry work.
 */
98 static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
100 #ifdef CONFIG_PPC_BOOK3S_64
	/* skip the update if this thread already has the runlatch set */
101 if (cpu_has_feature(CPU_FTR_CTRL) &&
102 !test_thread_local_flags(_TLF_RUNLATCH))
103 __ppc64_runlatch_on();
106 interrupt_enter_prepare(regs, state);
/*
 * Exit work for asynchronous interrupts; the visible body delegates to
 * the common interrupt_exit_prepare() path.
 */
110 static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
113 interrupt_exit_prepare(regs, state);
/*
 * State saved across an NMI by interrupt_nmi_enter_prepare() and
 * restored by interrupt_nmi_exit_prepare().
 * NOTE(review): the member list is truncated in this chunk.
 */
116 struct interrupt_nmi_state {
118 #ifdef CONFIG_PPC_BOOK3S_64
/*
 * NMI entry: save and then force the PACA irq soft-mask state without
 * going through irq tracing, disable ftrace for traps that are true
 * NMIs, and (per the comment below) avoid NMI-context entry for a
 * pseries hash guest taking a real-mode NMI.
 */
126 static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
129 #ifdef CONFIG_PPC_BOOK3S_64
	/* save so the exit path can restore exactly what we found */
130 state->irq_soft_mask = local_paca->irq_soft_mask;
131 state->irq_happened = local_paca->irq_happened;
134 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
135 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
136 * because that goes through irq tracing which we don't want in NMI.
138 local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
139 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
141 /* Don't do any per-CPU operations until interrupt state is fixed */
143 /* Allow DEC and PMI to be traced when they are soft-NMI */
144 if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
	/* remember ftrace state so the exit path can put it back */
145 state->ftrace_enabled = this_cpu_get_ftrace_enabled();
146 this_cpu_set_ftrace_enabled(0);
151 * Do not use nmi_enter() for pseries hash guest taking a real-mode
152 * NMI because not everything it touches is within the RMA limit.
	/* condition true => not a real-mode pseries hash guest NMI */
154 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
155 !firmware_has_feature(FW_FEATURE_LPAR) ||
156 radix_enabled() || (mfmsr() & MSR_DR))
/*
 * NMI exit: mirror of interrupt_nmi_enter_prepare() — re-enable ftrace
 * where it was disabled on entry, and restore the saved PACA irq state.
 */
160 static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
	/* same condition as on entry; presumably pairs an nmi_exit() here
	 * (the call itself is not visible in this chunk) */
162 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
163 !firmware_has_feature(FW_FEATURE_LPAR) ||
164 radix_enabled() || (mfmsr() & MSR_DR))
	/* matches the entry-side trap filter for soft-NMI DEC/PMI */
168 if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
169 this_cpu_set_ftrace_enabled(state->ftrace_enabled);
171 #ifdef CONFIG_PPC_BOOK3S_64
172 /* Check we didn't change the pending interrupt mask. */
173 WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
174 local_paca->irq_happened = state->irq_happened;
175 local_paca->irq_soft_mask = state->irq_soft_mask;
181 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
182 * function definition. The reason for this is the noinstr section is placed
183 * after the main text section, i.e., very far away from the interrupt entry
184 * asm. That creates problems with fitting linker stubs when building large
/* Attributes applied to every C-level interrupt handler entry point:
 * out-of-line, untraced, and excluded from KCSAN/ASAN instrumentation. */
187 #define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address
190 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
191 * @func: Function name of the entry point
192 * @returns: Returns a value back to asm caller
194 #define DECLARE_INTERRUPT_HANDLER_RAW(func) \
195 __visible long func(struct pt_regs *regs)
198 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
199 * @func: Function name of the entry point
200 * @returns: Returns a value back to asm caller
202 * @func is called from ASM entry code.
204 * This is a plain function which does no tracing, reconciling, etc.
205 * The macro is written so it acts as function definition. Append the
206 * body with a pair of curly brackets.
208 * raw interrupt handlers must not enable or disable interrupts, or
209 * schedule, tracing and instrumentation (ftrace, lockdep, etc) would
210 * not be advisable either, although may be possible in a pinch, the
211 * trace will look odd at least.
213 * A raw handler may call one of the other interrupt handler functions
214 * to be converted into that interrupt context without these restrictions.
216 * On PPC64, _RAW handlers may return with fast_interrupt_return.
218 * Specific handlers may have additional restrictions.
/* Emits a non-probeable out-of-line entry point func() that forwards to
 * an always-inlined ____func() body supplied by the user after the macro. */
220 #define DEFINE_INTERRUPT_HANDLER_RAW(func) \
221 static __always_inline long ____##func(struct pt_regs *regs); \
223 interrupt_handler long func(struct pt_regs *regs) \
227 ret = ____##func (regs); \
231 NOKPROBE_SYMBOL(func); \
233 static __always_inline long ____##func(struct pt_regs *regs)
236 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
237 * @func: Function name of the entry point
239 #define DECLARE_INTERRUPT_HANDLER(func) \
240 __visible void func(struct pt_regs *regs)
243 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
244 * @func: Function name of the entry point
246 * @func is called from ASM entry code.
248 * The macro is written so it acts as function definition. Append the
249 * body with a pair of curly brackets.
/* Wraps the user-supplied ____func() body between
 * interrupt_enter_prepare() and interrupt_exit_prepare(). */
251 #define DEFINE_INTERRUPT_HANDLER(func) \
252 static __always_inline void ____##func(struct pt_regs *regs); \
254 interrupt_handler void func(struct pt_regs *regs) \
256 struct interrupt_state state; \
258 interrupt_enter_prepare(regs, &state); \
262 interrupt_exit_prepare(regs, &state); \
264 NOKPROBE_SYMBOL(func); \
266 static __always_inline void ____##func(struct pt_regs *regs)
269 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
270 * @func: Function name of the entry point
271 * @returns: Returns a value back to asm caller
273 #define DECLARE_INTERRUPT_HANDLER_RET(func) \
274 __visible long func(struct pt_regs *regs)
277 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
278 * @func: Function name of the entry point
279 * @returns: Returns a value back to asm caller
281 * @func is called from ASM entry code.
283 * The macro is written so it acts as function definition. Append the
284 * body with a pair of curly brackets.
/* As DEFINE_INTERRUPT_HANDLER, but the handler body's long return value
 * is propagated back to the asm caller. */
286 #define DEFINE_INTERRUPT_HANDLER_RET(func) \
287 static __always_inline long ____##func(struct pt_regs *regs); \
289 interrupt_handler long func(struct pt_regs *regs) \
291 struct interrupt_state state; \
294 interrupt_enter_prepare(regs, &state); \
296 ret = ____##func (regs); \
298 interrupt_exit_prepare(regs, &state); \
302 NOKPROBE_SYMBOL(func); \
304 static __always_inline long ____##func(struct pt_regs *regs)
307 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
308 * @func: Function name of the entry point
310 #define DECLARE_INTERRUPT_HANDLER_ASYNC(func) \
311 __visible void func(struct pt_regs *regs)
314 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
315 * @func: Function name of the entry point
317 * @func is called from ASM entry code.
319 * The macro is written so it acts as function definition. Append the
320 * body with a pair of curly brackets.
/* Wraps the user-supplied ____func() body between
 * interrupt_async_enter_prepare() and interrupt_async_exit_prepare(). */
322 #define DEFINE_INTERRUPT_HANDLER_ASYNC(func) \
323 static __always_inline void ____##func(struct pt_regs *regs); \
325 interrupt_handler void func(struct pt_regs *regs) \
327 struct interrupt_state state; \
329 interrupt_async_enter_prepare(regs, &state); \
333 interrupt_async_exit_prepare(regs, &state); \
335 NOKPROBE_SYMBOL(func); \
337 static __always_inline void ____##func(struct pt_regs *regs)
340 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
341 * @func: Function name of the entry point
342 * @returns: Returns a value back to asm caller
344 #define DECLARE_INTERRUPT_HANDLER_NMI(func) \
345 __visible long func(struct pt_regs *regs)
348 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
349 * @func: Function name of the entry point
350 * @returns: Returns a value back to asm caller
352 * @func is called from ASM entry code.
354 * The macro is written so it acts as function definition. Append the
355 * body with a pair of curly brackets.
/* Wraps the user-supplied ____func() body between
 * interrupt_nmi_enter_prepare() and interrupt_nmi_exit_prepare(),
 * using the NMI-specific saved state. */
357 #define DEFINE_INTERRUPT_HANDLER_NMI(func) \
358 static __always_inline long ____##func(struct pt_regs *regs); \
360 interrupt_handler long func(struct pt_regs *regs) \
362 struct interrupt_nmi_state state; \
365 interrupt_nmi_enter_prepare(regs, &state); \
367 ret = ____##func (regs); \
369 interrupt_nmi_exit_prepare(regs, &state); \
373 NOKPROBE_SYMBOL(func); \
375 static __always_inline long ____##func(struct pt_regs *regs)
378 /* Interrupt handlers */
380 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
/* NOTE(review): the #else/#endif pairing machine_check_exception's two
 * variants (ASYNC on Book3S 64, NMI otherwise) is not visible in this chunk. */
381 #ifdef CONFIG_PPC_BOOK3S_64
382 DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
384 DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
386 DECLARE_INTERRUPT_HANDLER(SMIException);
387 DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
388 DECLARE_INTERRUPT_HANDLER(unknown_exception);
389 DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
390 DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
391 DECLARE_INTERRUPT_HANDLER(RunModeException);
392 DECLARE_INTERRUPT_HANDLER(single_step_exception);
393 DECLARE_INTERRUPT_HANDLER(program_check_exception);
394 DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
395 DECLARE_INTERRUPT_HANDLER(alignment_exception);
396 DECLARE_INTERRUPT_HANDLER(StackOverflow);
397 DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
398 DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
399 DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
400 DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
401 DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
402 DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
403 DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
404 DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
405 DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
406 DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
407 DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
408 DECLARE_INTERRUPT_HANDLER(DebugException);
409 DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
410 DECLARE_INTERRUPT_HANDLER(CacheLockingException);
411 DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
412 DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
413 DECLARE_INTERRUPT_HANDLER(WatchdogException);
414 DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
/* MMU fault handling (SLB, hash, page faults) */
417 DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
418 DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
421 DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
424 DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
425 DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);
428 DECLARE_INTERRUPT_HANDLER(do_break);
431 DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);
/* early / realmode NMI-class handlers */
434 DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
435 DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
437 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
/* plain prototypes: entry points not wrapped by the handler macros */
439 void unrecoverable_exception(struct pt_regs *regs);
441 void replay_system_reset(void);
442 void replay_soft_interrupts(void);
/*
 * Conditionally re-enable local interrupts: only if regs indicate the
 * interrupted context did not have them (soft-)disabled.
 * NOTE(review): the enabling statement itself is not visible in this
 * truncated chunk — confirm against the full file.
 */
444 static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
446 if (!arch_irq_disabled_regs(regs))
450 #endif /* _ASM_POWERPC_INTERRUPT_H */