/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

struct interrupt_state {
#ifdef CONFIG_PPC_BOOK3E_64
	enum ctx_state ctx_state;
#endif
};

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuep_lock();
		account_cpu_user_entry();
	}
#endif
	/*
	 * Book3E reconciles irq soft mask in asm
	 */
#ifdef CONFIG_PPC_BOOK3S_64
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != 0x700)
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
	}
#endif

#ifdef CONFIG_PPC_BOOK3E_64
	state->ctx_state = exception_enter();
	if (user_mode(regs))
		account_cpu_user_entry();
#endif

	booke_restore_dbcr0();
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3E_64
	exception_exit(state->ctx_state);
#endif

	/*
	 * Book3S exits to user via interrupt_exit_user_prepare(), which does
	 * context tracking, which is a cleaner way to handle PREEMPT=y
	 * and avoid context entry/exit in e.g., preempt_schedule_irq(),
	 * which is likely to be where the core code wants to end up.
	 *
	 * The above comment explains why we can't do the
	 *
	 * if (user_mode(regs))
	 *	user_exit_irqoff();
	 *
	 * here.
	 */
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif

	interrupt_enter_prepare(regs, state);
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	irq_exit();
	interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
	u8 irq_soft_mask;
	u8 irq_happened;
#endif
	u8 ftrace_enabled;
#endif
};

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Don't do any per-CPU operations until interrupt state is fixed */
#endif
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	    !firmware_has_feature(FW_FEATURE_LPAR) ||
	    radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	    !firmware_has_feature(FW_FEATURE_LPAR) ||
	    radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

#ifdef CONFIG_PPC64
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) would
 * not be advisable either; it may be possible in a pinch, but the
 * trace will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

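/*
 * Usage sketch (illustrative only; do_example_raw is a hypothetical
 * name, not one of the handlers declared below):
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(do_example_raw)
 *	{
 *		return 0;
 *	}
 *
 * The asm entry code branches to do_example_raw(); the appended body
 * becomes ____do_example_raw(), which the generated wrapper calls and
 * whose return value is handed back to the asm caller.
 */
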
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

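/*
 * Usage sketch (illustrative; hypothetical handler name). The appended
 * body runs between interrupt_enter_prepare() and
 * interrupt_exit_prepare():
 *
 *	DEFINE_INTERRUPT_HANDLER(do_example)
 *	{
 *		interrupt_cond_local_irq_enable(regs);
 *	}
 */
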
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
	long ret;							\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs, &state);				\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

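/*
 * Usage sketch (illustrative; hypothetical handler name). Like
 * DEFINE_INTERRUPT_HANDLER, but the body's return value is passed back
 * to the asm caller, as do_page_fault (declared below) does:
 *
 *	DEFINE_INTERRUPT_HANDLER_RET(do_example_ret)
 *	{
 *		return 0;
 *	}
 */
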
/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_async_enter_prepare(regs, &state);			\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs, &state);			\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

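/*
 * Usage sketch (illustrative; do_example_async and handle_example_irq
 * are hypothetical names). Asynchronous handlers are additionally
 * bracketed by irq_enter()/irq_exit() and get runlatch handling via
 * interrupt_async_enter_prepare(); timer_interrupt (declared below) is
 * defined this way:
 *
 *	DEFINE_INTERRUPT_HANDLER_ASYNC(do_example_async)
 *	{
 *		handle_example_irq(regs);
 *	}
 */
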
/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

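/*
 * Usage sketch (illustrative; hypothetical handler name). NMI handlers
 * are bracketed by interrupt_nmi_enter_prepare() and
 * interrupt_nmi_exit_prepare() rather than the regular enter/exit
 * paths, and return a value to the asm caller:
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(do_example_nmi)
 *	{
 *		return 0;
 *	}
 */
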
/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

#endif /* _ASM_POWERPC_INTERRUPT_H */