/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT  0x100

/* BookE */
#define INTERRUPT_DEBUG           0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON         0x260
#define INTERRUPT_DOORBELL        0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK   0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET    0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT    0x380
#define INTERRUPT_INST_SEGMENT    0x480
#define INTERRUPT_TRACE           0xd00
#define INTERRUPT_H_DATA_STORAGE  0xe00
#define INTERRUPT_HMI             0xe60
#define INTERRUPT_H_FAC_UNAVAIL   0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL        0xa00
#define INTERRUPT_PERFMON         0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE    0x300
#define INTERRUPT_INST_STORAGE    0x400
#define INTERRUPT_EXTERNAL        0x500
#define INTERRUPT_ALIGNMENT       0x600
#define INTERRUPT_PROGRAM         0x700
#define INTERRUPT_SYSCALL         0xc00
#define INTERRUPT_TRACE           0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL      0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER     0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON         0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx          0x1000
#define INTERRUPT_INST_TLB_MISS_8xx     0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx     0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx    0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx    0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx   0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx   0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603       0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603  0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/firmware.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);

DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return false;

	if (regs->nip >= (unsigned long)__end_soft_masked)
		return false;

	return search_kernel_soft_mask_table(regs->nip);
}

static inline void srr_regs_clobbered(void)
{
	local_paca->srr_valid = 0;
	local_paca->hsrr_valid = 0;
}
#else
static inline unsigned long search_kernel_restart_table(unsigned long addr)
{
	return 0;
}

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	return false;
}

static inline void srr_regs_clobbered(void)
{
}
#endif

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
	}
#endif
}

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs))
		kuep_lock();
	else
		kuap_save_and_lock(regs);

	if (user_mode(regs))
		account_cpu_user_entry();
#endif

#ifdef CONFIG_PPC64
	bool trace_enable = false;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
		if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
			trace_enable = true;
	} else {
		irq_soft_mask_set(IRQS_ALL_DISABLED);
	}

	/*
	 * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
	 * Asynchronous interrupts get here with HARD_DIS set (see below), so
	 * this enables MSR[EE] for synchronous interrupts. IRQs remain
	 * soft-masked. The interrupt handler may later call
	 * interrupt_cond_local_irq_enable() to achieve a regular process
	 * context.
	 */
	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			BUG_ON(!(regs->msr & MSR_EE));
		__hard_irq_enable();
	} else {
		__hard_RI_enable();
	}

	/* Do this when RI=1 because it can cause SLB faults */
	if (trace_enable)
		trace_hardirqs_off();

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		kuap_save_and_lock(regs);
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM) {
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
			if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
				BUG_ON(is_implicit_soft_masked(regs));
		}

		/* Move this under a debugging check */
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&
		    arch_irq_disabled_regs(regs))
			BUG_ON(search_kernel_restart_table(regs->nip));
	}
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif

	booke_restore_dbcr0();
}

/*
 * Note that interrupt_exit_prepare and interrupt_async_exit_prepare do not
 * necessarily return immediately to regs context (e.g., if regs is usermode,
 * we don't necessarily return to user mode). Other interrupts might be taken
 * between here and return, a context switch / preemption may occur in the
 * exit path after this, or a signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
	/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
	interrupt_enter_prepare(regs);
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * RI=1 is set by interrupt_enter_prepare, so this thread flags access
	 * has to come afterward (it can cause SLB faults).
	 */
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to the idle nap
	 * return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 irq_soft_mask;
	u8 irq_happened;
	u8 ftrace_enabled;
	u64 softe;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (TRAP(regs) == INTERRUPT_DECREMENTER)
			return false;
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}
	if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}

	return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;
	state->softe = regs->softe;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
		/*
		 * Adjust regs->softe to be soft-masked if it had not been
		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
		 * not yet set disabled), or if it was in an implicit soft
		 * masked state. This makes arch_irq_disabled_regs(regs)
		 * behave as expected.
		 */
		regs->softe = IRQS_ALL_DISABLED;
	}

	__hard_RI_enable();

	/* Don't do any per-CPU operations until interrupt state is fixed */
#endif /* CONFIG_PPC64 */
	if (nmi_disables_ftrace(regs)) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}

	/* If data relocations are enabled, it's safe to use nmi_enter() */
	if (mfmsr() & MSR_DR) {
		nmi_enter();
		return;
	}

	/*
	 * But do not use nmi_enter() for a pseries hash guest taking a
	 * real-mode NMI because not everything it touches is within the RMA
	 * limit.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    !radix_enabled())
		return;

	/*
	 * Likewise, don't use it if we have some form of instrumentation (like
	 * KASAN shadow) that is not safe to access in real mode (even on
	 * radix).
	 */
	if (IS_ENABLED(CONFIG_KASAN))
		return;

	/* Otherwise, it should be safe to call it */
	nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (mfmsr() & MSR_DR) {
		// nmi_exit if relocations are on
		nmi_exit();
	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
		   firmware_has_feature(FW_FEATURE_LPAR) &&
		   !radix_enabled()) {
		// no nmi_exit for a pseries hash guest taking a real mode exception
	} else if (IS_ENABLED(CONFIG_KASAN)) {
		// no nmi_exit for KASAN in real mode
	} else {
		nmi_exit();
	}

	/*
	 * nmi does not call nap_adjust_return because nmi should not create
	 * new work to do (must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
	if (arch_irq_disabled_regs(regs)) {
		unsigned long rst = search_kernel_restart_table(regs->nip);
		if (rst)
			regs_set_return_ip(regs, rst);
	}
#endif

	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	regs->softe = state->softe;
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is that the noinstr section is
 * placed after the main text section, i.e., very far away from the interrupt
 * entry asm. That creates problems with fitting linker stubs when building
 * large kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) would not
 * be advisable either; although it may be possible in a pinch, the trace
 * will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs);					\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	__hard_RI_enable();						\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs)
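
/*
 * Usage sketch (illustrative only; "do_example_raw" is a hypothetical name,
 * not a real handler). The macro supplies everything up to the function
 * body, so a definition is completed by appending braces:
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(do_example_raw)
 *	{
 *		return 0;
 *	}
 *
 * The returned value is passed back to the asm caller.
 */
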
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	interrupt_enter_prepare(regs);					\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs);					\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
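
/*
 * Usage sketch (illustrative only; "do_example" is a hypothetical name).
 * Entry/exit reconciliation is performed by the generated wrapper, so the
 * body only does the handling work; it may enable IRQs if the interrupted
 * context had them enabled:
 *
 *	DEFINE_INTERRUPT_HANDLER(do_example)
 *	{
 *		interrupt_cond_local_irq_enable(regs);
 *	}
 */
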
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	interrupt_enter_prepare(regs);					\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
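
/*
 * Usage sketch (illustrative only; "do_example_ret" and its return-value
 * convention are hypothetical). As above, but the body's return value is
 * handed back to the asm caller:
 *
 *	DEFINE_INTERRUPT_HANDLER_RET(do_example_ret)
 *	{
 *		return user_mode(regs) ? 1 : 0;
 *	}
 */
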
/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	interrupt_async_enter_prepare(regs);				\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
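
/*
 * Usage sketch (illustrative only; "do_example_async" and
 * "handle_example_event" are hypothetical names). The async wrapper
 * additionally sets PACA_IRQ_HARD_DIS before interrupt_enter_prepare() and
 * brackets the body with irq_enter()/irq_exit(), so the body runs with IRQs
 * soft-masked:
 *
 *	DEFINE_INTERRUPT_HANDLER_ASYNC(do_example_async)
 *	{
 *		handle_example_event(regs);
 *	}
 */
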
/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs);					\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs)
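
/*
 * Usage sketch (illustrative only; "do_example_nmi" is a hypothetical
 * name). The wrapper saves and restores the soft-mask state around the body
 * via interrupt_nmi_enter_prepare()/interrupt_nmi_exit_prepare(), and the
 * body must observe NMI constraints (e.g., defer work with irq_work rather
 * than creating new work, per interrupt_nmi_exit_prepare above):
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(do_example_nmi)
 *	{
 *		return 0;
 *	}
 */
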
/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8,
			   unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
#ifdef CONFIG_PPC64
unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */