/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64
/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay (see the illustrative note below the
 * flag definitions).
 *
 * PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
 * always in sync with the MSR[EE] state, except:
 * - A window in interrupt entry, where hardware disables MSR[EE] and that
 *   must be "reconciled" with the soft mask state.
 * - NMI interrupts that hit in awkward places, until they fix the state.
 * - When local irqs are being enabled and state is being fixed up.
 * - When returning from an interrupt there are some windows where this
 *   can become out of sync, but gets fixed before the RFI or before
 *   executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20
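
/*
 * Illustrative note (editorial sketch, not part of the kernel API): when an
 * interrupt such as an external interrupt arrives while the soft mask is
 * set, the low-level masked handler only records it and returns, roughly:
 *
 *	local_paca->irq_happened |= PACA_IRQ_EE;	// remember the source
 *	// hard disable MSR[EE] so the exception does not fire again
 *
 * The recorded interrupt is later "replayed" from arch_local_irq_restore()
 * once the soft mask is cleared.
 */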
/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

#endif /* CONFIG_PPC64 */
/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)

#ifndef __ASSEMBLY__
static inline void __hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_EE | MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_EE);
}
static inline void __hard_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}
static inline void __hard_EE_RI_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}
static inline void __hard_RI_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		return;

	if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_RI);
}
#ifdef CONFIG_PPC64
#include <asm/paca.h>
static inline notrace unsigned long irq_soft_mask_return(void)
{
	return READ_ONCE(local_paca->irq_soft_mask);
}
/*
 * The barrier() acts as a compiler barrier for the critical
 * section, which is needed because we change paca->irq_soft_mask.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON(mask && !(mask & IRQS_DISABLED));

	WRITE_ONCE(local_paca->irq_soft_mask, mask);
	barrier();
}
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(mask);

	return flags;
}
static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags | mask);

	return flags;
}
static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
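
/*
 * Editorial sketch (not kernel documentation): these are the arch hooks
 * behind the generic local_irq_*() macros in include/linux/irqflags.h.
 * On 64-bit, local_irq_disable() only sets the paca soft mask and leaves
 * MSR[EE] alone, e.g.:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// irq_soft_mask_set_return(IRQS_DISABLED)
 *	// hardware interrupts may still occur here, but are recorded in
 *	// paca->irq_happened and replayed later instead of being handled
 *	local_irq_restore(flags);	// arch_local_irq_restore() replays them
 */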
static inline void set_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to set PMI bit in the paca.
	 * This has to be called with irqs disabled (via hard_irq_disable()).
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened |= PACA_IRQ_PMI;
}
static inline void clear_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to clear the pending PMI bit
	 * in the paca.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}
static inline bool pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to check if there is a pending
	 * PMI bit in the paca.
	 */
	if (get_paca()->irq_happened & PACA_IRQ_PMI)
		return true;

	return false;
}
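
/*
 * Illustrative note (hypothetical sketch, not taken from the PMU driver):
 * these helpers are expected to be called from perf/PMU code with
 * interrupts hard disabled, e.g.
 *
 *	if (pmi_irq_pending())
 *		clear_pmi_irq_pending();	// drop a stale recorded PMI
 */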
#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of IRQs together with PMIs, a pair of
 * powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore() macros
 * is provided. They are implemented using the generic Linux local_irq_*
 * code from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |		\
				IRQS_PMI_DISABLED);			\
	} while(0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while(0)
#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_off();			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
		raw_local_irq_pmu_restore(flags);		\
	} while(0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while(0)
#endif /* CONFIG_TRACE_IRQFLAGS */
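
/*
 * Usage sketch (illustrative only, mirroring the generic local_irq_save()
 * pattern): callers that must also keep PMIs out of a critical section do
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	... critical section that must not be interrupted by a PMI ...
 *	powerpc_local_irq_pmu_restore(flags);
 */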
#endif /* CONFIG_PPC_BOOK3S */
#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		WRITE_ONCE(local_paca->saved_r1, current_stack_pointer);\
		trace_hardirqs_off();					\
	}								\
} while(0)
static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}
/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}
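
/*
 * Usage sketch (illustrative, based on the idle entry pattern): before
 * entering a low power state, callers hard disable and bail out if a
 * soft-masked interrupt has already been recorded, e.g.
 *
 *	hard_irq_disable();
 *	if (lazy_irq_pending())
 *		return 0;	// abort idle entry, replay the interrupt instead
 */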
bool power_pmu_wants_prompt_pmi(void);

/*
 * This is called by asynchronous interrupts to check whether to
 * conditionally re-enable hard interrupts after having cleared
 * the source of the interrupt. They are kept disabled if there
 * is a different soft-masked interrupt pending that requires hard
 * masking.
 */
static inline bool should_hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
		WARN_ON(mfmsr() & MSR_EE);
	}

	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
		return false;
	/*
	 * If the PMU is not running, there is not much reason to enable
	 * MSR[EE] in irq handlers because any interrupts would just be
	 * soft-masked.
	 *
	 * TODO: Add test for 64e
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
		return false;

	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
		return false;

	return true;
}
/*
 * Do the hard enabling, only call this if should_hard_irq_enable is true.
 */
static inline void do_hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
		WARN_ON(mfmsr() & MSR_EE);
	}
	/*
	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
	 * There is no other reason to enable this way.
	 */
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	__hard_irq_enable();
}
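
/*
 * Usage sketch (illustrative): an asynchronous interrupt handler such as
 * the timer or doorbell handler clears its interrupt source and then does
 *
 *	if (should_hard_irq_enable())
 *		do_hard_irq_enable();
 *
 * so that a pending PMI can be taken promptly while the soft-masked
 * handler body runs.
 */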
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */

static inline notrace unsigned long irq_soft_mask_return(void)
{
	return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}
static inline void arch_local_irq_disable(void)
{
	__hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
	__hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()	arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static __always_inline bool should_hard_irq_enable(void)
{
	return false;
}

static inline void do_hard_irq_enable(void)
{
}

static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */
#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */