// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
 * to reduce code space and undefined function references.
 */
#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#endif

#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;
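
/*
 * Note on the helper below: on 64-bit, r13 permanently holds the pointer to
 * this CPU's PACA, so the single lbz reads local_paca->irq_happened without
 * any of the debug_smp_processor_id() overhead of get_paca().
 */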
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}
void replay_soft_interrupts(void)
{
	struct pt_regs regs;

	/*
	 * Be careful here: calling these interrupt handlers can cause
	 * softirqs to be raised, which may then be run when irq_exit() is
	 * called, which will cause local_irq_enable() to be run, which can
	 * then recurse into this function. Don't keep any state across
	 * interrupt handler calls which may change underneath us.
	 *
	 * We use local_paca rather than get_paca() to avoid all the
	 * debug_smp_processor_id() business in this low level function.
	 */

	ppc_save_regs(&regs);
	regs.softe = IRQS_ENABLED;

again:
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;

		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a Hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * it is replayed first.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		regs.trap = INTERRUPT_HMI;
		handle_hmi_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		regs.trap = INTERRUPT_DECREMENTER;
		timer_interrupt(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		regs.trap = INTERRUPT_EXTERNAL;
		do_IRQ(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		regs.trap = INTERRUPT_DOORBELL;
		doorbell_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	/* Book3E does not support soft-masking PMI interrupts */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
		local_paca->irq_happened &= ~PACA_IRQ_PMI;
		regs.trap = INTERRUPT_PERFMON;
		performance_monitor_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
		/*
		 * We are responding to the next interrupt, so interrupt-off
		 * latencies should be reset here.
		 */
		trace_hardirqs_off();
		goto again;
	}
}
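
/*
 * Sketch of the lazy-masking sequence that ends up in the replay above
 * (illustrative only, not a new code path):
 *
 *	local_irq_disable();	// only sets the soft mask in the PACA
 *	...			// a device interrupt arrives: the low level
 *				// handler sees the soft mask, records
 *				// PACA_IRQ_EE, clears MSR[EE] and returns
 *				// without running the device handler
 *	local_irq_enable();	// arch_local_irq_restore() notices
 *				// irq_happened != 0 and replays the event
 *				// via replay_soft_interrupts()
 */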
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
static inline void replay_soft_interrupts_irqrestore(void)
{
	unsigned long kuap_state = get_kuap();

	/*
	 * Check if anything calls local_irq_enable/restore() when KUAP is
	 * disabled (user access enabled). We handle that case here by saving
	 * and re-locking AMR but we shouldn't get here in the first place,
	 * hence the warning.
	 */
	kuap_assert_locked();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(AMR_KUAP_BLOCKED);

	replay_soft_interrupts();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif
#ifdef CONFIG_CC_HAS_ASM_GOTO
notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;

	/* Write the new soft-enabled value if it is a disable */
	if (mask) {
		irq_soft_mask_set(mask);
		return;
	}

	/*
	 * After the stb, interrupts are unmasked and there are no interrupts
	 * pending replay. The restart sequence makes this atomic with
	 * respect to soft-masked interrupts. If this was just a simple code
	 * sequence, a soft-masked interrupt could become pending right after
	 * the comparison and before the stb.
	 *
	 * This allows interrupts to be unmasked without hard disabling, and
	 * also without new hard interrupts coming in ahead of pending ones.
	 */
	asm_volatile_goto(
"1:					\n"
"		lbz	9,%0(13)	\n"
"		cmpwi	9,0		\n"
"		bne	%l[happened]	\n"
"		stb	9,%1(13)	\n"
"2:					\n"
		RESTART_TABLE(1b, 2b, 1b)
	: : "i" (offsetof(struct paca_struct, irq_happened)),
	    "i" (offsetof(struct paca_struct, irq_soft_mask))
	: "cr0", "r9"
	: happened);
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!(mfmsr() & MSR_EE));

	return;

happened:
	irq_happened = get_irq_happened();
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!irq_happened);

	if (irq_happened == PACA_IRQ_HARD_DIS) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(mfmsr() & MSR_EE);
		irq_soft_mask_set(IRQS_ENABLED);
		local_paca->irq_happened = 0;
		return;
	}

	/* Have interrupts to replay, need to hard disable first */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (!(mfmsr() & MSR_EE)) {
				/*
				 * An interrupt could have come in and cleared
				 * MSR[EE] and set IRQ_HARD_DIS, so check
				 * IRQ_HARD_DIS again and warn if it is still
				 * clear.
				 */
				irq_happened = get_irq_happened();
				WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
			}
		}
		__hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
				__hard_irq_disable();
		}
	}

	/*
	 * Disable preempt here, so that the below preempt_enable will
	 * perform resched if required (a replayed interrupt may set
	 * need_resched).
	 */
	preempt_disable();
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	replay_soft_interrupts_irqrestore();
	local_paca->irq_happened = 0;

	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);

	preempt_enable();
}
#else /* CONFIG_CC_HAS_ASM_GOTO */
notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;

	/* Write the new soft-enabled value */
	irq_soft_mask_set(mask);
	if (mask)
		return;

	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem; in that case we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
		return;
	}

	/* We need to hard disable to replay. */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
		__hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case, so double-check it and
		 * warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
				__hard_irq_disable();
		}

		if (irq_happened == PACA_IRQ_HARD_DIS) {
			local_paca->irq_happened = 0;
			return;
		}
	}

	/*
	 * Disable preempt here, so that the below preempt_enable will
	 * perform resched if required (a replayed interrupt may set
	 * need_resched).
	 */
	preempt_disable();
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	replay_soft_interrupts_irqrestore();
	local_paca->irq_happened = 0;

	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);

	preempt_enable();
}
#endif /* CONFIG_CC_HAS_ASM_GOTO */
EXPORT_SYMBOL(arch_local_irq_restore);
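
/*
 * Note: local_irq_enable() and local_irq_restore() end up in
 * arch_local_irq_restore() above via asm/hw_irq.h, with mask == IRQS_ENABLED
 * meaning "unmask". MSR[EE] itself is only cleared lazily, when an interrupt
 * actually arrives while soft-masked.
 */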
/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}
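
/*
 * Typical use from a platform power_save hook (sketch only; "enter_low_power"
 * stands in for whatever the platform actually calls, e.g. H_CEDE on pHyp):
 *
 *	static void my_power_save(void)
 *	{
 *		if (!prep_irq_for_idle())
 *			return;			// something pending, don't sleep
 *		enter_low_power();		// may re-enable interrupts itself
 *	}
 */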
#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}
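
/*
 * Sketch of the expected pairing (enter_idle_state() is a placeholder for the
 * platform's stop/nap sequence, not a real symbol):
 *
 *	if (prep_irq_for_idle_irqsoff()) {
 *		enter_idle_state();		// wakes with IRQs still off
 *		fini_irq_for_idle_irqsoff();	// tell the tracer IRQs are off again
 *	}
 */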
/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL, IRQ_SYSTEM_RESET, PACA_IRQ_DBELL,
	PACA_IRQ_DEC, 0,
	PACA_IRQ_EE, PACA_IRQ_EE, PACA_IRQ_HMI,
	0, 0, 0, 0, 0,
};
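
/*
 * SRR1_WAKEMASK_P8 selects SRR1[42:45]; shifting right by 18 turns that field
 * into the 0..0xf index used by irq_set_pending_from_srr1() below.
 */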
void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.softe = 0;

	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);
void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	if (reason == PACA_IRQ_DBELL) {
		/*
		 * When doorbell triggers a system reset wakeup, the message
		 * is not cleared, so if the doorbell interrupt is replayed
		 * and the IPI handled, the doorbell interrupt would still
		 * fire when EE is enabled.
		 *
		 * To avoid taking the superfluous doorbell interrupt,
		 * execute a msgclr here before the interrupt is replayed.
		 */
		ppc_msgclr(PPC_DBELL_MSGTYPE);
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU were to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */
/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled;
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent a lost update in case of an interrupt between
	 * the load and the store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */
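
/*
 * The counters printed below are accumulated into the per-CPU irq_stat
 * structure (irq_cpustat_t, see asm/hardirq.h) by the respective interrupt
 * handlers, and show up in /proc/interrupts.
 */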
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, "  Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "BCT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
	seq_printf(p, "  Broadcast timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, "  Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
	}
#endif

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
	seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
	seq_printf(p, "%*s: ", prec, "WDG");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
	seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, "  Doorbell interrupts\n");
	}
#endif

	return 0;
}
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
#ifdef CONFIG_PPC_BOOK3S_64
	sum += paca_ptrs[cpu]->hmi_irqs;
#endif
	sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}
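
/*
 * Note: arch_irq_stat_cpu() is what the generic /proc/stat code uses to fold
 * these architecture-specific counts into the "intr" line.
 */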
static inline void check_stack_overflow(void)
{
	unsigned long sp;

	if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
		return;

	sp = current_stack_pointer & (THREAD_SIZE - 1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < 2048)) {
		pr_err("do_IRQ: stack overflow: %ld\n", sp);
		dump_stack();
	}
}
static __always_inline void call_do_softirq(const void *sp)
{
	/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
	asm volatile (
		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
		"mr		%%r1, %[sp]		;"
		"bl		%[callee]		;"
		 PPC_LL "	%%r1, 0(%%r1)		;"
		 : /* no outputs */
		 : [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
		   [callee] "i" (__do_softirq)
		 : /* clobbers */
		   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
		   "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
		   "r11", "r12"
	);
}
static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
{
	register unsigned long r3 asm("r3") = (unsigned long)regs;

	/* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
	asm volatile (
		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
		"mr		%%r1, %[sp]		;"
		"bl		%[callee]		;"
		 PPC_LL "	%%r1, 0(%%r1)		;"
		 : "+r" (r3)
		 : [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
		   [callee] "i" (__do_irq)
		 : /* clobbers */
		   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
		   "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
		   "r11", "r12"
	);
}
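
/*
 * In both helpers above, PPC_STLU stores the old r1 at the top of the new
 * stack while switching to it, so the saved back chain lets the final PPC_LL
 * (and stack unwinders) walk straight back to the interrupted stack.
 */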
void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	trace_irq_entry(regs);

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU.
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);
}
void __do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	void *cursp, *irqsp, *sirqsp;

	/* Switch to the irq stack to handle this */
	cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
	irqsp = hardirq_ctx[raw_smp_processor_id()];
	sirqsp = softirq_ctx[raw_smp_processor_id()];

	check_stack_overflow();

	/* Already there ? */
	if (unlikely(cursp == irqsp || cursp == sirqsp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}
	/* Switch stack and call */
	call_do_irq(regs, irqsp);

	set_irq_regs(old_regs);
}

DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
	__do_IRQ(regs);
}
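
/*
 * DEFINE_INTERRUPT_HANDLER_ASYNC (asm/interrupt.h) wraps the body above with
 * the generic asynchronous-interrupt entry/exit bookkeeping, producing the
 * do_IRQ() symbol that the exception entry code calls.
 */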
static void *__init alloc_vm_stack(void)
{
	return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
			      NUMA_NO_NODE, (void *)_RET_IP_);
}

static void __init vmap_irqstack_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_vm_stack();
		hardirq_ctx[i] = alloc_vm_stack();
	}
}

void __init init_IRQ(void)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK))
		vmap_irqstack_init();

	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();
}
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;

void do_softirq_own_stack(void)
{
	call_do_softirq(softirq_ctx[smp_processor_id()]);
}
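
/*
 * do_softirq_own_stack() above is called by the core softirq code when
 * pending softirqs should be run on the dedicated per-CPU softirq stack
 * rather than on the current task's stack.
 */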
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			cpuid = smp_processor_id();
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif
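
/*
 * Note: irq_choose_cpu() deliberately returns a *hard* (physical) CPU id,
 * i.e. the value interrupt controller registers expect, not a Linux logical
 * CPU number.
 */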
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;

	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */