// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;
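
/*
 * Fetch paca->irq_happened with a single byte load via r13 (the paca
 * pointer), avoiding the debug_smp_processor_id() checks that get_paca()
 * would pull in; notrace since it runs inside the irq-enable path itself.
 */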
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}
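
/*
 * Return true if the timebase is past the value programmed into the
 * decrementer, i.e. a timer interrupt has become pending while we
 * were hard disabled and will need to be replayed.
 */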
static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	return now >= *next_tb;
}

#ifdef CONFIG_PPC_BOOK3E

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280 if
 * there's an EE, DEC or DBELL to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all the
	 * debug_smp_processor_id() business in this low-level function.
	 */
	unsigned char happened = local_paca->irq_happened;

	/*
	 * We are responding to the next interrupt, so interrupt-off
	 * latencies should be reset here.
	 */
	trace_hardirqs_off();

	/*
	 * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
	 * not be set, which means interrupts have only just been hard
	 * disabled as part of the local_irq_restore or interrupt return
	 * code. In that case, skip the decrementer check because it's
	 * expensive to read the TB.
	 *
	 * HARD_DIS then gets cleared here, but it's reconciled later.
	 * Either local_irq_disable will replay the interrupt and that
	 * will reconcile state like other hard interrupts. Or interrupt
	 * return will replay the interrupt and in that case it sets
	 * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
	 */
	if (happened & PACA_IRQ_HARD_DIS) {
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

		/*
		 * We may have missed a decrementer interrupt if hard disabled.
		 * Check the decrementer register in case we had a rollover
		 * while hard disabled.
		 */
		if (!(happened & PACA_IRQ_DEC)) {
			if (decrementer_check_overflow()) {
				local_paca->irq_happened |= PACA_IRQ_DEC;
				happened |= PACA_IRQ_DEC;
			}
		}
	}

	if (happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		return 0x900;
	}

	if (happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		return 0x500;
	}

	/*
	 * Check if an EPR external interrupt happened. This bit is typically
	 * set if we need to handle another "edge" interrupt from within the
	 * MPIC "EPR" handler.
	 */
	if (happened & PACA_IRQ_EE_EDGE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
		return 0x500;
	}

	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0x280;
	}

	/* There should be nothing left ! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}
#endif /* CONFIG_PPC_BOOK3E */
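
/*
 * Replay soft-masked interrupts in C: build a register frame from the
 * current context and invoke the handlers directly for each pending
 * irq_happened bit. Called with interrupts hard disabled, from
 * arch_local_irq_restore() below.
 */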
void replay_soft_interrupts(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all the
	 * debug_smp_processor_id() business in this low-level function.
	 */
	unsigned char happened = local_paca->irq_happened;
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.softe = IRQS_ENABLED;

again:
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (happened & PACA_IRQ_HARD_DIS) {
		/*
		 * We may have missed a decrementer interrupt if hard disabled.
		 * Check the decrementer register in case we had a rollover
		 * while hard disabled.
		 */
		if (!(happened & PACA_IRQ_DEC)) {
			if (decrementer_check_overflow())
				happened |= PACA_IRQ_DEC;
		}
	}

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a hypervisor Maintenance interrupt happened. This is
	 * a higher priority interrupt than the others, so replay it first.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_HMI)) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		regs.trap = 0xe60;
		handle_hmi_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		regs.trap = 0x900;
		timer_interrupt(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		regs.trap = 0x500;
		do_IRQ(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	/*
	 * Check if an EPR external interrupt happened. This bit is typically
	 * set if we need to handle another "edge" interrupt from within the
	 * MPIC "EPR" handler.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E) && (happened & PACA_IRQ_EE_EDGE)) {
		local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
		regs.trap = 0x500;
		do_IRQ(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		if (IS_ENABLED(CONFIG_PPC_BOOK3E))
			regs.trap = 0x280;
		else
			regs.trap = 0xa00;
		doorbell_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	/* Book3E does not support soft-masking PMI interrupts */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_PMI)) {
		local_paca->irq_happened &= ~PACA_IRQ_PMI;
		regs.trap = 0xf00;
		performance_monitor_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	happened = local_paca->irq_happened;
	if (happened & ~PACA_IRQ_HARD_DIS) {
		/*
		 * We are responding to the next interrupt, so interrupt-off
		 * latencies should be reset here.
		 */
		trace_hardirqs_off();
		goto again;
	}
}

notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;

	/* Write the new soft-enabled value */
	irq_soft_mask_set(mask);
	if (mask)
		return;

	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
		return;
	}

	/* We need to hard disable to replay. */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
		__hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double check it and
		 * warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
				__hard_irq_disable();
		}

		if (irq_happened == PACA_IRQ_HARD_DIS) {
			local_paca->irq_happened = 0;
			__hard_irq_enable();
			return;
		}
	}

	/*
	 * Disable preempt here, so that the below preempt_enable will
	 * perform resched if required (a replayed interrupt may set
	 * need_resched).
	 */
	preempt_disable();
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	replay_soft_interrupts();
	local_paca->irq_happened = 0;

	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);
	__hard_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };
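
/*
 * Replay a system reset taken while we were in a power-saving state:
 * fake up a register frame and run the handler as an NMI, with in_nmi
 * flagged in the paca for the duration.
 */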
void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.softe = IRQS_ENABLED;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	if (reason == PACA_IRQ_DBELL) {
		/*
		 * When doorbell triggers a system reset wakeup, the message
		 * is not cleared, so if the doorbell interrupt is replayed
		 * and the IPI handled, the doorbell interrupt would still
		 * fire when EE is enabled.
		 *
		 * To avoid taking the superfluous doorbell interrupt,
		 * execute a msgclr here before the interrupt is replayed.
		 */
		ppc_msgclr(PPC_DBELL_MSGTYPE);
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU was to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent lost update in case of interrupt between
	 * load and store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */
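
/*
 * Hook used by the generic /proc/interrupts code to print the
 * architecture-specific counters below the per-IRQ lines.
 */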
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, " Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "BCT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
	seq_printf(p, " Broadcast timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, " Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, " Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, " Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, " Machine check exceptions\n");

#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
		seq_printf(p, " Hypervisor Maintenance Interrupts\n");
	}
#endif

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
	seq_printf(p, " System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
	seq_printf(p, "%*s: ", prec, "WDG");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
	seq_printf(p, " Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, " Doorbell interrupts\n");
	}
#endif

	return 0;
}
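
/*
 * Sum of the architecture-specific interrupt counters for one CPU,
 * folded into the "intr" line of /proc/stat.
 */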
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
#ifdef CONFIG_PPC_BOOK3S_64
	sum += paca_ptrs[cpu]->hmi_irqs;
#endif
	sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}
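
/*
 * Warn when less than 2KB of the current stack remains; a no-op unless
 * CONFIG_DEBUG_STACKOVERFLOW is enabled.
 */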
static inline void check_stack_overflow(void)
{
	long sp;

	if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
		return;

	sp = current_stack_pointer & (THREAD_SIZE - 1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < 2048)) {
		pr_err("do_IRQ: stack overflow: %ld\n", sp);
		dump_stack();
	}
}
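
/*
 * Fetch one interrupt from the platform PIC and handle it. This runs on
 * the hard IRQ stack; do_IRQ() below takes care of switching to it.
 */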
void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);

	irq_exit();
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	void *cursp, *irqsp, *sirqsp;

	/* Switch to the irq stack to handle this */
	cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
	irqsp = hardirq_ctx[raw_smp_processor_id()];
	sirqsp = softirq_ctx[raw_smp_processor_id()];

	check_stack_overflow();

	/* Already there ? */
	if (unlikely(cursp == irqsp || cursp == sirqsp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}
	/* Switch stack and call */
	call_do_irq(regs, irqsp);

	set_irq_regs(old_regs);
}
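
/*
 * With CONFIG_VMAP_STACK the IRQ stacks are allocated from vmalloc space
 * so that, like task stacks, they are bordered by guard areas and an
 * overflow faults instead of silently corrupting adjacent memory.
 */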
static void *__init alloc_vm_stack(void)
{
	return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
			      NUMA_NO_NODE, (void *)_RET_IP_);
}

static void __init vmap_irqstack_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_vm_stack();
		hardirq_ctx[i] = alloc_vm_stack();
	}
}

void __init init_IRQ(void)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK))
		vmap_irqstack_init();

	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();
}
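
/*
 * Per-CPU stack pointers: the critical/debug/machine-check stacks used by
 * the extra exception levels on BookE and 40x, plus the softirq and
 * hardirq stacks used on all platforms.
 */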
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;

void do_softirq_own_stack(void)
{
	call_do_softirq(softirq_ctx[smp_processor_id()]);
}
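
/*
 * Map a Linux interrupt number back to the hardware interrupt number it
 * is bound to, or 0 (with a warning) if it has no mapping.
 */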
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
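
/*
 * Pick the hardware CPU id that an interrupt with the given affinity mask
 * should be routed to: round-robin over all online CPUs when the mask
 * allows any of them, otherwise the first online CPU in the mask.
 */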
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */