// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>

#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"
#ifdef XIVE_DEBUG
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while (0)
#endif
bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);

/* TIMA offset within the thread management area */
u32 xive_tima_offset;

static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;
#ifdef CONFIG_SMP
/* The IPIs use the same logical irq number when on the same chip */
static struct xive_ipi_desc {
	unsigned int irq;
	char name[16];
	atomic_t started;
} *xive_ipis;

/*
 * Use early_cpu_to_node() for hot-plugged CPUs
 */
static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu)
{
	return xive_ipis[early_cpu_to_node(cpu)].irq;
}
#endif /* CONFIG_SMP */
/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)
/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set.
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}
	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}
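
/*
 * Illustrative helper, not used by the driver: how a raw EQ word maps
 * to an IRQ number. Entries are big-endian 32-bit words; bit 31 is the
 * generation ("toggle") bit and the low 31 bits carry the IRQ number,
 * so an entry is fresh when its bit 31 differs from q->toggle.
 */
static inline u32 xive_example_eq_entry_to_irq(__be32 entry)
{
	return be32_to_cpu(entry) & 0x7fffffff;
}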
/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the "pending_count" of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the amount of interrupts
 * targeting a queue. When an interrupt is moved away from
 * a queue, we only decrement that queue count once the queue
 * has been observed empty to avoid races.
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio = 0;

	/* Find highest pending priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */
		if (irq) {
			if (just_peek || irq_to_desc(irq))
				break;
			/*
			 * We should never get here; if we do then we must
			 * have failed to synchronize the interrupt properly
			 * when shutting it down.
			 */
			pr_crit("xive: got interrupt %d without descriptor, dropping\n",
				irq);
		}

		/* Clear pending bits */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);

			if (p) {
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff */
	if (irq == 0)
		prio = 0xff;

	/* Update HW CPPR to match if necessary */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}
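
/*
 * Worked example (illustrative): with xc->pending_prio == 0x24
 * (binary 0010 0100), ffs() - 1 yields 2, so queue 2 (the most
 * favored pending priority) is scanned first; if it turns out to
 * be empty, bit 2 is cleared and queue 5 is tried next.
 */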
/*
 * This is used to perform the magic loads from an ESB
 * described in xive-regs.h
 */
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		offset |= XIVE_ESB_LD_ST_MO;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
	else
		val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}
static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
	else
		out_be64(xd->eoi_mmio + offset, data);
}
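
/*
 * ESB background (illustrative summary; see xive-regs.h for the
 * authoritative definitions): each source carries a 2-bit PQ state
 * driven by "magic" MMIO loads. P means an event was sent to a
 * queue, Q means a further event arrived and was coalesced:
 *
 *   PQ=00  "RESET"   enabled, next trigger sends an event
 *   PQ=01  "OFF"     masked, triggers only latch into Q
 *   PQ=10  "PENDING" event in flight, new triggers coalesce into Q
 *   PQ=11  "QUEUED"  in flight plus one coalesced occurrence
 *
 * The XIVE_ESB_SET_PQ_xx loads atomically install the new state and
 * return the previous one, which is what the EOI and masking code
 * below relies on.
 */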
#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx;

	if (!q->qpage)
		return;
	idx = q->idx;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
		    q->idx, q->toggle, i0, i1);
}
notrace void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("CPU %d:", cpu);
	if (xc) {
		xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

			xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
				    val & XIVE_ESB_VAL_P ? 'P' : '-',
				    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
		}
#endif
		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
	}
	xmon_printf("\n");
}
static struct irq_data *xive_get_irq_data(u32 hw_irq)
{
	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);

	return irq ? irq_get_irq_data(irq) : NULL;
}
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return rc;
	}

	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		    hw_irq, target, prio, lirq);

	if (!d)
		d = xive_get_irq_data(hw_irq);

	if (d) {
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
		u64 val = xive_esb_read(xd, XIVE_ESB_GET);

		xmon_printf("flags=%c%c%c PQ=%c%c",
			    xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
			    val & XIVE_ESB_VAL_P ? 'P' : '-',
			    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	}

	xmon_printf("\n");
	return 0;
}
void xmon_xive_get_irq_all(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_desc_get_irq_data(desc);
		unsigned int hwirq = (unsigned int)irqd_to_hwirq(d);

		if (d->domain == xive_irq_domain)
			xmon_xive_get_irq_config(hwirq, d);
	}
}

#endif /* CONFIG_XMON */
static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupts.
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless soft-masking tells us
	 * that a new HW interrupt occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}

/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}
/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type.
 */
static void xive_do_source_eoi(struct xive_irq_data *xd)
{
	u8 eoi_val;

	xd->stale_p = false;

	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) {
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
		return;
	}

	/*
	 * For LSIs, we use the "EOI cycle" special load rather than
	 * PQ bits, as they are automatically re-triggered in HW when
	 * still pending.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
		return;
	}

	/*
	 * Otherwise, we use the special MMIO that does a clear of
	 * both P and Q and returns the old Q. This allows us to then
	 * do a re-trigger if Q was set rather than synthesizing an
	 * interrupt in software.
	 */
	eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
	DBG_VERBOSE("eoi_val=%x\n", eoi_val);

	/* Re-trigger if needed */
	if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
		out_be64(xd->trig_mmio, 0);
}
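
/*
 * Example of the PQ dance above (illustrative): an MSI source in
 * state PQ=11 (one occurrence being handled, one coalesced) is
 * EOIed. The XIVE_ESB_SET_PQ_00 load returns the old PQ value with
 * Q set, so the code writes the trigger page to replay the
 * coalesced occurrence in hardware rather than losing it.
 */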
/* irq_chip eoi callback, called with irq descriptor lock held */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest.
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
	    !(xd->flags & XIVE_IRQ_FLAG_NO_EOI))
		xive_do_source_eoi(xd);
	else
		xd->stale_p = true;

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue.
	 */
	xd->saved_p = false;

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
}
/*
 * Helper used to mask and unmask an interrupt source.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state and use it to restore the
	 * ESB accordingly on unmask.
	 */
	if (mask) {
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
			xd->saved_p = true;
		xd->stale_p = false;
	} else if (xd->saved_p) {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		xd->saved_p = false;
	} else {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
		xd->stale_p = false;
	}
}
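
/*
 * Unmask decision, summarized (illustrative):
 *
 *   saved_p  ESB load on unmask   effect
 *   -------  -------------------  ----------------------------------
 *   false    XIVE_ESB_SET_PQ_00   fully re-enable the source
 *   true     XIVE_ESB_SET_PQ_10   keep it "pending" until the
 *                                 occurrence still in a queue is EOId
 *
 * Masking always goes to PQ=01 ("off"), remembering in saved_p
 * whether an occurrence may still sit in a queue slot.
 */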
/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	int max;

	/*
	 * Calculate max number of interrupts in that queue.
	 *
	 * We leave a gap of 1 just in case...
	 */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}
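
/*
 * Sizing example (illustrative): with a 64kB queue page of 4-byte
 * entries there are 16384 slots, so q->msk is 16383 and at most
 * (q->msk + 1) - 1 = 16383 interrupts are accounted to the queue,
 * keeping one slot free as a safety margin.
 */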
/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" later when that CPU observes
 * the queue to be empty.
 */
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (WARN_ON(cpu < 0 || !xc)) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/*
	 * We increment the "pending count" which will be used
	 * to decrement the target queue count whenever it's next
	 * processed and found empty. This ensures that we don't
	 * decrement while we still have the interrupt there.
	 */
	atomic_inc(&q->pending_count);
}
/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
	first = fuzz % num;

	/* Locate it */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	do {
		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask.
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);

		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	} while (cpu != first);

	return -1;
}
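
/*
 * Example of the fuzz-based spreading (illustrative): for an
 * affinity mask of CPUs {2, 5, 7} and fuzz = 4, num = 3 and the
 * starting point is entry 4 % 3 = 1, i.e. CPU 5. Successive calls
 * thus begin their search at different CPUs instead of all piling
 * onto the first CPU of the mask.
 */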
/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * CPUs matching the source's chip and find a target in there.
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		fuzz--;
	}

	/* No chip IDs, fallback to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}
static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	xd->saved_p = false;
	xd->stale_p = false;
	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);
#endif

	/* Pick a target */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)
			return -ENXIO;
		pr_warn("irq %d started with broken affinity\n", d->irq);
	}

	/* Sanity check */
	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	xd->target = target;

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and set the target queue.
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Unmask the ESB */
	xive_do_source_set_mask(xd, false);

	return 0;
}
/* called with irq descriptor lock held */
static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * Mask the interrupt in HW in the IVT/EAS and set the number
	 * to be the "bad" IRQ number.
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, XIVE_BAD_IRQ);

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}
static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	xive_do_source_set_mask(xd, true);
}
static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/* Don't do anything if the interrupt isn't started */
	if (!irqd_is_started(d))
		return IRQ_SET_MASK_OK;

	/*
	 * If existing target is already in the new mask, and is
	 * online then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest.
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_devel("  target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}
static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than setting
	 * the corresponding descriptor bits mind you but those will in turn
	 * affect the resend function when re-enabling an edge interrupt.
	 *
	 * Set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks.
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend.
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}
static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
	xive_do_source_eoi(xd);

	return 1;
}
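
/*
 * Why this works (illustrative): forcing PQ=11 marks the source as
 * having a coalesced occurrence. In the plain PQ case the EOI then
 * moves the state to PQ=00, sees the old Q bit set, and writes the
 * trigger page, so the interrupt is replayed in hardware rather
 * than synthesized in software; store-EOI capable sources perform
 * the equivalent replay on their own.
 */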
/*
 * Caller holds the irq descriptor lock, so this won't be called
 * concurrently with xive_get_irqchip_state on the same interrupt.
 */
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u8 pq;

	/*
	 * This is called by KVM with state non-NULL for enabling
	 * pass-through or NULL for disabling it.
	 */
	if (state) {
		irqd_set_forwarded_to_vcpu(d);

		/* Set it to PQ=10 state to prevent further sends */
		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		if (!xd->stale_p) {
			xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
			xd->stale_p = !xd->saved_p;
		}

		/* No target ? nothing to do */
		if (xd->target == XIVE_INVALID_TARGET) {
			/*
			 * An untargeted interrupt should have been
			 * also masked at the source.
			 */
			WARN_ON(xd->saved_p);

			return 0;
		}

		/*
		 * If P was set, adjust state to PQ=11 to indicate
		 * that a resend is needed for the interrupt to reach
		 * the guest. Also remember the value of P.
		 *
		 * This also tells us that it's in flight to a host queue
		 * or has already been fetched but hasn't been EOIed yet
		 * by the host. Thus it's potentially using up a host
		 * queue slot. This is important to know because as long
		 * as this is the case, we must not hard-unmask it when
		 * "returning" that interrupt to the host.
		 *
		 * This saved_p is cleared by the host EOI, when we know
		 * for sure the queue slot is no longer in use.
		 */
		if (xd->saved_p) {
			xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

			/*
			 * Sync the XIVE source HW to ensure the interrupt
			 * has gone through the EAS before we change its
			 * target to the guest. That should guarantee us
			 * that we *will* eventually get an EOI for it on
			 * the host. Otherwise there would be a small window
			 * for P to be seen here but the interrupt going
			 * to the guest queue.
			 */
			if (xive_ops->sync_source)
				xive_ops->sync_source(hw_irq);
		}
	} else {
		irqd_clr_forwarded_to_vcpu(d);

		/* No host target ? hard mask and return */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);
			return 0;
		}

		/*
		 * Sync the XIVE source HW to ensure the interrupt
		 * has gone through the EAS before we change its
		 * target to the host.
		 */
		if (xive_ops->sync_source)
			xive_ops->sync_source(hw_irq);

		/*
		 * By convention we are called with the interrupt in
		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
		 * have latched in Q whether there's a pending HW
		 * interrupt or not.
		 *
		 * First reconfigure the target.
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);
		if (rc)
			return rc;

		/*
		 * Then if saved_p is not set, effectively re-enable the
		 * interrupt with an EOI. If it is set, we know there is
		 * still a message in a host queue somewhere that will be
		 * EOId eventually.
		 *
		 * Note: We don't check irqd_irq_disabled(). Effectively,
		 * we *will* let the irq get through even if masked if the
		 * HW is still firing it in order to deal with the whole
		 * saved_p business properly. If the interrupt triggers
		 * while masked, the generic code will re-mask it anyway.
		 */
		if (!xd->saved_p)
			xive_do_source_eoi(xd);
	}
	return 0;
}
/* Called with irq descriptor lock held. */
static int xive_get_irqchip_state(struct irq_data *data,
				  enum irqchip_irq_state which, bool *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
	u8 pq;

	switch (which) {
	case IRQCHIP_STATE_ACTIVE:
		pq = xive_esb_read(xd, XIVE_ESB_GET);

		/*
		 * The esb value being all 1's means we couldn't get
		 * the PQ state of the interrupt through mmio. It may
		 * happen, for example when querying a PHB interrupt
		 * while the PHB is in an error state. We consider the
		 * interrupt to be inactive in that case.
		 */
		*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
			 (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
		return 0;
	default:
		return -EINVAL;
	}
}
static struct irq_chip xive_irq_chip = {
	.name = "XIVE-IRQ",
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
	.irq_get_irqchip_state = xive_get_irqchip_state,
};

bool is_xive_irq(struct irq_chip *chip)
{
	return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	if (xd->eoi_mmio) {
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	/*
	 * Turn OFF by default the interrupt being mapped. A side
	 * effect of this check is the mapping of the ESB page of the
	 * interrupt in the Linux address space. This prevents page
	 * fault issues in the crash handler which masks all
	 * interrupts.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_01);

	return 0;
}
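
/*
 * Note (illustrative): PQ=01 is the "OFF" state, so a freshly mapped
 * interrupt stays silent until xive_irq_startup() unmasks it with a
 * transition back to PQ=00.
 */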
static void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;
	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
	kfree(xd);
}
#ifdef CONFIG_SMP

static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc;
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	xd = &xc->ipi_data;
	if (WARN_ON(!xd->trig_mmio))
		return;
	out_be64(xd->trig_mmio, 0);
}
static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
	return smp_ipi_demux();
}
static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Handle possible race with unplug and drop stale IPIs */
	if (!xc)
		return;

	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

	xive_do_source_eoi(&xc->ipi_data);
	xive_do_queue_eoi(xc);
}
static void xive_ipi_do_nothing(struct irq_data *d)
{
	/*
	 * Nothing to do, we never mask/unmask IPIs, but the callback
	 * has to exist for the struct irq_chip.
	 */
}

static struct irq_chip xive_ipi_chip = {
	.name = "XIVE-IPI",
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
};
/*
 * IPIs are marked per-cpu. We use separate HW interrupts under the
 * hood but associated with the same "linux" interrupt.
 */
struct xive_ipi_alloc_info {
	irq_hw_number_t hwirq;
};

static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs, void *arg)
{
	struct xive_ipi_alloc_info *info = arg;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip,
				    domain->host_data, handle_percpu_irq,
				    NULL, NULL);
	}

	return 0;
}

static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
	.alloc  = xive_ipi_irq_domain_alloc,
};
static int __init xive_init_ipis(void)
{
	struct fwnode_handle *fwnode;
	struct irq_domain *ipi_domain;
	unsigned int node;
	int ret = -ENOMEM;

	fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI");
	if (!fwnode)
		goto out;

	ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
					      &xive_ipi_irq_domain_ops, NULL);
	if (!ipi_domain)
		goto out_free_fwnode;

	xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL);
	if (!xive_ipis)
		goto out_free_domain;

	for_each_node(node) {
		struct xive_ipi_desc *xid = &xive_ipis[node];
		struct xive_ipi_alloc_info info = { node };

		/*
		 * Map one IPI interrupt per node for all cpus of that node.
		 * Since the HW interrupt number doesn't have any meaning,
		 * simply use the node number.
		 */
		ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
		if (ret < 0)
			goto out_free_xive_ipis;

		xid->irq = ret;

		snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
	}

	return ret;

out_free_xive_ipis:
	kfree(xive_ipis);
out_free_domain:
	irq_domain_remove(ipi_domain);
out_free_fwnode:
	irq_domain_free_fwnode(fwnode);
out:
	return ret;
}
static int xive_request_ipi(unsigned int cpu)
{
	struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
	int ret;

	if (atomic_inc_return(&xid->started) > 1)
		return 0;

	ret = request_irq(xid->irq, xive_muxed_ipi_action,
			  IRQF_PERCPU | IRQF_NO_THREAD,
			  xid->name, NULL);

	WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
	return ret;
}
static int xive_setup_cpu_ipi(unsigned int cpu)
{
	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
	struct xive_cpu *xc;
	int rc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != XIVE_BAD_IRQ)
		return 0;

	/* Register the IPI */
	xive_request_ipi(cpu);

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc) {
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
		return -EIO;
	}
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
	if (rc) {
		pr_err("Failed to map IPI CPU %d\n", cpu);
		return -EIO;
	}
	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	/* Unmask it */
	xive_do_source_set_mask(&xc->ipi_data, false);

	return 0;
}
static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);

	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	/* TODO: clear IPI mapping */

	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: We don't call xive_cleanup_irq_data() to free
	 * the mappings as this is called from an IPI on kexec
	 * which is not a safe environment to call iounmap()
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPIs in the backend */
	xive_ops->put_ipi(cpu, xc);
}
void __init xive_smp_probe(void)
{
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
	xive_init_ipis();

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */
static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	int rc;

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. Will fix that up below if needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

	rc = xive_irq_alloc_data(virq, hw);
	if (rc)
		return rc;

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

	return 0;
}
static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	xive_irq_free_data(virq);
}
static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second cell,
	 * we assume the LSB indicates a level interrupt.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else
		*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}
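
/*
 * Device-tree example (illustrative): an entry such as
 *
 *     interrupts = <0x1234 1>;
 *
 * yields hwirq 0x1234 with IRQ_TYPE_LEVEL_LOW (LSB of the second
 * cell set), while <0x1234 0> is treated as edge rising.
 */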
static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
{
	return xive_ops->match(node);
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" };

static const struct {
	u64  mask;
	char *name;
} xive_irq_flags[] = {
	{ XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" },
	{ XIVE_IRQ_FLAG_LSI,       "LSI" },
	{ XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" },
	{ XIVE_IRQ_FLAG_NO_EOI,    "NO_EOI" },
};

static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
				       struct irq_data *irqd, int ind)
{
	struct xive_irq_data *xd;
	u64 val;
	int i;

	/* No IRQ domain level information. To be done */
	if (!irqd)
		return;

	if (!is_xive_irq(irq_data_get_irq_chip(irqd)))
		return;

	seq_printf(m, "%*sXIVE:\n", ind, "");
	ind++;

	xd = irq_data_get_irq_handler_data(irqd);
	if (!xd) {
		seq_printf(m, "%*snot assigned\n", ind, "");
		return;
	}

	val = xive_esb_read(xd, XIVE_ESB_GET);
	seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]);
	seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "",
		   xd->saved_p ? "saved" : "");
	seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target);
	seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip);
	seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page);
	seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page);
	seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags);
	for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) {
		if (xd->flags & xive_irq_flags[i].mask)
			seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name);
	}
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
static const struct irq_domain_ops xive_irq_domain_ops = {
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show = xive_irq_domain_debug_show,
#endif
};
static void __init xive_init_host(struct device_node *np)
{
	xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ,
					       &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
		return;
	irq_set_default_host(xive_irq_domain);
}
static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	int rc = 0;

	/* We set up one queue for now with a 64k page */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

	return rc;
}
static int xive_prepare_cpu(unsigned int cpu)
{
	struct xive_cpu *xc;

	xc = per_cpu(xive_cpu, cpu);
	if (!xc) {
		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));
		if (!xc)
			return -ENOMEM;
		xc->hw_ipi = XIVE_BAD_IRQ;
		xc->chip_id = XIVE_INVALID_CHIP_ID;
		if (xive_ops->prepare_cpu)
			xive_ops->prepare_cpu(cpu, xc);

		per_cpu(xive_cpu, cpu) = xc;
	}

	/* Setup EQs if not already */
	return xive_setup_cpu_queues(cpu, xc);
}
static void xive_setup_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* The backend might have additional things to do */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Set CPPR to 0xff to enable flow of interrupts */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}
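
/*
 * CPPR reminder (illustrative): lower values are more favored, so
 * CPPR = 0xff accepts interrupts of any priority while CPPR = 0
 * (used in xive_smp_disable_cpu() and xive_teardown_cpu() below)
 * blocks them all.
 */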
#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
	pr_devel("SMP setup CPU %d\n", smp_processor_id());

	/* This will have already been done on the boot CPU */
	if (smp_processor_id() != boot_cpuid)
		xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
	int rc;

	/* Allocate per-CPU data and queues */
	rc = xive_prepare_cpu(cpu);
	if (rc)
		return rc;

	/* Allocate and setup IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor.
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;

		/*
		 * Ignore anything that isn't a XIVE irq and ignore
		 * IPIs, so they can just be dropped.
		 */
		if (d->domain != xive_irq_domain)
			continue;

		/*
		 * The IRQ should have already been re-routed, it's just a
		 * stale in the old queue, so re-trigger it in order to make
		 * it reach its new destination.
		 */
#ifdef DEBUG_FLUSH
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);
#endif
		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/*
		 * Clear saved_p to indicate that it's no longer pending
		 */
		xd->saved_p = false;

		/*
		 * For LSIs, we EOI, this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}
void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from the CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}
void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */
void xive_teardown_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
	xive_ops->shutdown();
}
bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
			   void __iomem *area, u32 offset, u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops = ops;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_devel("Initializing host..\n");
	xive_init_host(np);

	pr_devel("Initializing boot CPU..\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */
	xive_setup_cpu();

	pr_info("Interrupt handling initialized with %s backend\n",
		xive_ops->name);
	pr_info("Using priority %d for all interrupts\n", max_prio);

	return true;
}
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
	unsigned int alloc_order;
	struct page *pages;
	__be32 *qpage;

	alloc_order = xive_alloc_order(queue_shift);
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	qpage = (__be32 *)page_address(pages);
	memset(qpage, 0, 1 << queue_shift);

	return qpage;
}
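
/*
 * Sizing example (illustrative): queue_shift = 16 requests a 64kB
 * queue. With 64kB kernel pages xive_alloc_order() returns 0 and a
 * single page backs the queue; with 4kB pages it returns 4, i.e. an
 * order-4, 16-page allocation.
 */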
static int __init xive_off(char *arg)
{
	xive_cmdline_disabled = true;
	return 0;
}
__setup("xive=off", xive_off);
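
/*
 * Usage note: booting with "xive=off" on the kernel command line
 * makes the platform code skip XIVE and fall back to the legacy
 * XICS interrupt controller emulation where available.
 */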
static void xive_debug_show_cpu(struct seq_file *m, int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	seq_printf(m, "CPU %d:", cpu);
	if (xc) {
		seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

			seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
				   val & XIVE_ESB_VAL_P ? 'P' : '-',
				   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
		}
#endif
		{
			struct xive_q *q = &xc->queue[xive_irq_priority];
			u32 i0, i1, idx;

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "EQ idx=%d T=%d %08x %08x ...",
					   q->idx, q->toggle, i0, i1);
			}
		}
	}
	seq_puts(m, "\n");
}
static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;
	struct xive_irq_data *xd;
	u64 val;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return;
	}

	seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		   hw_irq, target, prio, lirq);

	xd = irq_data_get_irq_handler_data(d);
	val = xive_esb_read(xd, XIVE_ESB_GET);
	seq_printf(m, "flags=%c%c%c PQ=%c%c",
		   xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
		   xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
		   xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
		   val & XIVE_ESB_VAL_P ? 'P' : '-',
		   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	seq_puts(m, "\n");
}
static int xive_core_debug_show(struct seq_file *m, void *private)
{
	unsigned int i;
	struct irq_desc *desc;
	int cpu;

	if (xive_ops->debug_show)
		xive_ops->debug_show(m, private);

	for_each_possible_cpu(cpu)
		xive_debug_show_cpu(m, cpu);

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_desc_get_irq_data(desc);

		if (d->domain == xive_irq_domain)
			xive_debug_show_irq(m, d);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_core_debug);
int xive_core_debug_init(void)
{
	if (xive_enabled())
		debugfs_create_file("xive", 0400, powerpc_debugfs_root,
				    NULL, &xive_core_debug_fops);
	return 0;
}