// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>

#include <asm/debugfs.h>
#include <asm/machdep.h>
#include <asm/errno.h>
#include <asm/xive-regs.h>

#include "xive-internal.h"
#ifdef DEBUG
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while (0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;
/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs use the same logical irq number when on the same chip */
static struct xive_ipi_desc {
	unsigned int irq;
	char name[16];
} *xive_ipis;

/*
 * Use early_cpu_to_node() for hot-plugged CPUs
 */
static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu)
{
	return xive_ipis[early_cpu_to_node(cpu)].irq;
}
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)
/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}
	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}
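
/*
 * Worked example of the toggle scheme above (values hypothetical):
 * take a 4-slot queue (q->msk = 3) with q->toggle = 1. The HW
 * produces entries with bit 31 set, so (cur >> 31) != q->toggle
 * flags a valid entry. When the consumer advances past slot 3 and
 * wraps to 0, it flips q->toggle to 0 and then expects entries with
 * bit 31 clear, which is what the HW writes on its own second pass
 * around the ring. A slot whose top bit still matches the previous
 * polarity is stale, and the read above returns 0 for it.
 */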
/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the "pending_count" of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the amount of interrupts
 * targeting a queue. When an interrupt is moved away from
 * a queue, we only decrement that queue count once the queue
 * has been observed empty to avoid races.
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio = 0;

	/* Find highest pending priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */
		if (irq) {
			if (just_peek || irq_to_desc(irq))
				break;
			/*
			 * We should never get here; if we do then we must
			 * have failed to synchronize the interrupt properly
			 * when shutting it down.
			 */
			pr_crit("xive: got interrupt %d without descriptor, dropping\n",
				irq);
		}

		/* Clear pending bits */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);

			if (p) {
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff */
	if (irq == 0)
		prio = 0xff;

	/* Update HW CPPR to match if necessary */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}
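
/*
 * Worked example (hypothetical value): with xc->pending_prio = 0x90,
 * priorities 4 and 7 are pending and ffs(0x90) - 1 = 4, so the
 * priority 4 queue is scanned first (a lower number is more favored).
 * If it yields an interrupt, the CPPR is lowered to 4, masking less
 * favored sources until the EOI. If every flagged queue turns out to
 * be empty, the pending bits are cleared one by one and the CPPR is
 * restored to 0xff, re-opening all priorities.
 */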
/*
 * This is used to perform the magic loads from an ESB
 * described in xive-regs.h
 */
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		offset |= XIVE_ESB_LD_ST_MO;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
	else
		val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
	else
		out_be64(xd->eoi_mmio + offset, data);
}
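
/*
 * For reference, the ESB offsets used throughout this file operate on
 * the 2-bit PQ state of a source (the names match esb_names[] in the
 * debugfs code below):
 *
 *   PQ = 00 (RESET)   : enabled, the next event will be presented
 *   PQ = 01 (OFF)     : masked, no notifications are sent
 *   PQ = 10 (PENDING) : an event was presented and is in a queue
 *   PQ = 11 (QUEUED)  : pending, plus further events coalesced into Q
 *
 * A "magic" load from eoi_mmio + XIVE_ESB_SET_PQ_xx atomically moves
 * the source to state xx and returns the previous P and Q bits, e.g.:
 *
 *	u8 pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_01); // mask, fetch old PQ
 */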
#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx = q->idx;

	if (!q->qpage)
		return;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
		    q->idx, q->toggle, i0, i1);
}

notrace void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("CPU %d:", cpu);
	if (xc) {
		u64 val;

		xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

		val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
		xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
			    val & XIVE_ESB_VAL_P ? 'P' : '-',
			    val & XIVE_ESB_VAL_Q ? 'Q' : '-');

		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
	}
	xmon_printf("\n");
}
static struct irq_data *xive_get_irq_data(u32 hw_irq)
{
	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);

	return irq ? irq_get_irq_data(irq) : NULL;
}

int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
	u32 target, lirq;
	u8 prio;
	int rc;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return rc;
	}

	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		    hw_irq, target, prio, lirq);

	if (!d)
		d = xive_get_irq_data(hw_irq);
	if (d) {
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
		u64 val = xive_esb_read(xd, XIVE_ESB_GET);

		xmon_printf("flags=%c%c%c PQ=%c%c",
			    xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
			    val & XIVE_ESB_VAL_P ? 'P' : '-',
			    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	}

	xmon_printf("\n");

	return 0;
}

void xmon_xive_get_irq_all(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_desc_get_irq_data(desc);
		unsigned int hwirq = (unsigned int)irqd_to_hwirq(d);

		if (d->domain == xive_irq_domain)
			xmon_xive_get_irq_config(hwirq, d);
	}
}

#endif /* CONFIG_XMON */
static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupt.
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless soft-masking tells us
	 * that a new HW interrupt occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}

/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}
/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type
 */
static void xive_do_source_eoi(struct xive_irq_data *xd)
{
	u8 eoi_val;

	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) {
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
		return;
	}

	/*
	 * For LSIs, we use the "EOI cycle" special load rather than
	 * PQ bits, as they are automatically re-triggered in HW when
	 * still pending.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
		return;
	}

	/*
	 * Otherwise, we use the special MMIO that does a clear of
	 * both P and Q and returns the old Q. This allows us to then
	 * do a re-trigger if Q was set rather than synthesizing an
	 * interrupt in software
	 */
	eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
	DBG_VERBOSE("eoi_val=%x\n", eoi_val);

	/* Re-trigger if needed */
	if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
		out_be64(xd->trig_mmio, 0);
}
/* irq_chip eoi callback, called with irq descriptor lock held */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
	    !(xd->flags & XIVE_IRQ_FLAG_NO_EOI))
		xive_do_source_eoi(xd);
	else
		xd->stale_p = true;

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue
	 */
	xd->saved_p = false;

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
}
/*
 * Helper used to mask and unmask an interrupt source.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state and use it to restore the
	 * ESB accordingly on unmask.
	 */
	if (mask) {
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
			xd->saved_p = true;
		xd->stale_p = false;
	} else if (xd->saved_p) {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		xd->saved_p = false;
	} else {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
		xd->stale_p = false;
	}
}
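
/*
 * In terms of the PQ table above: masking parks the source in 01
 * (OFF). On unmask, a source that had latched P while masked is put
 * back to 10 (PENDING) so the occurrence already sitting in a queue
 * isn't presented a second time, while a clean source goes straight
 * back to 00 (RESET).
 */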
/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	u32 max;

	/*
	 * Calculate max number of interrupts in that queue.
	 *
	 * We leave a gap of 1 just in case...
	 */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}
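
/*
 * Accounting example: with a 64k queue page of 4-byte entries,
 * q->msk is 16383, so max is also 16383: one slot is deliberately
 * left unused. atomic_add_unless() refuses the increment once count
 * has reached max, so a full queue simply makes this CPU lose the
 * election and the caller moves on to the next candidate.
 */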
/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" later when that CPU observes
 * the queue to be empty.
 */
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (WARN_ON(cpu < 0 || !xc)) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/*
	 * We increment the "pending count" which will be used
	 * to decrement the target queue count whenever it's next
	 * processed and found empty. This ensures that we don't
	 * decrement while we still have the interrupt there.
	 */
	atomic_inc(&q->pending_count);
}
/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
	first = fuzz % num;

	/* Locate it */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	do {
		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);

		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	} while (cpu != first);

	return -1;
}
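
/*
 * The "fuzz" argument is a rotating starting offset into the mask:
 * callers pass an incrementing counter, so successive placements
 * begin their search at different CPUs and interrupts spread across
 * the mask instead of piling onto its first CPU.
 */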
/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * CPUs matching the source chip and find a target in there
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		fuzz--;
	}

	/* No chip IDs, fallback to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}
static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	/* Pick a target */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)
			return -ENXIO;
		pr_warn("irq %d started with broken affinity\n", d->irq);
	}

	/* Sanity check */
	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	xd->target = target;

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and set the target queue
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Unmask the interrupt */
	xive_do_source_set_mask(xd, false);

	return 0;
}
/* called with irq descriptor lock held */
static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * Mask the interrupt in HW in the IVT/EAS and set the number
	 * to be the "bad" IRQ number
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, XIVE_BAD_IRQ);

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}
static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	xive_do_source_set_mask(xd, true);
}
static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_debug("%s: irq %d/%x\n", __func__, d->irq, hw_irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * If existing target is already in the new mask, and is
	 * online then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_debug(" target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}
static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than setting
	 * the corresponding descriptor bits mind you but those will in turn
	 * affect the resend function when re-enabling an edge interrupt.
	 *
	 * Set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}
static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
	xive_do_source_eoi(xd);

	return 1;
}
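
/*
 * Why this works: after forcing PQ to 11 (QUEUED), the EOI path that
 * clears PQ back to 00 sees the old Q bit set and writes the trigger
 * page, so the source fires again exactly as if the device had
 * re-asserted it.
 */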
/*
 * Caller holds the irq descriptor lock, so this won't be called
 * concurrently with xive_get_irqchip_state on the same interrupt.
 */
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u8 pq;

	/*
	 * This is called by KVM with state non-NULL for enabling
	 * pass-through or NULL for disabling it
	 */
	if (state) {
		irqd_set_forwarded_to_vcpu(d);

		/* Set it to PQ=10 state to prevent further sends */
		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		if (!xd->stale_p) {
			xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
			xd->stale_p = !xd->saved_p;
		}

		/* No target ? nothing to do */
		if (xd->target == XIVE_INVALID_TARGET) {
			/*
			 * An untargeted interrupt should have been
			 * also masked at the source
			 */
			WARN_ON(xd->saved_p);

			return 0;
		}

		/*
		 * If P was set, adjust state to PQ=11 to indicate
		 * that a resend is needed for the interrupt to reach
		 * the guest. Also remember the value of P.
		 *
		 * This also tells us that it's in flight to a host queue
		 * or has already been fetched but hasn't been EOIed yet
		 * by the host. Thus it's potentially using up a host
		 * queue slot. This is important to know because as long
		 * as this is the case, we must not hard-unmask it when
		 * "returning" that interrupt to the host.
		 *
		 * This saved_p is cleared by the host EOI, when we know
		 * for sure the queue slot is no longer in use.
		 */
		if (xd->saved_p) {
			xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

			/*
			 * Sync the XIVE source HW to ensure the interrupt
			 * has gone through the EAS before we change its
			 * target to the guest. That should guarantee us
			 * that we *will* eventually get an EOI for it on
			 * the host. Otherwise there would be a small window
			 * for P to be seen here but the interrupt going
			 * to the guest queue.
			 */
			if (xive_ops->sync_source)
				xive_ops->sync_source(hw_irq);
		}
	} else {
		irqd_clr_forwarded_to_vcpu(d);

		/* No host target ? hard mask and return */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);
			return 0;
		}

		/*
		 * Sync the XIVE source HW to ensure the interrupt
		 * has gone through the EAS before we change its
		 * target to the host.
		 */
		if (xive_ops->sync_source)
			xive_ops->sync_source(hw_irq);

		/*
		 * By convention we are called with the interrupt in
		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
		 * have latched in Q whether there's a pending HW
		 * interrupt or not.
		 *
		 * First reconfigure the target.
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);
		if (rc)
			return rc;

		/*
		 * Then if saved_p is not set, effectively re-enable the
		 * interrupt with an EOI. If it is set, we know there is
		 * still a message in a host queue somewhere that will be
		 * EOIed eventually.
		 *
		 * Note: We don't check irqd_irq_disabled(). Effectively,
		 * we *will* let the irq get through even if masked if the
		 * HW is still firing it in order to deal with the whole
		 * saved_p business properly. If the interrupt triggers
		 * while masked, the generic code will re-mask it anyway.
		 */
		if (!xd->saved_p)
			xive_do_source_eoi(xd);
	}
	return 0;
}
/* Called with irq descriptor lock held. */
static int xive_get_irqchip_state(struct irq_data *data,
				  enum irqchip_irq_state which, bool *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
	u8 pq;

	switch (which) {
	case IRQCHIP_STATE_ACTIVE:
		pq = xive_esb_read(xd, XIVE_ESB_GET);

		/*
		 * The esb value being all 1's means we couldn't get
		 * the PQ state of the interrupt through mmio. It may
		 * happen, for example when querying a PHB interrupt
		 * while the PHB is in an error state. We consider the
		 * interrupt to be inactive in that case.
		 */
		*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
			 (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
		return 0;
	default:
		return -EINVAL;
	}
}
static struct irq_chip xive_irq_chip = {
	.name = "XIVE-IRQ",
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
	.irq_get_irqchip_state = xive_get_irqchip_state,
};

bool is_xive_irq(struct irq_chip *chip)
{
	return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	pr_debug("%s for HW %x\n", __func__, xd->hw_irq);

	if (xd->eoi_mmio) {
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	/*
	 * Turn OFF by default the interrupt being mapped. A side
	 * effect of this check is the mapping of the ESB page of the
	 * interrupt in the Linux address space. This prevents page
	 * fault issues in the crash handler which masks all
	 * interrupts.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_01);

	return 0;
}

void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;
	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
	kfree(xd);
}
EXPORT_SYMBOL_GPL(xive_irq_free_data);
#ifdef CONFIG_SMP

static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc;
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	xd = &xc->ipi_data;
	if (WARN_ON(!xd->trig_mmio))
		return;
	out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
	return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Handle possible race with unplug and drop stale IPIs */
	if (!xc)
		return;

	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

	xive_do_source_eoi(&xc->ipi_data);
	xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
	/*
	 * Nothing to do, we never mask/unmask IPIs, but the callback
	 * has to exist for the struct irq_chip.
	 */
}

static struct irq_chip xive_ipi_chip = {
	.name = "XIVE-IPI",
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
};
/*
 * IPIs are marked per-cpu. We use separate HW interrupts under the
 * hood but associated with the same "linux" interrupt
 */
struct xive_ipi_alloc_info {
	irq_hw_number_t hwirq;
};

static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs, void *arg)
{
	struct xive_ipi_alloc_info *info = arg;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip,
				    domain->host_data, handle_percpu_irq,
				    NULL, NULL);
	}

	return 0;
}

static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
	.alloc  = xive_ipi_irq_domain_alloc,
};
static int __init xive_request_ipi(void)
{
	struct fwnode_handle *fwnode;
	struct irq_domain *ipi_domain;
	unsigned int node;
	int ret = -ENOMEM;

	fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI");
	if (!fwnode)
		goto out;

	ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
					      &xive_ipi_irq_domain_ops, NULL);
	if (!ipi_domain)
		goto out_free_fwnode;

	xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL);
	if (!xive_ipis)
		goto out_free_domain;

	for_each_node(node) {
		struct xive_ipi_desc *xid = &xive_ipis[node];
		struct xive_ipi_alloc_info info = { node };

		/* Skip nodes without CPUs */
		if (cpumask_empty(cpumask_of_node(node)))
			continue;

		/*
		 * Map one IPI interrupt per node for all cpus of that node.
		 * Since the HW interrupt number doesn't have any meaning,
		 * simply use the node number.
		 */
		ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
		if (ret < 0)
			goto out_free_xive_ipis;
		xid->irq = ret;

		snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);

		ret = request_irq(xid->irq, xive_muxed_ipi_action,
				  IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);

		WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
	}

	return ret;

out_free_xive_ipis:
	kfree(xive_ipis);
out_free_domain:
	irq_domain_remove(ipi_domain);
out_free_fwnode:
	irq_domain_free_fwnode(fwnode);
out:
	return ret;
}
static int xive_setup_cpu_ipi(unsigned int cpu)
{
	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
	struct xive_cpu *xc;
	int rc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != XIVE_BAD_IRQ)
		return 0;

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc) {
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
		return -EIO;
	}
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
	if (rc) {
		pr_err("Failed to map IPI CPU %d\n", cpu);
		return -EIO;
	}
	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	/* Unmask it */
	xive_do_source_set_mask(&xc->ipi_data, false);

	return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);

	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: We don't call xive_cleanup_irq_data() to free
	 * the mappings as this is called from an IPI on kexec
	 * which is not a safe environment to call iounmap()
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPIs in the backend */
	xive_ops->put_ipi(cpu, xc);
}
void __init xive_smp_probe(void)
{
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
	xive_request_ipi();

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */
static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	int rc;

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. Will fix that up below if needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

	rc = xive_irq_alloc_data(virq, hw);
	if (rc)
		return rc;

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

	return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	xive_irq_free_data(virq);
}

static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second cell,
	 * we assume the LSB indicates a level interrupt.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else
		*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}
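
/*
 * Example device-tree specifiers accepted by the xlate above (the
 * numbers are hypothetical, for illustration only):
 *
 *	interrupts = <0x1234 1>;  // hwirq 0x1234, LSB set: level low
 *	interrupts = <0x1234 0>;  // hwirq 0x1234, LSB clear: edge rising
 *	interrupts = <0x1234>;    // single cell: defaults to level low
 */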
static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
{
	return xive_ops->match(node);
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" };

static const struct {
	u64  mask;
	char *name;
} xive_irq_flags[] = {
	{ XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" },
	{ XIVE_IRQ_FLAG_LSI,       "LSI" },
	{ XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" },
	{ XIVE_IRQ_FLAG_NO_EOI,    "NO_EOI" },
};

static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
				       struct irq_data *irqd, int ind)
{
	struct xive_irq_data *xd;
	u64 val;
	int i;

	/* No IRQ domain level information. To be done */
	if (!irqd)
		return;

	if (!is_xive_irq(irq_data_get_irq_chip(irqd)))
		return;

	seq_printf(m, "%*sXIVE:\n", ind, "");
	ind++;

	xd = irq_data_get_irq_handler_data(irqd);
	if (!xd) {
		seq_printf(m, "%*snot assigned\n", ind, "");
		return;
	}

	val = xive_esb_read(xd, XIVE_ESB_GET);
	seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]);
	seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "",
		   xd->saved_p ? "saved" : "");
	seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target);
	seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip);
	seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page);
	seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page);
	seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags);
	for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) {
		if (xd->flags & xive_irq_flags[i].mask)
			seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name);
	}
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
static int xive_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);
}

static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int i, rc;

	rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (rc)
		return rc;

	pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		/* TODO: call xive_irq_domain_map() */

		/*
		 * Mark interrupts as edge sensitive by default so that resend
		 * actually works. Will fix that up below if needed.
		 */
		irq_clear_status_flags(virq, IRQ_LEVEL);

		/* allocates and sets handler data */
		rc = xive_irq_alloc_data(virq + i, hwirq + i);
		if (rc)
			return rc;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &xive_irq_chip, domain->host_data);
		irq_set_handler(virq + i, handle_fasteoi_irq);
	}

	return 0;
}

static void xive_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	int i;

	pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);

	for (i = 0; i < nr_irqs; i++)
		xive_irq_free_data(virq + i);
}
#endif

static const struct irq_domain_ops xive_irq_domain_ops = {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	.alloc	= xive_irq_domain_alloc,
	.free	= xive_irq_domain_free,
	.translate = xive_irq_domain_translate,
#endif
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show = xive_irq_domain_debug_show,
#endif
};
static void __init xive_init_host(struct device_node *np)
{
	xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ,
					       &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
		return;
	irq_set_default_host(xive_irq_domain);
}
static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	int rc = 0;

	/* We set up one queue for now with a 64k page */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

	return rc;
}

static int xive_prepare_cpu(unsigned int cpu)
{
	struct xive_cpu *xc;

	xc = per_cpu(xive_cpu, cpu);
	if (!xc) {
		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));
		if (!xc)
			return -ENOMEM;
		xc->hw_ipi = XIVE_BAD_IRQ;
		xc->chip_id = XIVE_INVALID_CHIP_ID;
		if (xive_ops->prepare_cpu)
			xive_ops->prepare_cpu(cpu, xc);

		per_cpu(xive_cpu, cpu) = xc;
	}

	/* Setup EQs if not already */
	return xive_setup_cpu_queues(cpu, xc);
}
static void xive_setup_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* The backend might have additional things to do */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Set CPPR to 0xff to enable flow of interrupts */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
	pr_devel("SMP setup CPU %d\n", smp_processor_id());

	/* This will have already been done on the boot CPU */
	if (smp_processor_id() != boot_cpuid)
		xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
	int rc;

	/* Allocate per-CPU data and queues */
	rc = xive_prepare_cpu(cpu);
	if (rc)
		return rc;

	/* Allocate and setup IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;

		/*
		 * Ignore anything that isn't a XIVE irq, and ignore
		 * IPIs; they can just be dropped.
		 */
		if (d->domain != xive_irq_domain)
			continue;

		/*
		 * The IRQ should have already been re-routed, it's just a
		 * stale in the old queue, so re-trigger it in order to make
		 * it reach its new destination.
		 */
#ifdef DEBUG_FLUSH
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);
#endif
		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/*
		 * Clear saved_p to indicate that it's no longer pending
		 */
		xd->saved_p = false;

		/*
		 * For LSIs, we EOI, this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}
void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from the CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */
void xive_teardown_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
	xive_ops->shutdown();
}
bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
			   void __iomem *area, u32 offset, u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops = ops;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_devel("Initializing host..\n");
	xive_init_host(np);

	pr_devel("Initializing boot CPU..\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */
	xive_setup_cpu();

	pr_info("Interrupt handling initialized with %s backend\n",
		xive_ops->name);
	pr_info("Using priority %d for all interrupts\n", max_prio);

	return true;
}
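
/*
 * Sizing note for the allocator below: for the 64k queues used by
 * this driver (queue_shift = 16), xive_alloc_order() turns the byte
 * size into a page allocation order relative to the kernel page size,
 * and the memset clears exactly 1 << queue_shift bytes so the
 * valid/toggle bits of every slot start from a known state.
 */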
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
	unsigned int alloc_order;
	struct page *pages;
	__be32 *qpage;

	alloc_order = xive_alloc_order(queue_shift);
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	qpage = (__be32 *)page_address(pages);
	memset(qpage, 0, 1 << queue_shift);

	return qpage;
}
static int __init xive_off(char *arg)
{
	xive_cmdline_disabled = true;
	return 0;
}
__setup("xive=off", xive_off);
static void xive_debug_show_cpu(struct seq_file *m, int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	seq_printf(m, "CPU %d:", cpu);
	if (xc) {
		u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
		struct xive_q *q = &xc->queue[xive_irq_priority];
		u32 i0, i1, idx = q->idx;

		seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
		seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
			   val & XIVE_ESB_VAL_P ? 'P' : '-',
			   val & XIVE_ESB_VAL_Q ? 'Q' : '-');

		i0 = be32_to_cpup(q->qpage + idx);
		idx = (idx + 1) & q->msk;
		i1 = be32_to_cpup(q->qpage + idx);
		seq_printf(m, "EQ idx=%d T=%d %08x %08x ...",
			   q->idx, q->toggle, i0, i1);
	}
	seq_puts(m, "\n");
}
static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct xive_irq_data *xd;
	u32 target, lirq;
	u8 prio;
	u64 val;
	int rc;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return;
	}

	seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		   hw_irq, target, prio, lirq);

	xd = irq_data_get_irq_handler_data(d);
	val = xive_esb_read(xd, XIVE_ESB_GET);
	seq_printf(m, "flags=%c%c%c PQ=%c%c",
		   xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
		   xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
		   xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
		   val & XIVE_ESB_VAL_P ? 'P' : '-',
		   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	seq_puts(m, "\n");
}
static int xive_core_debug_show(struct seq_file *m, void *private)
{
	unsigned int i;
	struct irq_desc *desc;
	int cpu;

	if (xive_ops->debug_show)
		xive_ops->debug_show(m, private);

	for_each_possible_cpu(cpu)
		xive_debug_show_cpu(m, cpu);

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_desc_get_irq_data(desc);

		if (d->domain == xive_irq_domain)
			xive_debug_show_irq(m, d);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_core_debug);

int xive_core_debug_init(void)
{
	if (xive_enabled())
		debugfs_create_file("xive", 0400, powerpc_debugfs_root,
				    NULL, &xive_core_debug_fops);
	return 0;
}