// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
7 #define pr_fmt(fmt) "GICv3: " fmt
9 #include <linux/acpi.h>
10 #include <linux/cpu.h>
11 #include <linux/cpu_pm.h>
12 #include <linux/delay.h>
13 #include <linux/interrupt.h>
14 #include <linux/irqdomain.h>
15 #include <linux/kstrtox.h>
17 #include <linux/of_address.h>
18 #include <linux/of_irq.h>
19 #include <linux/percpu.h>
20 #include <linux/refcount.h>
21 #include <linux/slab.h>
23 #include <linux/irqchip.h>
24 #include <linux/irqchip/arm-gic-common.h>
25 #include <linux/irqchip/arm-gic-v3.h>
26 #include <linux/irqchip/irq-partition-percpu.h>
27 #include <linux/bitfield.h>
28 #include <linux/bits.h>
29 #include <linux/arm-smccc.h>
31 #include <asm/cputype.h>
32 #include <asm/exception.h>
33 #include <asm/smp_plat.h>
36 #include "irq-gic-common.h"
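/*
 * GICD_INT_DEF_PRI is 0xa0, so clearing bit 7 yields 0x20 for NMIs: a
 * strictly higher priority (lower value) than any regular IRQ, which
 * keeps pseudo-NMIs deliverable while the PMR masks normal interrupts.
 */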
38 #define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
40 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
41 #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
43 #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
45 struct redist_region {
46 void __iomem *redist_base;
	phys_addr_t phys_base;
	bool single_redist;
};
51 struct gic_chip_data {
52 struct fwnode_handle *fwnode;
53 phys_addr_t dist_phys_base;
54 void __iomem *dist_base;
	struct redist_region *redist_regions;
	struct rdists rdists;
	struct irq_domain *domain;
	u64 redist_stride;
	u32 nr_redist_regions;
	u64 flags;
	bool has_rss;
	unsigned int ppi_nr;
	struct partition_desc **ppi_descs;
};
66 #define T241_CHIPS_MAX 4
67 static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
68 static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);
70 static struct gic_chip_data gic_data __read_mostly;
71 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
73 #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
74 #define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
75 #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * See GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
98 static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
100 DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
101 EXPORT_SYMBOL(gic_nonsecure_priorities);
/*
 * When the Non-secure world has access to group 0 interrupts (as a
 * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
 * return the Distributor's view of the interrupt priority.
 *
 * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
 * written by software is moved to the Non-secure range by the Distributor.
 *
 * If both are true (which is when gic_nonsecure_priorities gets enabled),
 * we need to shift down the priority programmed by software to match it
 * against the value returned by ICC_RPR_EL1.
 */
#define GICD_INT_RPR_PRI(priority)					\
	({								\
		u32 __priority = (priority);				\
		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
			__priority = 0x80 | (__priority >> 1);		\
									\
		__priority;						\
	})
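/*
 * Worked example: an NMI programmed at GICD_INT_NMI_PRI (0x20) reads back
 * from ICC_RPR_EL1 as 0x80 | (0x20 >> 1) = 0x90 once the Distributor has
 * moved it into the non-secure half of the priority range.
 */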
124 /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
125 static refcount_t *ppi_nmi_refs;
127 static struct gic_kvm_info gic_v3_kvm_info __initdata;
128 static DEFINE_PER_CPU(bool, has_rss);
130 #define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
131 #define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
132 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
133 #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
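/*
 * Each redistributor exposes two consecutive 64K frames: RD_base (control
 * and physical LPIs) followed by SGI_base (SGI/PPI registers), hence the
 * fixed SZ_64K offset above.
 */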
135 /* Our default, arbitrary priority value. Linux only uses one anyway. */
136 #define DEFAULT_PMR_VALUE 0xf0
enum gic_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	EPPI_RANGE,
	ESPI_RANGE,
	LPI_RANGE,
	__INVALID_RANGE__
};
static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
	switch (hwirq) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
		return EPPI_RANGE;
	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
		return ESPI_RANGE;
	case 8192 ... GENMASK(23, 0):
		return LPI_RANGE;
	default:
		return __INVALID_RANGE__;
	}
}

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
178 static inline bool gic_irq_in_rdist(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		return true;
	default:
		return false;
	}
}
190 static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
192 if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
193 irq_hw_number_t hwirq = irqd_to_hwirq(d);
		/*
		 * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
		 * registers are directed to the chip that owns the SPI. The
		 * alias region can also be used for writes to the GICD_In{E}
		 * registers, except GICD_ICENABLERn. Each chip has support
		 * for 320 {E}SPIs. Mappings for all 4 chips:
		 *    Chip0 = 32-351
		 *    Chip1 = 352-671
		 *    Chip2 = 672-991
		 *    Chip3 = 4096-4415
		 */
		switch (__get_intid_range(hwirq)) {
		case SPI_RANGE:
			chip = (hwirq - 32) / 320;
			break;
		case ESPI_RANGE:
			chip = 3;
			break;
		default:
			unreachable();
		}

		return t241_dist_base_alias[chip];
	}
220 return gic_data.dist_base;
223 static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	case SPI_RANGE:
	case ESPI_RANGE:
		/* SPI -> dist_base */
		return gic_data.dist_base;

	default:
		return NULL;
	}
}
242 static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
244 u32 count = 1000000; /* 1s! */
	while (readl_relaxed(base + GICD_CTLR) & bit) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}
257 /* Wait for completion of a distributor change */
258 static void gic_dist_wait_for_rwp(void)
260 gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
263 /* Wait for completion of a redistributor change */
264 static void gic_redist_wait_for_rwp(void)
266 gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
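/*
 * Cavium ThunderX erratum 23154: IAR reads need extra synchronisation on
 * the affected parts, so a dedicated accessor is used when the arm64
 * capability bit is set (see the arm64 errata handling for details).
 */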
271 static u64 __maybe_unused gic_read_iar(void)
273 if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
274 return gic_read_iar_cavium_thunderx();
276 return gic_read_iar_common();
280 static void gic_enable_redist(bool enable)
283 u32 count = 1000000; /* 1s! */
	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;
289 rbase = gic_data_rdist_rd_base();
291 val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
297 writel_relaxed(val, rbase + GICR_WAKER);
299 if (!enable) { /* Check that GICR_WAKER is writeable */
300 val = readl_relaxed(rbase + GICR_WAKER);
301 if (!(val & GICR_WAKER_ProcessorSleep))
302 return; /* No PM support in this redistributor */
	/* Wait until the redistributor has reached the requested state */
	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}

	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wake up" : "sleep");
}
/*
 * Routines to disable, enable, EOI and route interrupts
 */
320 static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case SPI_RANGE:
		*index = d->hwirq;
		return offset;
	case EPPI_RANGE:
		/*
		 * Contrary to the ESPI range, the EPPI range is contiguous
		 * with the PPI range in the registers, so let's adjust the
		 * displacement accordingly. Consistency is overrated.
		 */
		*index = d->hwirq - EPPI_BASE_INTID + 32;
		return offset;
	case ESPI_RANGE:
		*index = d->hwirq - ESPI_BASE_INTID;
		switch (offset) {
		case GICD_ISENABLER:
			return GICD_ISENABLERnE;
		case GICD_ICENABLER:
			return GICD_ICENABLERnE;
		case GICD_ISPENDR:
			return GICD_ISPENDRnE;
		case GICD_ICPENDR:
			return GICD_ICPENDRnE;
		case GICD_ISACTIVER:
			return GICD_ISACTIVERnE;
		case GICD_ICACTIVER:
			return GICD_ICACTIVERnE;
		case GICD_IPRIORITYR:
			return GICD_IPRIORITYRnE;
		case GICD_ICFGR:
			return GICD_ICFGRnE;
		case GICD_IROUTER:
			return GICD_IROUTERnE;
		default:
			break;
		}
		break;
	default:
		break;
	}

	WARN_ON(1);
	*index = d->hwirq;
	return offset;
}
370 static int gic_peek_irq(struct irq_data *d, u32 offset)
375 offset = convert_offset_index(d, offset, &index);
376 mask = 1 << (index % 32);
	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_dist_base_alias(d);
383 return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
386 static void gic_poke_irq(struct irq_data *d, u32 offset)
391 offset = convert_offset_index(d, offset, &index);
392 mask = 1 << (index % 32);
	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;
399 writel_relaxed(mask, base + offset + (index / 32) * 4);
402 static void gic_mask_irq(struct irq_data *d)
404 gic_poke_irq(d, GICD_ICENABLER);
	if (gic_irq_in_rdist(d))
		gic_redist_wait_for_rwp();
	else
		gic_dist_wait_for_rwp();
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);

	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (the guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}
426 static void gic_unmask_irq(struct irq_data *d)
428 gic_poke_irq(d, GICD_ISENABLER);
431 static inline bool gic_supports_nmi(void)
433 return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
434 static_branch_likely(&supports_pseudo_nmis);
437 static int gic_irq_set_irqchip_state(struct irq_data *d,
438 enum irqchip_irq_state which, bool val)
	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;
446 case IRQCHIP_STATE_PENDING:
447 reg = val ? GICD_ISPENDR : GICD_ICPENDR;
450 case IRQCHIP_STATE_ACTIVE:
451 reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
	case IRQCHIP_STATE_MASKED:
		if (val) {
			gic_mask_irq(d);
			return 0;
		}
		reg = GICD_ISENABLER;
		break;
466 gic_poke_irq(d, reg);
470 static int gic_irq_get_irqchip_state(struct irq_data *d,
471 enum irqchip_irq_state which, bool *val)
	if (d->hwirq >= 8192) /* PPI/SPI only */
		return -EINVAL;
477 case IRQCHIP_STATE_PENDING:
478 *val = gic_peek_irq(d, GICD_ISPENDR);
481 case IRQCHIP_STATE_ACTIVE:
482 *val = gic_peek_irq(d, GICD_ISACTIVER);
485 case IRQCHIP_STATE_MASKED:
486 *val = !gic_peek_irq(d, GICD_ISENABLER);
496 static void gic_irq_set_prio(struct irq_data *d, u8 prio)
498 void __iomem *base = gic_dist_base(d);
501 offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
503 writeb_relaxed(prio, base + offset + index);
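/*
 * PPIs 16-31 map to indices 0-15; EPPIs (INTID 1056 and up) continue at
 * index 16, giving a single dense per-CPU index space used for things
 * like ppi_nmi_refs[] above.
 */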
static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
{
	switch (__get_intid_range(hwirq)) {
	case PPI_RANGE:
		return hwirq - 16;
	case EPPI_RANGE:
		return hwirq - EPPI_BASE_INTID + 16;
	default:
		unreachable();
	}
}
518 static u32 gic_get_ppi_index(struct irq_data *d)
520 return __gic_get_ppi_index(d->hwirq);
523 static int gic_irq_nmi_setup(struct irq_data *d)
525 struct irq_desc *desc = irq_to_desc(d->irq);
	if (!gic_supports_nmi())
		return -EINVAL;
530 if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
	}
	/*
	 * A secondary irq_chip should be in charge of LPI requests;
	 * it should not be possible to end up here.
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return -EINVAL;
542 /* desc lock should already be held */
543 if (gic_irq_in_rdist(d)) {
544 u32 idx = gic_get_ppi_index(d);
546 /* Setting up PPI as NMI, only switch handler for first NMI */
547 if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
548 refcount_set(&ppi_nmi_refs[idx], 1);
			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
		}
	} else {
		desc->handle_irq = handle_fasteoi_nmi;
	}
	gic_irq_set_prio(d, GICD_INT_NMI_PRI);

	return 0;
}
560 static void gic_irq_nmi_teardown(struct irq_data *d)
562 struct irq_desc *desc = irq_to_desc(d->irq);
	if (WARN_ON(!gic_supports_nmi()))
		return;
567 if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
		return;
	}
	/*
	 * A secondary irq_chip should be in charge of LPI requests;
	 * it should not be possible to end up here.
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return;
579 /* desc lock should already be held */
580 if (gic_irq_in_rdist(d)) {
581 u32 idx = gic_get_ppi_index(d);
583 /* Tearing down NMI, only switch handler for last NMI */
584 if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
			desc->handle_irq = handle_percpu_devid_irq;
	} else {
		desc->handle_irq = handle_fasteoi_irq;
	}
590 gic_irq_set_prio(d, GICD_INT_DEF_PRI);
593 static void gic_eoi_irq(struct irq_data *d)
{
	write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
	isb();
}
599 static void gic_eoimode1_eoi_irq(struct irq_data *d)
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
610 static int gic_set_type(struct irq_data *d, unsigned int type)
612 enum gic_intid_range range;
613 unsigned int irq = gic_irq(d);
618 range = get_intid_range(d);
620 /* Interrupt configuration for SGIs can't be changed */
621 if (range == SGI_RANGE)
622 return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
624 /* SPIs have restrictions on the supported types */
625 if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;
	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_dist_base_alias(d);
634 offset = convert_offset_index(d, GICD_ICFGR, &index);
636 ret = gic_configure_irq(index, type, base + offset, NULL);
637 if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
638 /* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
		ret = 0;
	}

	return ret;
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (get_intid_range(d) == SGI_RANGE)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}
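/*
 * Pack an MPIDR into the GICD_IROUTER layout: Aff0 in bits [7:0], Aff1 in
 * [15:8], Aff2 in [23:16] and Aff3 in [39:32]. Bit 31
 * (Interrupt_Routing_Mode) is left clear, so the SPI is delivered to the
 * specific PE rather than to any participating one.
 */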
658 static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
	u64 aff;

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}
static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		if (irqnr < 8192)
			gic_write_dir(irqnr);
	} else {
		write_gicreg(irqnr, ICC_EOIR1_EL1);
	}
}
/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Deactivate the interrupt when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key))
		write_gicreg(irqnr, ICC_EOIR1_EL1);

	isb();
}
708 static bool gic_rpr_is_nmi_prio(void)
710 if (!gic_supports_nmi())
713 return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
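/*
 * INTIDs 1020-1023 are architecturally reserved for special purposes
 * (1023 being the "no pending interrupt" spurious ID) and must never be
 * handed to a handler or EOIed.
 */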
716 static bool gic_irqnr_is_special(u32 irqnr)
718 return irqnr >= 1020 && irqnr <= 1023;
721 static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
	if (gic_irqnr_is_special(irqnr))
		return;
726 gic_complete_ack(irqnr);
728 if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
729 WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
730 gic_deactivate_unhandled(irqnr);
734 static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
	if (gic_irqnr_is_special(irqnr))
		return;
739 gic_complete_ack(irqnr);
741 if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
742 WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
743 gic_deactivate_unhandled(irqnr);
/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
	bool is_nmi;
	u32 irqnr;

	irqnr = gic_read_iar();

	is_nmi = gic_rpr_is_nmi_prio();

	if (is_nmi) {
		nmi_enter();
		__gic_handle_nmi(irqnr, regs);
		nmi_exit();
	}

	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	if (!is_nmi)
		__gic_handle_irq(irqnr, regs);
}
/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
	u64 pmr;
	u32 irqnr;

	/*
	 * We were in a context with IRQs disabled. However, the
	 * entry code has set PMR to a value that allows any
	 * interrupt to be acknowledged, and not just NMIs. This can
	 * lead to surprising effects if the NMI has been retired in
	 * the meantime, and an IRQ is now pending. The IRQ
	 * would then be taken in NMI context, something that nobody
	 * wants to debug twice.
	 *
	 * Until we sort this, drop PMR again to a level that will
	 * actually only allow NMIs before reading IAR, and then
	 * restore it to what it was.
	 */
	pmr = gic_read_pmr();
	gic_pmr_mask_irqs();
	isb();
	irqnr = gic_read_iar();
	gic_write_pmr(pmr);

	__gic_handle_nmi(irqnr, regs);
}
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
		__gic_handle_irq_from_irqsoff(regs);
	else
		__gic_handle_irq_from_irqson(regs);
}
static u32 gic_get_pribits(void)
{
	u32 pribits;

	pribits = gic_read_ctlr();
	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
	pribits++;

	return pribits;
}
static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}
864 static void __init gic_dist_init(void)
868 void __iomem *base = gic_data.dist_base;
871 /* Disable the distributor */
872 writel_relaxed(0, base + GICD_CTLR);
873 gic_dist_wait_for_rwp();
	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
881 for (i = 32; i < GIC_LINE_NR; i += 32)
882 writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
884 /* Extended SPI range, not handled by the GICv2/GICv3 common code */
885 for (i = 0; i < GIC_ESPI_NR; i += 32) {
886 writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
887 writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
890 for (i = 0; i < GIC_ESPI_NR; i += 32)
891 writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
893 for (i = 0; i < GIC_ESPI_NR; i += 16)
894 writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
896 for (i = 0; i < GIC_ESPI_NR; i += 4)
897 writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
899 /* Now do the common stuff */
900 gic_dist_config(base, GIC_LINE_NR, NULL);
902 val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
903 if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
904 pr_info("Enabling SGIs without active state\n");
905 val |= GICD_CTLR_nASSGIreq;
908 /* Enable distributor with ARE, Group1, and wait for it to drain */
909 writel_relaxed(val, base + GICD_CTLR);
910 gic_dist_wait_for_rwp();
	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
916 affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
917 for (i = 32; i < GIC_LINE_NR; i++)
918 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
920 for (i = 0; i < GIC_ESPI_NR; i++)
921 gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
924 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
929 for (i = 0; i < gic_data.nr_redist_regions; i++) {
930 void __iomem *ptr = gic_data.redist_regions[i].redist_base;
934 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
935 if (reg != GIC_PIDR2_ARCH_GICv3 &&
936 reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
937 pr_warn("No redistributor present @%p\n", ptr);
942 typer = gic_read_typer(ptr + GICR_TYPER);
943 ret = fn(gic_data.redist_regions + i, ptr);
947 if (gic_data.redist_regions[i].single_redist)
		if (gic_data.redist_stride) {
			ptr += gic_data.redist_stride;
		} else {
			ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
			if (typer & GICR_TYPER_VLPIS)
				ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
		}
957 } while (!(typer & GICR_TYPER_LAST));
960 return ret ? -ENODEV : 0;
963 static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
965 unsigned long mpidr = cpu_logical_map(smp_processor_id());
	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
973 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
974 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
975 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
976 MPIDR_AFFINITY_LEVEL(mpidr, 0));
978 typer = gic_read_typer(ptr + GICR_TYPER);
979 if ((typer >> 32) == aff) {
980 u64 offset = ptr - region->redist_base;
981 raw_spin_lock_init(&gic_data_rdist()->rd_lock);
982 gic_data_rdist_rd_base() = ptr;
983 gic_data_rdist()->phys_base = region->phys_base + offset;
985 pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
986 smp_processor_id(), mpidr,
987 (int)(region - gic_data.redist_regions),
988 &gic_data_rdist()->phys_base);
996 static int gic_populate_rdist(void)
	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
		return 0;
1001 /* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}
1008 static int __gic_update_rdist_properties(struct redist_region *region,
1011 u64 typer = gic_read_typer(ptr + GICR_TYPER);
1012 u32 ctlr = readl_relaxed(ptr + GICR_CTLR);
1014 /* Boot-time cleanup */
1015 if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
1018 /* Deactivate any present vPE */
1019 val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
1020 if (val & GICR_VPENDBASER_Valid)
1021 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
1022 ptr + SZ_128K + GICR_VPENDBASER);
1024 /* Mark the VPE table as invalid */
1025 val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
1026 val &= ~GICR_VPROPBASER_4_1_VALID;
1027 gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
1030 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
	/*
	 * TYPER.RVPEID implies some form of DirectLPI, no matter what the
	 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
	 * that the ITS driver can make use of for LPIs (and not VLPIs).
	 *
	 * These are 3 different ways to express the same thing, depending
	 * on the revision of the architecture and its relaxations over
	 * time. Just group them under the 'direct_lpi' banner.
	 */
1041 gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
1042 gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
1043 !!(ctlr & GICR_CTLR_IR) |
1044 gic_data.rdists.has_rvpeid);
1045 gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
1047 /* Detect non-sensical configurations */
1048 if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
1049 gic_data.rdists.has_direct_lpi = false;
1050 gic_data.rdists.has_vlpis = false;
1051 gic_data.rdists.has_rvpeid = false;
1054 gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
1059 static void gic_update_rdist_properties(void)
1061 gic_data.ppi_nr = UINT_MAX;
1062 gic_iterate_rdists(__gic_update_rdist_properties);
1063 if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
1064 gic_data.ppi_nr = 0;
	pr_info("GICv3 features: %d PPIs%s%s\n",
		gic_data.ppi_nr,
		gic_data.has_rss ? ", RSS" : "",
		gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");
1070 if (gic_data.rdists.has_vlpis)
1071 pr_info("GICv4 features: %s%s%s\n",
1072 gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
1073 gic_data.rdists.has_rvpeid ? "RVPEID " : "",
1074 gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
1077 /* Check whether it's single security state view */
1078 static inline bool gic_dist_security_disabled(void)
1080 return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
1083 static void gic_cpu_sys_reg_init(void)
1085 int i, cpu = smp_processor_id();
1086 u64 mpidr = cpu_logical_map(cpu);
1087 u64 need_rss = MPIDR_RS(mpidr);
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
1098 if (!gic_enable_sre())
1099 pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
1101 pribits = gic_get_pribits();
1103 group0 = gic_has_group0();
1105 /* Set priority mask register */
1106 if (!gic_prio_masking_enabled()) {
1107 write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
1108 } else if (gic_supports_nmi()) {
		/*
		 * Mismatched configuration with the boot CPU; the system is
		 * likely to die, as interrupt masking will not work properly
		 * on all CPUs.
		 *
		 * The boot CPU calls this function before enabling NMI
		 * support, and as a result we'll never see this warning in
		 * the boot path for that CPU.
		 */
1118 if (static_branch_unlikely(&gic_nonsecure_priorities))
1119 WARN_ON(!group0 || gic_dist_security_disabled());
1121 WARN_ON(group0 && !gic_dist_security_disabled());
	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);
1132 if (static_branch_likely(&supports_deactivate_key)) {
1133 /* EOI drops priority only (mode 1) */
1134 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
1136 /* EOI deactivates interrupt too (mode 0) */
1137 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	/* Always whack Group0 before Group1 */
	if (group0) {
		switch (pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch (pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();
1175 /* ... and let's hit the road... */
1176 gic_write_grpen1(1);
1178 /* Keep the RSS capability status in per_cpu variable */
1179 per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
	/* Check that all the CPUs are capable of sending SGIs to other CPUs */
1182 for_each_online_cpu(i) {
1183 bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
1185 need_rss |= MPIDR_RS(cpu_logical_map(i));
1186 if (need_rss && (!have_rss))
1187 pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
1188 cpu, (unsigned long)mpidr,
1189 i, (unsigned long)cpu_logical_map(i));
	/*
	 * The GIC spec says that, when ICC_CTLR_EL1.RSS==1 and
	 * GICD_TYPER.RSS==0, writing the ICC_ASGI1R_EL1 register with
	 * RS != 0 is a CONSTRAINED UNPREDICTABLE choice of:
	 * - The write is ignored.
	 * - The RS field is treated as 0.
	 */
1199 if (need_rss && (!gic_data.has_rss))
1200 pr_crit_once("RSS is required but GICD doesn't support it\n");
1203 static bool gicv3_nolpi;
1205 static int __init gicv3_nolpi_cfg(char *buf)
1207 return kstrtobool(buf, &gicv3_nolpi);
1209 early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
1211 static int gic_dist_supports_lpis(void)
	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
		!gicv3_nolpi);
1218 static void gic_cpu_init(void)
1220 void __iomem *rbase;
1223 /* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;
1227 gic_enable_redist(true);
1229 WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
1230 !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
1231 "Distributor has extended ranges, but CPU%d doesn't\n",
1232 smp_processor_id());
1234 rbase = gic_data_rdist_sgi_base();
1236 /* Configure SGIs/PPIs as non-secure Group-1 */
1237 for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
1238 writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
1240 gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
1242 /* initialise system registers */
1243 gic_cpu_sys_reg_init();
#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)		(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)
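/*
 * The Range Selector (RS) picks which aligned group of 16 Aff0 values a
 * 16-bit SGI target list refers to; it is only usable when both the CPU
 * interface and the distributor implement RSS.
 */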
static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init();

	if (gic_dist_supports_lpis())
		its_cpu_init();

	return 0;
}
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
1288 #define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
1289 (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
1290 << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
1292 static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
1296 val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
1297 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
1298 irq << ICC_SGI1R_SGI_ID_SHIFT |
1299 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
1300 MPIDR_TO_SGI_RS(cluster_id) |
1301 tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
1303 pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
1304 gic_write_sgi1r(val);
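/*
 * A single ICC_SGI1R_EL1 write targets at most 16 CPUs (one target-list
 * bit per Aff0 value) within one cluster; gic_ipi_send_mask() below loops
 * over clusters so an arbitrary cpumask can be covered.
 */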
1307 static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
	if (WARN_ON(d->hwirq >= 16))
		return;
	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb(ishst);
1320 for_each_cpu(cpu, mask) {
1321 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
1324 tlist = gic_compute_target_list(&cpu, mask, cluster_id);
1325 gic_send_sgi(cluster_id, tlist, d->hwirq);
	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
1332 static void __init gic_smp_init(void)
1334 struct irq_fwspec sgi_fwspec = {
		.fwnode = gic_data.fwnode,
		.param_count = 1,
	};
	int base_sgi;
1340 cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
1341 "irqchip/arm/gicv3:starting",
1342 gic_starting_cpu, NULL);
1344 /* Register all 8 non-secure SGIs */
1345 base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
	if (WARN_ON(base_sgi <= 0))
		return;
1349 set_smp_ipi_range(base_sgi, 8);
1352 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
1366 if (cpu >= nr_cpu_ids)
	if (gic_irq_in_rdist(d))
		return -EINVAL;
	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);
1377 offset = convert_offset_index(d, GICD_IROUTER, &index);
1378 reg = gic_dist_base(d) + offset + (index * 8);
1379 val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
1381 gic_write_irouter(val, reg);
	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
1392 return IRQ_SET_MASK_OK_DONE;
#else

#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#define gic_smp_init()		do { } while (0)

#endif /* CONFIG_SMP */
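/*
 * There is no dedicated retrigger mechanism: re-raising an interrupt is
 * emulated by setting its pending state again, and the zero-on-success
 * return of gic_irq_set_irqchip_state() is inverted into the boolean the
 * core code expects.
 */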
1400 static int gic_retrigger(struct irq_data *data)
1402 return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
1405 #ifdef CONFIG_CPU_PM
1406 static int gic_cpu_pm_notifier(struct notifier_block *self,
1407 unsigned long cmd, void *v)
1409 if (cmd == CPU_PM_EXIT) {
1410 if (gic_dist_security_disabled())
1411 gic_enable_redist(true);
1412 gic_cpu_sys_reg_init();
1413 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
1414 gic_write_grpen1(0);
		gic_enable_redist(false);
	}

	return NOTIFY_OK;
}
1420 static struct notifier_block gic_cpu_pm_notifier_block = {
1421 .notifier_call = gic_cpu_pm_notifier,
1424 static void gic_cpu_pm_init(void)
1426 cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
1430 static inline void gic_cpu_pm_init(void) { }
1431 #endif /* CONFIG_CPU_PM */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
1435 .irq_mask = gic_mask_irq,
1436 .irq_unmask = gic_unmask_irq,
1437 .irq_eoi = gic_eoi_irq,
1438 .irq_set_type = gic_set_type,
1439 .irq_set_affinity = gic_set_affinity,
1440 .irq_retrigger = gic_retrigger,
1441 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
1442 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
1443 .irq_nmi_setup = gic_irq_nmi_setup,
1444 .irq_nmi_teardown = gic_irq_nmi_teardown,
1445 .ipi_send_mask = gic_ipi_send_mask,
1446 .flags = IRQCHIP_SET_TYPE_MASKED |
1447 IRQCHIP_SKIP_SET_WAKE |
1448 IRQCHIP_MASK_ON_SUSPEND,
static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
1453 .irq_mask = gic_eoimode1_mask_irq,
1454 .irq_unmask = gic_unmask_irq,
1455 .irq_eoi = gic_eoimode1_eoi_irq,
1456 .irq_set_type = gic_set_type,
1457 .irq_set_affinity = gic_set_affinity,
1458 .irq_retrigger = gic_retrigger,
1459 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
1460 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
1461 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
1462 .irq_nmi_setup = gic_irq_nmi_setup,
1463 .irq_nmi_teardown = gic_irq_nmi_teardown,
1464 .ipi_send_mask = gic_ipi_send_mask,
1465 .flags = IRQCHIP_SET_TYPE_MASKED |
1466 IRQCHIP_SKIP_SET_WAKE |
1467 IRQCHIP_MASK_ON_SUSPEND,
1470 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
1473 struct irq_chip *chip = &gic_chip;
1474 struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
1476 if (static_branch_likely(&supports_deactivate_key))
1477 chip = &gic_eoimode1_chip;
	switch (__get_intid_range(hw)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;

	case SPI_RANGE:
	case ESPI_RANGE:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;

	case LPI_RANGE:
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		break;

	default:
		return -EPERM;
	}
1507 /* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}
1512 static int gic_irq_domain_translate(struct irq_domain *d,
1513 struct irq_fwspec *fwspec,
1514 unsigned long *hwirq,
1517 if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
1518 *hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}
1523 if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;
		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case 2:			/* ESPI */
			*hwirq = fwspec->param[1] + ESPI_BASE_INTID;
			break;
		case 3:			/* EPPI */
			*hwirq = fwspec->param[1] + EPPI_BASE_INTID;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		case GIC_IRQ_TYPE_PARTITION:
			*hwirq = fwspec->param[1];
			if (fwspec->param[1] >= 16)
				*hwirq += EPPI_BASE_INTID - 16;
			else
				*hwirq += 16;
			break;
		default:
			return -EINVAL;
		}
1554 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		/*
		 * Make it clear that broken DTs are... broken.
		 * Partitioned PPIs are an unfortunate exception.
		 */
		WARN_ON(*type == IRQ_TYPE_NONE &&
			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
		return 0;
	}
1565 if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;
		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}
1575 *hwirq = fwspec->param[0];
1576 *type = fwspec->param[1];
		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	return -EINVAL;
}
1585 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1586 unsigned int nr_irqs, void *arg)
1589 irq_hw_number_t hwirq;
1590 unsigned int type = IRQ_TYPE_NONE;
1591 struct irq_fwspec *fwspec = arg;
	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;
1597 for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
1606 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1607 unsigned int nr_irqs)
1611 for (i = 0; i < nr_irqs; i++) {
1612 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1613 irq_set_handler(virq + i, NULL);
1614 irq_domain_reset_irq_data(d);
1618 static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
1619 irq_hw_number_t hwirq)
1621 enum gic_intid_range range;
	if (!gic_data.ppi_descs)
		return false;

	if (!is_of_node(fwspec->fwnode))
		return false;

	if (fwspec->param_count < 4 || !fwspec->param[3])
		return false;

	range = __get_intid_range(hwirq);
	if (range != PPI_RANGE && range != EPPI_RANGE)
		return false;

	return true;
}
1639 static int gic_irq_domain_select(struct irq_domain *d,
1640 struct irq_fwspec *fwspec,
1641 enum irq_domain_bus_token bus_token)
1643 unsigned int type, ret, ppi_idx;
1644 irq_hw_number_t hwirq;
	if (fwspec->fwnode != d->fwnode)
		return 0;
	/* If this is not DT, then we have a single domain */
	if (!is_of_node(fwspec->fwnode))
		return 1;
1654 ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
	if (WARN_ON_ONCE(ret))
		return 0;
1658 if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
1659 return d == gic_data.domain;
	/*
	 * If this is a PPI and we have a 4th (non-null) parameter,
	 * then we need to match the partition domain.
	 */
1665 ppi_idx = __gic_get_ppi_index(hwirq);
1666 return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
1669 static const struct irq_domain_ops gic_irq_domain_ops = {
1670 .translate = gic_irq_domain_translate,
1671 .alloc = gic_irq_domain_alloc,
1672 .free = gic_irq_domain_free,
1673 .select = gic_irq_domain_select,
1676 static int partition_domain_translate(struct irq_domain *d,
1677 struct irq_fwspec *fwspec,
1678 unsigned long *hwirq,
1681 unsigned long ppi_intid;
1682 struct device_node *np;
1683 unsigned int ppi_idx;
	if (!gic_data.ppi_descs)
		return -ENOMEM;

	np = of_find_node_by_phandle(fwspec->param[3]);
	if (WARN_ON(!np))
		return -EINVAL;
1693 ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
	if (WARN_ON_ONCE(ret))
		return 0;
1697 ppi_idx = __gic_get_ppi_index(ppi_intid);
	ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
				     of_node_to_fwnode(np));
	if (ret < 0)
		return ret;

	*hwirq = ret;
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
1709 static const struct irq_domain_ops partition_domain_ops = {
1710 .translate = partition_domain_translate,
1711 .select = gic_irq_domain_select,
1714 static bool gic_enable_quirk_msm8996(void *data)
1716 struct gic_chip_data *d = data;
	d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;

	return true;
}
1723 static bool gic_enable_quirk_cavium_38539(void *data)
1725 struct gic_chip_data *d = data;
	d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;

	return true;
}
1732 static bool gic_enable_quirk_hip06_07(void *data)
1734 struct gic_chip_data *d = data;
1737 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1738 * not being an actual ARM implementation). The saving grace is
1739 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1740 * HIP07 doesn't even have a proper IIDR, and still pretends to
1741 * have ESPI. In both cases, put them right.
1743 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1744 /* Zero both ESPI and the RES0 field next to it... */
		d->rdists.gicd_typer &= ~GENMASK(9, 8);
	}

	return true;
}
1752 #define T241_CHIPN_MASK GENMASK_ULL(45, 44)
1753 #define T241_CHIP_GICDA_OFFSET 0x1580000
1754 #define SMCCC_SOC_ID_T241 0x036b0241
1756 static bool gic_enable_quirk_nvidia_t241(void *data)
1758 s32 soc_id = arm_smccc_get_soc_id_version();
1759 unsigned long chip_bmask = 0;
1763 /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */
	if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241))
		return false;
1767 /* Find the chips based on GICR regions PHYS addr */
1768 for (i = 0; i < gic_data.nr_redist_regions; i++) {
1769 chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK,
1770 (u64)gic_data.redist_regions[i].phys_base));
	if (hweight32(chip_bmask) < 3)
		return false;
1776 /* Setup GICD alias regions */
1777 for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) {
1778 if (chip_bmask & BIT(i)) {
1779 phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET;
1780 phys |= FIELD_PREP(T241_CHIPN_MASK, i);
1781 t241_dist_base_alias[i] = ioremap(phys, SZ_64K);
1782 WARN_ON_ONCE(!t241_dist_base_alias[i]);
	static_branch_enable(&gic_nvidia_t241_erratum);

	return true;
}
1789 static const struct gic_quirk gic_quirks[] = {
1791 .desc = "GICv3: Qualcomm MSM8996 broken firmware",
1792 .compatible = "qcom,msm8996-gic-v3",
1793 .init = gic_enable_quirk_msm8996,
1796 .desc = "GICv3: HIP06 erratum 161010803",
1799 .init = gic_enable_quirk_hip06_07,
1802 .desc = "GICv3: HIP07 erratum 161010803",
1805 .init = gic_enable_quirk_hip06_07,
1809 * Reserved register accesses generate a Synchronous
1810 * External Abort. This erratum applies to:
1811 * - ThunderX: CN88xx
1812 * - OCTEON TX: CN83xx, CN81xx
1813 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
1815 .desc = "GICv3: Cavium erratum 38539",
1818 .init = gic_enable_quirk_cavium_38539,
1821 .desc = "GICv3: NVIDIA erratum T241-FABRIC-4",
1824 .init = gic_enable_quirk_nvidia_t241,
1830 static void gic_enable_nmi_support(void)
	if (!gic_prio_masking_enabled())
		return;
	ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
	if (!ppi_nmi_refs)
		return;
1841 for (i = 0; i < gic_data.ppi_nr; i++)
1842 refcount_set(&ppi_nmi_refs[i], 0);
1844 pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
1845 gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
	/*
	 * How priority values are used by the GIC depends on two things:
	 * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
	 * and if Group 0 interrupts can be delivered to Linux in the non-secure
	 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
	 * ICC_PMR_EL1 register and the priority that software assigns to
	 * interrupts:
	 *
	 * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
	 * -----------------------------------------------------------
	 *      1       |      -      |  unchanged  |    unchanged
	 * -----------------------------------------------------------
	 *      0       |      1      |  non-secure |    non-secure
	 * -----------------------------------------------------------
	 *      0       |      0      |  unchanged  |    non-secure
	 *
	 * where non-secure means that the value is right-shifted by one and the
	 * MSB bit set, to make it fit in the non-secure priority range.
	 *
	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
	 * are both either modified or unchanged, we can use the same set of
	 * priorities.
	 *
	 * In the last case, where only the interrupt priorities are modified to
	 * be in the non-secure range, we use a different PMR value to mask IRQs
	 * and the rest of the values that we use remain unchanged.
	 */
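	/*
	 * Worked example for the last row: with DS == 0 and SCR_EL3.FIQ == 0,
	 * a Group 1 interrupt programmed at GICD_INT_DEF_PRI (0xa0) is
	 * presented as (0xa0 >> 1) | 0x80 = 0xd0, while PMR writes from EL1
	 * are used as-is - hence the shifted comparison in GICD_INT_RPR_PRI().
	 */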
1874 if (gic_has_group0() && !gic_dist_security_disabled())
1875 static_branch_enable(&gic_nonsecure_priorities);
1877 static_branch_enable(&supports_pseudo_nmis);
1879 if (static_branch_likely(&supports_deactivate_key))
1880 gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1882 gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1885 static int __init gic_init_bases(phys_addr_t dist_phys_base,
1886 void __iomem *dist_base,
1887 struct redist_region *rdist_regs,
1888 u32 nr_redist_regions,
1890 struct fwnode_handle *handle)
1895 if (!is_hyp_mode_available())
1896 static_branch_disable(&supports_deactivate_key);
1898 if (static_branch_likely(&supports_deactivate_key))
1899 pr_info("GIC: Using split EOI/Deactivate mode\n");
1901 gic_data.fwnode = handle;
1902 gic_data.dist_phys_base = dist_phys_base;
1903 gic_data.dist_base = dist_base;
1904 gic_data.redist_regions = rdist_regs;
1905 gic_data.nr_redist_regions = nr_redist_regions;
1906 gic_data.redist_stride = redist_stride;
	/*
	 * Find out how many interrupts are supported.
	 */
1911 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
1912 gic_data.rdists.gicd_typer = typer;
1914 gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
1915 gic_quirks, &gic_data);
1917 pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
1918 pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
	/*
	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
	 * architecture spec (which says that reserved registers are RES0).
	 */
1924 if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
1925 gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
1929 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) {
		/* GICv4.x features stay off with erratum T241-FABRIC-4 */
1932 gic_data.rdists.has_rvpeid = true;
1933 gic_data.rdists.has_vlpis = true;
1934 gic_data.rdists.has_direct_lpi = true;
1935 gic_data.rdists.has_vpend_valid_dirty = true;
	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}
1943 irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
1945 gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
1947 if (typer & GICD_TYPER_MBIS) {
		err = mbi_init(handle, gic_data.domain);
		if (err)
			pr_err("Failed to initialize MBIs\n");
	}
1953 set_handle_irq(gic_handle_irq);
	gic_update_rdist_properties();

	gic_dist_init();
	gic_cpu_init();
	gic_smp_init();
	gic_cpu_pm_init();

	if (gic_dist_supports_lpis()) {
		its_init(handle, &gic_data.rdists, gic_data.domain);
		its_cpu_init();
		its_lpi_memreserve_init();
	} else {
		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
			gicv2m_init(handle, gic_data.domain);
	}
	gic_enable_nmi_support();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);

	return err;
}
1982 static int __init gic_validate_dist_version(void __iomem *dist_base)
1984 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}
1992 /* Create all possible partitions at boot time */
1993 static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1995 struct device_node *parts_node, *child_part;
1996 int part_idx = 0, i;
1998 struct partition_affinity *parts;
	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
	if (!gic_data.ppi_descs)
		goto out_put_node;
	nr_parts = of_get_child_count(parts_node);
	if (!nr_parts)
		goto out_put_node;
2013 parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
	if (WARN_ON(!parts))
		goto out_put_node;
2017 for_each_child_of_node(parts_node, child_part) {
2018 struct partition_affinity *part;
2021 part = &parts[part_idx];
2023 part->partition_id = of_node_to_fwnode(child_part);
2025 pr_info("GIC: PPI partition %pOFn[%d] { ",
2026 child_part, part_idx);
		n = of_property_count_elems_of_size(child_part, "affinity",
						    sizeof(u32));
		WARN_ON(n <= 0);
2032 for (i = 0; i < n; i++) {
2035 struct device_node *cpu_node;
			err = of_property_read_u32_index(child_part, "affinity",
							 i, &cpu_phandle);
			if (WARN_ON(err))
				continue;
2042 cpu_node = of_find_node_by_phandle(cpu_phandle);
			if (WARN_ON(!cpu_node))
				continue;
2046 cpu = of_cpu_node_to_id(cpu_node);
2047 if (WARN_ON(cpu < 0)) {
2048 of_node_put(cpu_node);
2052 pr_cont("%pOF[%d] ", cpu_node, cpu);
2054 cpumask_set_cpu(cpu, &part->mask);
2055 of_node_put(cpu_node);
2062 for (i = 0; i < gic_data.ppi_nr; i++) {
2064 struct partition_desc *desc;
2065 struct irq_fwspec ppi_fwspec = {
2066 .fwnode = gic_data.fwnode,
			.param_count = 3,
			.param = {
				[0] = GIC_IRQ_TYPE_PARTITION,
				[1] = i,
				[2] = IRQ_TYPE_NONE,
			},
		};
		irq = irq_create_fwspec_mapping(&ppi_fwspec);
		if (WARN_ON(irq <= 0))
			continue;
		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
					     irq, &partition_domain_ops);
		if (WARN_ON(!desc))
			continue;
2083 gic_data.ppi_descs[i] = desc;
out_put_node:
	of_node_put(parts_node);
}
2090 static void __init gic_of_setup_kvm_info(struct device_node *node)
2096 gic_v3_kvm_info.type = GIC_V3;
2098 gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;
	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;
2111 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2112 gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2113 vgic_set_kvm_info(&gic_v3_kvm_info);
2116 static void gic_request_region(resource_size_t base, resource_size_t size,
	if (!request_mem_region(base, size, name))
		pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
			     name, &base);
}
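/*
 * gic_of_iomap() below returns either a live mapping or an ERR_PTR-encoded
 * iomem cookie, so callers can use IS_ERR()/PTR_ERR() uniformly instead of
 * checking for NULL.
 */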
2124 static void __iomem *gic_of_iomap(struct device_node *node, int idx,
2125 const char *name, struct resource *res)
	ret = of_address_to_resource(node, idx, res);
	if (ret)
		return IOMEM_ERR_PTR(ret);
2134 gic_request_region(res->start, resource_size(res), name);
2135 base = of_iomap(node, idx);
2137 return base ?: IOMEM_ERR_PTR(-ENOMEM);
2140 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
2142 phys_addr_t dist_phys_base;
2143 void __iomem *dist_base;
2144 struct redist_region *rdist_regs;
2145 struct resource res;
2147 u32 nr_redist_regions;
2150 dist_base = gic_of_iomap(node, 0, "GICD", &res);
2151 if (IS_ERR(dist_base)) {
2152 pr_err("%pOF: unable to map gic dist registers\n", node);
2153 return PTR_ERR(dist_base);
2156 dist_phys_base = res.start;
	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}
2164 if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
2165 nr_redist_regions = 1;
	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
			     GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}
2174 for (i = 0; i < nr_redist_regions; i++) {
2175 rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
2176 if (IS_ERR(rdist_regs[i].redist_base)) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
2181 rdist_regs[i].phys_base = res.start;
	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;
2187 gic_enable_of_quirks(node, gic_quirks, &gic_data);
2189 err = gic_init_bases(dist_phys_base, dist_base, rdist_regs,
2190 nr_redist_regions, redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;
2194 gic_populate_ppi_partitions(node);
2196 if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
2202 if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}
2210 IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
#ifdef CONFIG_ACPI
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	int enabled_rdists;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
2223 } acpi_data __initdata;
2226 gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
2228 static int count = 0;
2230 acpi_data.redist_regs[count].phys_base = phys_base;
2231 acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}
2237 gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
2238 const unsigned long end)
2240 struct acpi_madt_generic_redistributor *redist =
2241 (struct acpi_madt_generic_redistributor *)header;
2242 void __iomem *redist_base;
	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}
2249 gic_request_region(redist->base_address, redist->length, "GICR");
	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}
2256 gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
2257 const unsigned long end)
2259 struct acpi_madt_generic_interrupt *gicc =
2260 (struct acpi_madt_generic_interrupt *)header;
2261 u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
2262 u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
2263 void __iomem *redist_base;
	/* A GICC entry without ACPI_MADT_ENABLED is unusable, so skip it */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;
	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;

	gic_request_region(gicc->gicr_base_address, size, "GICR");
	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}
2278 static int __init gic_acpi_collect_gicr_base(void)
2280 acpi_tbl_entry_handler redist_parser;
2281 enum acpi_madt_type type;
2283 if (acpi_data.single_redist) {
2284 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
2285 redist_parser = gic_acpi_parse_madt_gicc;
2287 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
2288 redist_parser = gic_acpi_parse_madt_redist;
	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}
2299 static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
2300 const unsigned long end)
	/* Subtable presence means that redist exists, that's it */
	return 0;
}
2306 static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
2307 const unsigned long end)
2309 struct acpi_madt_generic_interrupt *gicc =
2310 (struct acpi_madt_generic_interrupt *)header;
	/*
	 * If GICC is enabled and has a valid gicr base address, then it means
	 * GICR base is presented via GICC.
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
		acpi_data.enabled_rdists++;
		return 0;
	}
	/*
	 * It's perfectly valid for firmware to pass a disabled GICC entry;
	 * don't treat it as an error, skip the entry instead of failing
	 * the probe.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	return -ENODEV;
}
2331 static int __init gic_acpi_count_gicr_regions(void)
	/*
	 * Count how many redistributor regions we have. It is not allowed
	 * to mix redistributor description, GICR and GICC subtables have to be
	 * mutually exclusive.
	 */
2340 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
2341 gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}
2347 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2348 gic_acpi_match_gicc, 0);
	if (count > 0) {
		acpi_data.single_redist = true;
		count = acpi_data.enabled_rdists;
	}

	return count;
}
2357 static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
2358 struct acpi_probe_entry *ape)
2360 struct acpi_madt_generic_distributor *dist;
2363 dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;
2367 /* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}
2376 static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
2377 const unsigned long end)
2379 struct acpi_madt_generic_interrupt *gicc =
2380 (struct acpi_madt_generic_interrupt *)header;
	static bool first_madt = true;
2384 /* Skip unusable CPUs */
2385 if (!(gicc->flags & ACPI_MADT_ENABLED))
2388 maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
2389 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}
	/*
	 * The maintenance interrupt and GICV should be the same for every CPU.
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}
static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}
2422 #define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
2423 #define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
2424 #define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
2426 static void __init gic_acpi_setup_kvm_info(void)
	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}
2435 gic_v3_kvm_info.type = GIC_V3;
2437 irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;
2445 if (acpi_data.vcpu_base) {
2446 struct resource *vcpu = &gic_v3_kvm_info.vcpu;
2448 vcpu->flags = IORESOURCE_MEM;
2449 vcpu->start = acpi_data.vcpu_base;
2450 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
2453 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2454 gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2455 vgic_set_kvm_info(&gic_v3_kvm_info);
2458 static struct fwnode_handle *gsi_domain_handle;
2460 static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
2462 return gsi_domain_handle;
2466 gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
2468 struct acpi_madt_generic_distributor *dist;
2472 /* Get distributor base address */
2473 dist = (struct acpi_madt_generic_distributor *)header;
2474 acpi_data.dist_base = ioremap(dist->base_address,
2475 ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}
2480 gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}
2489 size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
2490 acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}
2496 err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;
2500 gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}
2506 err = gic_init_bases(dist->base_address, acpi_data.dist_base,
2507 acpi_data.redist_regs, acpi_data.nr_redist_regions,
2508 0, gsi_domain_handle);
	if (err)
		goto out_fwhandle_free;
2512 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
2514 if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(gsi_domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
2523 if (acpi_data.redist_regs[i].redist_base)
2524 iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif