// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"
#ifdef CONFIG_ARM64
#include <asm/cpufeature.h>

static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}
#else
#define gic_check_cpu_features()	do { } while (0)
#endif
union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};
struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
	void __iomem *raw_dist_base;
	void __iomem *raw_cpu_base;
	u32 percpu_offset;
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_active;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
};
#ifdef CONFIG_BL_SWITCHER

static DEFINE_RAW_SPINLOCK(cpu_map_lock);

#define gic_lock_irqsave(f)		\
	raw_spin_lock_irqsave(&cpu_map_lock, (f))
#define gic_unlock_irqrestore(f)	\
	raw_spin_unlock_irqrestore(&cpu_map_lock, (f))

#define gic_lock()			raw_spin_lock(&cpu_map_lock)
#define gic_unlock()			raw_spin_unlock(&cpu_map_lock)

#else

#define gic_lock_irqsave(f)		do { (void)(f); } while (0)
#define gic_unlock_irqrestore(f)	do { (void)(f); } while (0)

#define gic_lock()			do { } while (0)
#define gic_unlock()			do { } while (0)

#endif
static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;

static struct gic_kvm_info gic_v2_kvm_info __initdata;

static DEFINE_PER_CPU(u32, sgi_intid);
#ifdef CONFIG_GIC_NON_BANKED
static DEFINE_STATIC_KEY_FALSE(frankengic_key);

static void enable_frankengic(void)
{
	static_branch_enable(&frankengic_key);
}

static inline void __iomem *__get_base(union gic_base *base)
{
	if (static_branch_unlikely(&frankengic_key))
		return raw_cpu_read(*base->percpu_base);

	return base->common_base;
}

#define gic_data_dist_base(d)	__get_base(&(d)->dist_base)
#define gic_data_cpu_base(d)	__get_base(&(d)->cpu_base)
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define enable_frankengic()	do { } while (0)
#endif
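
/*
 * Illustrative note (sketch only, values are hypothetical): a "Franken-GIC"
 * is described in DT with a "cpu-offset" property giving the per-CPU stride
 * between the otherwise unbanked register frames, e.g.:
 *
 *	interrupt-controller@10490000 {
 *		compatible = "arm,cortex-a9-gic";
 *		reg = <0x10490000 0x1000>, <0x10480000 0x100>;
 *		cpu-offset = <0x4000>;
 *	};
 *
 * A CPU whose MPIDR Aff0 field is n then accesses its frames at
 * base + n * cpu-offset, which is what __get_base() resolves through the
 * per-cpu pointers set up in gic_init_bases().
 */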
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);

	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);

	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool cascading_gic_irq(struct irq_data *d)
{
	void *data = irq_data_get_irq_handler_data(d);

	/*
	 * If handler_data is set, this is a cascading interrupt, and
	 * it cannot possibly be forwarded.
	 */
	return data != NULL;
}
/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}
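
/*
 * Worked example (illustrative): the SET/CLEAR banks are arrays of 32-bit
 * words with one bit per interrupt, so for hwirq 42 gic_poke_irq() writes
 * mask = 1 << (42 % 32) = BIT(10) to the word at offset + (42 / 32) * 4 =
 * offset + 4. Because the registers have set/clear semantics, the write
 * affects only that one interrupt and no read-modify-write is needed.
 */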
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (the guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}
static void gic_eoi_irq(struct irq_data *d)
{
	u32 hwirq = gic_irq(d);

	if (hwirq < 16)
		hwirq = this_cpu_read(sgi_intid);

	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI);
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	u32 hwirq = gic_irq(d);

	/* Do not deactivate an IRQ forwarded to a vcpu. */
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	if (hwirq < 16)
		hwirq = this_cpu_read(sgi_intid);

	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	int ret;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
	if (ret && gicirq < 32) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16);
		ret = 0;
	}

	return ret;
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
	if (cascading_gic_irq(d) || gic_irq(d) < 16)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (unlikely(irqnr >= 1020))
			break;

		if (static_branch_likely(&supports_deactivate_key))
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
		isb();

		/*
		 * Ensure any shared data written by the CPU sending the IPI
		 * is read after we've read the ACK register on the GIC.
		 *
		 * Pairs with the write barrier in gic_ipi_send_mask
		 */
		if (irqnr <= 15) {
			smp_rmb();

			/*
			 * The GIC encodes the source CPU in GICC_IAR, so
			 * deactivation fails unless the value is written
			 * back as-is to GICC_EOI. Stash the INTID away for
			 * gic_eoi_irq() to write back. This only works
			 * because we don't nest SGIs...
			 */
			this_cpu_write(sgi_intid, irqstat);
		}

		generic_handle_domain_irq(gic->domain, irqnr);
	} while (1);
}
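
/*
 * For reference (GICv2 architecture): GICC_IAR returns the INTID in
 * bits [9:0] and, for SGIs only, the source CPU interface in bits
 * [12:10]. That is why the full irqstat (not just irqnr) is stashed in
 * sgi_intid above: GICC_EOI/GICC_DIR must see the CPUID field unchanged
 * for the priority drop or deactivation to match the acknowledged SGI.
 */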
static void gic_handle_cascade_irq(struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int gic_irq;
	unsigned long status;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	isb();
	ret = generic_handle_domain_irq(chip_data->domain, gic_irq);
	if (unlikely(ret))
		handle_bad_irq(desc);
 out:
	chained_irq_exit(chip, desc);
}
static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);

	if (gic->domain->pm_dev)
		seq_printf(p, gic->domain->pm_dev->of_node->name);
	else
		seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0]));
}
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
					 &gic_data[gic_nr]);
}
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
static bool gic_check_gicv2(void __iomem *base)
{
	u32 val = readl_relaxed(base + GIC_CPU_IDENT);

	return (val & 0xff0fff) == 0x02043B;
}
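
/*
 * Illustrative decode of the check above: GIC_CPU_IDENT is GICC_IIDR,
 * where bits [11:0] hold the implementer (0x43B = ARM), [15:12] the
 * revision, [19:16] the architecture version and [31:20] the product ID.
 * The 0xff0fff mask drops the revision and the upper product-ID bits, so
 * any revision of an ARM GICv2 (architecture version 2) matches 0x02043B.
 */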
static void gic_cpu_if_up(struct gic_chip_data *gic)
{
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	u32 bypass = 0;
	u32 mode = 0;
	int i;

	if (gic == &gic_data[0] && static_branch_likely(&supports_deactivate_key))
		mode = GIC_CPU_CTRL_EOImodeNS;

	if (gic_check_gicv2(cpu_base))
		for (i = 0; i < 4; i++)
			writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4);

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}
static void gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
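
/*
 * Layout note (illustrative): GIC_DIST_TARGET (GICD_ITARGETSR) holds one
 * target byte per interrupt, four interrupts per 32-bit register. The
 * cpumask returned by gic_get_cpumask() is replicated into all four byte
 * lanes by the two shifts above (e.g. 0x01 becomes 0x01010101 for CPU
 * interface 0), so a single 32-bit write retargets four SPIs at once.
 */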
static int gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any nested/secondary GICs do not directly interface
	 * with the CPU(s).
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU mask is.
		 */
		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
			return -EINVAL;

		gic_check_cpu_features();
		cpu_mask = gic_get_cpumask(gic);
		gic_cpu_map[cpu] = cpu_mask;

		/*
		 * Clear our mask from the other map entries in case they're
		 * still undefined.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	gic_cpu_config(dist_base, 32, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);

	return 0;
}
int gic_cpu_if_down(unsigned int gic_nr)
{
	void __iomem *cpu_base;
	u32 val = 0;

	if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
		return -EINVAL;

	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

	return 0;
}
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
void gic_dist_save(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic->saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic->saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_active[i] =
			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
void gic_dist_restore(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic->saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic->saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_active[i],
			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}
void gic_cpu_save(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
void gic_cpu_restore(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			       dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(&gic_data[i]);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(&gic_data[i]);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
static int gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_enable))
		return -ENOMEM;

	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_active))
		goto free_ppi_enable;

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_conf))
		goto free_ppi_active;

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);

	return 0;

free_ppi_active:
	free_percpu(gic->saved_ppi_active);
free_ppi_enable:
	free_percpu(gic->saved_ppi_enable);

	return -ENOMEM;
}
#else
static int gic_pm_init(struct gic_chip_data *gic)
{
	return 0;
}
#endif
#ifdef CONFIG_SMP
static void rmw_writeb(u8 bval, void __iomem *addr)
{
	static DEFINE_RAW_SPINLOCK(rmw_lock);
	unsigned long offset = (unsigned long)addr & 3UL;
	unsigned long shift = offset * 8;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&rmw_lock, flags);

	addr -= offset;
	val = readl_relaxed(addr);
	val &= ~GENMASK(shift + 7, shift);
	val |= bval << shift;
	writel_relaxed(val, addr);

	raw_spin_unlock_irqrestore(&rmw_lock, flags);
}
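
/*
 * Worked example (illustrative): a byte write to an address ending in
 * ...3 has offset 3 and shift 24, so the code above reads the aligned
 * word at ...0, clears bits [31:24] and merges bval there before writing
 * the whole word back. The spinlock serialises concurrent RMWs to bytes
 * sharing one word, which a plain writeb_relaxed() would not need.
 */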
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
	unsigned int cpu;

	if (unlikely(gic != &gic_data[0]))
		return -EINVAL;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	if (static_branch_unlikely(&needs_rmw_access))
		rmw_writeb(gic_cpu_map[cpu], reg);
	else
		writeb_relaxed(gic_cpu_map[cpu], reg);
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;
	unsigned long flags, map = 0;

	if (unlikely(nr_cpu_ids == 1)) {
		/* Only one CPU? let's do a self-IPI... */
		writel_relaxed(2 << 24 | d->hwirq,
			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
		return;
	}

	gic_lock_irqsave(flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	gic_unlock_irqrestore(flags);
}
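
/*
 * For reference (GICv2 architecture): GIC_DIST_SOFTINT is GICD_SGIR, with
 * the SGI number in bits [3:0], the CPU target list in bits [23:16] and
 * the target list filter in bits [25:24]. "2 << 24" above selects filter
 * mode 2 (deliver to the requesting CPU only), hence the self-IPI
 * shortcut; "map << 16" uses filter mode 0 with an explicit target list.
 */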
static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init(&gic_data[0]);
	return 0;
}

static __init void gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data[0].domain->fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gic:starting",
				  gic_starting_cpu, NULL);

	base_sgi = __irq_domain_alloc_irqs(gic_data[0].domain, -1, 8,
					   NUMA_NO_NODE, &sgi_fwspec,
					   false, NULL);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}
#else
#define gic_smp_init()		do { } while (0)
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#endif
static const struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.ipi_send_mask		= gic_ipi_send_mask,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_print_chip		= gic_irq_print_chip,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static const struct irq_chip gic_chip_mode1 = {
	.name			= "GICv2",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.ipi_send_mask		= gic_ipi_send_mask,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}
/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	gic_lock();

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	gic_unlock();

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;

		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
					       dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}
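
/*
 * Illustrative example of the rotation trick above: with one target byte
 * per interrupt, moving from CPU interface 1 to interface 2 gives
 * ror_val = (1 - 2) & 31 = 31, and ror32(0x02020202, 31) = 0x04040404,
 * i.e. the per-byte target bit moves up by one in every lane at once.
 */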
/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

static void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;

	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct gic_chip_data *gic = d->host_data;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
	const struct irq_chip *chip;

	chip = (static_branch_likely(&supports_deactivate_key) &&
		gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip;

	switch (hw) {
	case 0 ... 31:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;
	default:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}
static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/* Make it clear that broken DTs are... broken */
		WARN(*type == IRQ_TYPE_NONE,
		     "HW irq %ld has invalid type\n", *hwirq);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN(*type == IRQ_TYPE_NONE,
		     "HW irq %ld has invalid type\n", *hwirq);
		return 0;
	}

	return -EINVAL;
}
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
};
static int gic_init_bases(struct gic_chip_data *gic,
			  struct fwnode_handle *handle)
{
	int gic_irqs, ret;

	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		/* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			ret = -ENOMEM;
			goto error;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = gic->percpu_offset * core_id;

			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
				gic->raw_dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
				gic->raw_cpu_base + offset;
		}

		enable_frankengic();
	} else {
		/* Normal, sane GIC... */
		WARN(gic->percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     gic->percpu_offset);
		gic->dist_base.common_base = gic->raw_dist_base;
		gic->cpu_base.common_base = gic->raw_cpu_base;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (handle) {		/* DT/ACPI */
		gic->domain = irq_domain_create_linear(handle, gic_irqs,
						       &gic_irq_domain_hierarchy_ops,
						       gic);
	} else {		/* Legacy support */
		/*
		 * For primary GICs, skip over SGIs.
		 * No secondary GIC support whatsoever.
		 */
		int irq_base;

		gic_irqs -= 16; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(16, 16, gic_irqs,
					   numa_node_id());
		if (irq_base < 0) {
			WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
			irq_base = 16;
		}

		gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
						    16, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain)) {
		ret = -ENODEV;
		goto error;
	}

	gic_dist_init(gic);
	ret = gic_cpu_init(gic);
	if (ret)
		goto error;

	ret = gic_pm_init(gic);
	if (ret)
		goto error;

	return 0;

error:
	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		free_percpu(gic->dist_base.percpu_base);
		free_percpu(gic->cpu_base.percpu_base);
	}

	return ret;
}
static int __init __gic_init_bases(struct gic_chip_data *gic,
				   struct fwnode_handle *handle)
{
	int i, ret;

	if (WARN_ON(!gic || gic->domain))
		return -EINVAL;

	if (gic == &gic_data[0]) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID.
		 * This is only necessary for the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;

		set_handle_irq(gic_handle_irq);
		if (static_branch_likely(&supports_deactivate_key))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	ret = gic_init_bases(gic, handle);
	if (gic == &gic_data[0])
		gic_smp_init();

	return ret;
}
void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
{
	struct gic_chip_data *gic;

	/*
	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
	 * bother with these...
	 */
	static_branch_disable(&supports_deactivate_key);

	gic = &gic_data[0];
	gic->raw_dist_base = dist_base;
	gic->raw_cpu_base = cpu_base;

	__gic_init_bases(gic, NULL);
}

static void gic_teardown(struct gic_chip_data *gic)
{
	if (WARN_ON(!gic))
		return;

	if (gic->raw_dist_base)
		iounmap(gic->raw_dist_base);
	if (gic->raw_cpu_base)
		iounmap(gic->raw_cpu_base);
}
#ifdef CONFIG_OF
static int gic_cnt __initdata;
static bool gicv2_force_probe;

static int __init gicv2_force_probe_cfg(char *buf)
{
	return kstrtobool(buf, &gicv2_force_probe);
}
early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
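
/*
 * Usage example: booting with "irqchip.gicv2_force_probe=1" on the kernel
 * command line lets gic_check_eoimode() below probe a GICv2 CPU interface
 * even when the firmware-described region is smaller than 8kB.
 */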
static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
	struct resource cpuif_res;

	of_address_to_resource(node, 1, &cpuif_res);

	if (!is_hyp_mode_available())
		return false;
	if (resource_size(&cpuif_res) < SZ_8K) {
		void __iomem *alt;
		/*
		 * Check for a stupid firmware that only exposes the
		 * first page of a GICv2.
		 */
		if (!gic_check_gicv2(*base))
			return false;

		if (!gicv2_force_probe) {
			pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
			return false;
		}

		alt = ioremap(cpuif_res.start, SZ_8K);
		if (!alt)
			return false;
		if (!gic_check_gicv2(alt + SZ_4K)) {
			/*
			 * The first page was that of a GICv2, and
			 * the second was *something*. Let's trust it
			 * to be a GICv2, and update the mapping.
			 */
			pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
				&cpuif_res.start);
			iounmap(*base);
			*base = alt;
			return true;
		}

		/*
		 * We detected *two* initial GICv2 pages in a
		 * row. Could be a GICv2 aliased over two 64kB
		 * pages. Update the resource, map the iospace, and
		 * pray.
		 */
		iounmap(alt);
		alt = ioremap(cpuif_res.start, SZ_128K);
		if (!alt)
			return false;
		pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
			&cpuif_res.start);
		cpuif_res.end = cpuif_res.start + SZ_128K - 1;
		iounmap(*base);
		*base = alt;
	}
	if (resource_size(&cpuif_res) == SZ_128K) {
		/*
		 * Verify that we have the first 4kB of a GICv2
		 * aliased over the first 64kB by checking the
		 * GICC_IIDR register on both ends.
		 */
		if (!gic_check_gicv2(*base) ||
		    !gic_check_gicv2(*base + 0xf000))
			return false;

		/*
		 * Move the base up by 60kB, so that we have a 8kB
		 * contiguous region, which allows us to use GICC_DIR
		 * at its normal offset. Please pass me that bucket.
		 */
		*base += 0xf000;
		cpuif_res.start += 0xf000;
		pr_warn("GIC: Adjusting CPU interface base to %pa\n",
			&cpuif_res.start);
	}

	return true;
}
static bool gic_enable_rmw_access(void *data)
{
	/*
	 * The EMEV2 class of machines has a broken interconnect, and
	 * locks up on accesses that are less than 32bit. So far, only
	 * the affinity setting requires it.
	 */
	if (of_machine_is_compatible("renesas,emev2")) {
		static_branch_enable(&needs_rmw_access);
		return true;
	}

	return false;
}

static const struct gic_quirk gic_quirks[] = {
	{
		.desc		= "broken byte access",
		.compatible	= "arm,pl390",
		.init		= gic_enable_rmw_access,
	},
	{ },
};
static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
	if (!gic || !node)
		return -EINVAL;

	gic->raw_dist_base = of_iomap(node, 0);
	if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
		goto error;

	gic->raw_cpu_base = of_iomap(node, 1);
	if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
		goto error;

	if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
		gic->percpu_offset = 0;

	gic_enable_of_quirks(node, gic_quirks, gic);

	return 0;

error:
	gic_teardown(gic);

	return -ENOMEM;
}
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	int ret;

	if (!dev || !dev->of_node || !gic || !irq)
		return -EINVAL;

	*gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
	if (!*gic)
		return -ENOMEM;

	ret = gic_of_setup(*gic, dev->of_node);
	if (ret)
		return ret;

	ret = gic_init_bases(*gic, &dev->of_node->fwnode);
	if (ret) {
		gic_teardown(*gic);
		return ret;
	}

	irq_domain_set_pm_device((*gic)->domain, dev);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);

	return 0;
}
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v2_kvm_info.maint_irq)
		return;

	ret = of_address_to_resource(node, 2, vctrl_res);
	if (ret)
		return;

	ret = of_address_to_resource(node, 3, vcpu_res);
	if (ret)
		return;

	if (static_branch_likely(&supports_deactivate_key))
		vgic_set_kvm_info(&gic_v2_kvm_info);
}
int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	struct gic_chip_data *gic;
	int irq, ret;

	if (WARN_ON(!node))
		return -ENODEV;

	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
		return -EINVAL;

	gic = &gic_data[gic_cnt];

	ret = gic_of_setup(gic, node);
	if (ret)
		return ret;

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
		static_branch_disable(&supports_deactivate_key);

	ret = __gic_init_bases(gic, &node->fwnode);
	if (ret) {
		gic_teardown(gic);
		return ret;
	}

	if (!gic_cnt) {
		gic_init_physaddr(node);
		gic_of_setup_kvm_info(node);
	}

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
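
/*
 * Illustrative DT node matched by the compatibles above (sketch only,
 * the addresses are made up):
 *
 *	intc: interrupt-controller@2c001000 {
 *		compatible = "arm,gic-400";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x2c001000 0x1000>,	(distributor)
 *		      <0x2c002000 0x2000>;	(CPU interface)
 *	};
 */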
#else
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	return -ENOTSUPP;
}
#endif
#ifdef CONFIG_ACPI
static struct
{
	phys_addr_t cpu_phys_base;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vctrl_base;
	phys_addr_t vcpu_base;
} acpi_data __initdata;
static int __init
gic_acpi_parse_madt_cpu(union acpi_subtable_headers *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 registers in the ACPI
	 * spec. All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
		return -EINVAL;

	acpi_data.cpu_phys_base = gic_cpu_base;
	acpi_data.maint_irq = processor->vgic_interrupt;
	acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
				    ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
	acpi_data.vctrl_base = processor->gich_base_address;
	acpi_data.vcpu_base = processor->gicv_base_address;

	cpu_base_assigned = 1;
	return 0;
}
/* The things you have to do to just *count* something... */
static int __init acpi_dummy_func(union acpi_subtable_headers *header,
				  const unsigned long end)
{
	return 0;
}

static bool __init acpi_gic_redist_is_present(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				     acpi_dummy_func, 0) > 0;
}
static bool __init gic_validate_dist(struct acpi_subtable_header *header,
				     struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;

	dist = (struct acpi_madt_generic_distributor *)header;

	return (dist->version == ape->driver_data &&
		(dist->version != ACPI_MADT_GIC_VERSION_NONE ||
		 !acpi_gic_redist_is_present()));
}
#define ACPI_GICV2_DIST_MEM_SIZE	(SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE	(SZ_8K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	if (!acpi_data.vctrl_base)
		return;

	vctrl_res->flags = IORESOURCE_MEM;
	vctrl_res->start = acpi_data.vctrl_base;
	vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;

	if (!acpi_data.vcpu_base)
		return;

	vcpu_res->flags = IORESOURCE_MEM;
	vcpu_res->start = acpi_data.vcpu_base;
	vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v2_kvm_info.maint_irq = irq;

	vgic_set_kvm_info(&gic_v2_kvm_info);
}
static struct fwnode_handle *gsi_domain_handle;

static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi)
{
	return gsi_domain_handle;
}
static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct gic_chip_data *gic = &gic_data[0];
	int count, ret;

	/* Collect CPU base addresses */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_madt_cpu, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!gic->raw_cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist = (struct acpi_madt_generic_distributor *)header;
	gic->raw_dist_base = ioremap(dist->base_address,
				     ACPI_GICV2_DIST_MEM_SIZE);
	if (!gic->raw_dist_base) {
		pr_err("Unable to map GICD registers\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	/*
	 * Disable split EOI/Deactivate if HYP is not available. ACPI
	 * guarantees that we'll always have a GICv2, so the CPU
	 * interface will always be the right size.
	 */
	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	/*
	 * Initialize GIC instance zero (no multi-GIC support).
	 */
	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	ret = __gic_init_bases(gic, gsi_domain_handle);
	if (ret) {
		pr_err("Failed to initialise GIC\n");
		irq_domain_free_fwnode(gsi_domain_handle);
		gic_teardown(gic);
		return ret;
	}

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id);

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(NULL, gic_data[0].domain);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;
}
IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
		     gic_v2_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
		     gic_v2_acpi_init);
#endif