// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt)     "GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kstrtox.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

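/*
 * Pseudo-NMIs run at the default interrupt priority with bit 7
 * cleared, i.e. at a numerically lower and therefore strictly higher
 * priority than any regular interrupt (0x20, assuming the default
 * GICD_INT_DEF_PRI of 0xa0).
 */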
#define GICD_INT_NMI_PRI        (GICD_INT_DEF_PRI & ~0x80)

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996     (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539   (1ULL << 1)

#define GIC_IRQ_TYPE_PARTITION  (GIC_IRQ_TYPE_LPI + 1)

struct redist_region {
        void __iomem            *redist_base;
        phys_addr_t             phys_base;
        bool                    single_redist;
};

struct gic_chip_data {
        struct fwnode_handle    *fwnode;
        void __iomem            *dist_base;
        struct redist_region    *redist_regions;
        struct rdists           rdists;
        struct irq_domain       *domain;
        u64                     redist_stride;
        u32                     nr_redist_regions;
        u64                     flags;
        bool                    has_rss;
        unsigned int            ppi_nr;
        struct partition_desc   **ppi_descs;
};

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR       (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR     min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR     GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * see GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
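 *
 * For example, with security enabled, a priority of 0xa0 written to
 * GICD_IPRIORITYRn is presented to the CPU interface as
 * (0xa0 >> 1) | 0x80 = 0xd0.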
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);

/*
 * When the Non-secure world has access to group 0 interrupts (as a
 * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
 * return the Distributor's view of the interrupt priority.
 *
 * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
 * written by software is moved to the Non-secure range by the Distributor.
 *
 * If both are true (which is when gic_nonsecure_priorities gets enabled),
 * we need to shift down the priority programmed by software to match it
 * against the value returned by ICC_RPR_EL1.
 */
#define GICD_INT_RPR_PRI(priority)                                      \
        ({                                                              \
                u32 __priority = (priority);                            \
                if (static_branch_unlikely(&gic_nonsecure_priorities))  \
                        __priority = 0x80 | (__priority >> 1);          \
                                                                        \
                __priority;                                             \
        })

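/*
 * For example, assuming the default GICD_INT_DEF_PRI of 0xa0,
 * GICD_INT_NMI_PRI is 0x20, and the RPR value that identifies an NMI
 * with gic_nonsecure_priorities enabled is 0x80 | (0x20 >> 1) = 0x90.
 */
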
/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);

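/*
 * Each redistributor is a pair of 64K frames: RD_base for the control
 * registers, immediately followed by SGI_base for the SGI/PPI
 * registers, hence the SZ_64K offset below.
 */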
#define MPIDR_RS(mpidr)                 (((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()                (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()       (gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE       0xf0

enum gic_intid_range {
        SGI_RANGE,
        PPI_RANGE,
        SPI_RANGE,
        EPPI_RANGE,
        ESPI_RANGE,
        LPI_RANGE,
        __INVALID_RANGE__
};

static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
        switch (hwirq) {
        case 0 ... 15:
                return SGI_RANGE;
        case 16 ... 31:
                return PPI_RANGE;
        case 32 ... 1019:
                return SPI_RANGE;
        case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
                return EPPI_RANGE;
        case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
                return ESPI_RANGE;
        case 8192 ... GENMASK(23, 0):
                return LPI_RANGE;
        default:
                return __INVALID_RANGE__;
        }
}

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
        return __get_intid_range(d->hwirq);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
        switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case EPPI_RANGE:
                return true;
        default:
                return false;
        }
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case EPPI_RANGE:
                /* SGI+PPI -> SGI_base for this CPU */
                return gic_data_rdist_sgi_base();

        case SPI_RANGE:
        case ESPI_RANGE:
                /* SPI -> dist_base */
                return gic_data.dist_base;

        default:
                return NULL;
        }
}

static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
        u32 count = 1000000;    /* 1s! */

        while (readl_relaxed(base + GICD_CTLR) & bit) {
                count--;
                if (!count) {
                        pr_err_ratelimited("RWP timeout, gone fishing\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}

#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
                return gic_read_iar_cavium_thunderx();
        else
                return gic_read_iar_common();
}
#endif

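/*
 * Wake up (or allow to sleep) this CPU's redistributor via
 * GICR_WAKER.ProcessorSleep, then poll ChildrenAsleep until the
 * transition has actually taken effect.
 */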
static void gic_enable_redist(bool enable)
{
        void __iomem *rbase;
        u32 count = 1000000;    /* 1s! */
        u32 val;

        if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
                return;

        rbase = gic_data_rdist_rd_base();

        val = readl_relaxed(rbase + GICR_WAKER);
        if (enable)
                /* Wake up this CPU redistributor */
                val &= ~GICR_WAKER_ProcessorSleep;
        else
                val |= GICR_WAKER_ProcessorSleep;
        writel_relaxed(val, rbase + GICR_WAKER);

        if (!enable) {          /* Check that GICR_WAKER is writeable */
                val = readl_relaxed(rbase + GICR_WAKER);
                if (!(val & GICR_WAKER_ProcessorSleep))
                        return; /* No PM support in this redistributor */
        }

        while (--count) {
                val = readl_relaxed(rbase + GICR_WAKER);
                if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
                        break;
                cpu_relax();
                udelay(1);
        }
        if (!count)
                pr_err_ratelimited("redistributor failed to %s...\n",
                                   enable ? "wakeup" : "sleep");
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
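/*
 * Translate a (register offset, hwirq) pair into the offset of the
 * register bank actually backing it and the index within that bank:
 * ESPIs are remapped onto their dedicated GICD_*nE registers, while
 * EPPIs are folded into the contiguous PPI index space.
 */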
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
        switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case SPI_RANGE:
                *index = d->hwirq;
                return offset;
        case EPPI_RANGE:
                /*
                 * Contrary to the ESPI range, the EPPI range is contiguous
                 * to the PPI range in the registers, so let's adjust the
                 * displacement accordingly. Consistency is overrated.
                 */
                *index = d->hwirq - EPPI_BASE_INTID + 32;
                return offset;
        case ESPI_RANGE:
                *index = d->hwirq - ESPI_BASE_INTID;
                switch (offset) {
                case GICD_ISENABLER:
                        return GICD_ISENABLERnE;
                case GICD_ICENABLER:
                        return GICD_ICENABLERnE;
                case GICD_ISPENDR:
                        return GICD_ISPENDRnE;
                case GICD_ICPENDR:
                        return GICD_ICPENDRnE;
                case GICD_ISACTIVER:
                        return GICD_ISACTIVERnE;
                case GICD_ICACTIVER:
                        return GICD_ICACTIVERnE;
                case GICD_IPRIORITYR:
                        return GICD_IPRIORITYRnE;
                case GICD_ICFGR:
                        return GICD_ICFGRnE;
                case GICD_IROUTER:
                        return GICD_IROUTERnE;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        WARN_ON(1);
        *index = d->hwirq;
        return offset;
}

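/*
 * gic_peek_irq()/gic_poke_irq() read or set a single interrupt's bit
 * in one of the 1-bit-per-interrupt (re)distributor register banks.
 */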
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        void __iomem *base;
        u32 index, mask;

        offset = convert_offset_index(d, offset, &index);
        mask = 1 << (index % 32);

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_data.dist_base;

        return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        void __iomem *base;
        u32 index, mask;

        offset = convert_offset_index(d, offset, &index);
        mask = 1 << (index % 32);

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_data.dist_base;

        writel_relaxed(mask, base + offset + (index / 32) * 4);
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ICENABLER);
        if (gic_irq_in_rdist(d))
                gic_redist_wait_for_rwp();
        else
                gic_dist_wait_for_rwp();
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
        gic_mask_irq(d);
        /*
         * When masking a forwarded interrupt, make sure it is
         * deactivated as well.
         *
         * This ensures that an interrupt that is getting
         * disabled/masked will not get "stuck", because there is
         * no one to deactivate it (guest is being terminated).
         */
        if (irqd_is_forwarded_to_vcpu(d))
                gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
        return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
               static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool val)
{
        u32 reg;

        if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
                return -EINVAL;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                reg = val ? GICD_ISPENDR : GICD_ICPENDR;
                break;

        case IRQCHIP_STATE_ACTIVE:
                reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
                break;

        case IRQCHIP_STATE_MASKED:
                if (val) {
                        gic_mask_irq(d);
                        return 0;
                }
                reg = GICD_ISENABLER;
                break;

        default:
                return -EINVAL;
        }

        gic_poke_irq(d, reg);
        return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool *val)
{
        if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
                return -EINVAL;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                *val = gic_peek_irq(d, GICD_ISPENDR);
                break;

        case IRQCHIP_STATE_ACTIVE:
                *val = gic_peek_irq(d, GICD_ISACTIVER);
                break;

        case IRQCHIP_STATE_MASKED:
                *val = !gic_peek_irq(d, GICD_ISENABLER);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
        void __iomem *base = gic_dist_base(d);
        u32 offset, index;

        offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

        writeb_relaxed(prio, base + offset + index);
}

static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
{
        switch (__get_intid_range(hwirq)) {
        case PPI_RANGE:
                return hwirq - 16;
        case EPPI_RANGE:
                return hwirq - EPPI_BASE_INTID + 16;
        default:
                unreachable();
        }
}

static u32 gic_get_ppi_index(struct irq_data *d)
{
        return __gic_get_ppi_index(d->hwirq);
}

static int gic_irq_nmi_setup(struct irq_data *d)
{
        struct irq_desc *desc = irq_to_desc(d->irq);

        if (!gic_supports_nmi())
                return -EINVAL;

        if (gic_peek_irq(d, GICD_ISENABLER)) {
                pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
                return -EINVAL;
        }

        /*
         * A secondary irq_chip should be in charge of LPI requests;
         * it should not be possible to get here.
         */
        if (WARN_ON(gic_irq(d) >= 8192))
                return -EINVAL;

        /* desc lock should already be held */
        if (gic_irq_in_rdist(d)) {
                u32 idx = gic_get_ppi_index(d);

                /* Setting up PPI as NMI, only switch handler for first NMI */
                if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
                        refcount_set(&ppi_nmi_refs[idx], 1);
                        desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
                }
        } else {
                desc->handle_irq = handle_fasteoi_nmi;
        }

        gic_irq_set_prio(d, GICD_INT_NMI_PRI);

        return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
        struct irq_desc *desc = irq_to_desc(d->irq);

        if (WARN_ON(!gic_supports_nmi()))
                return;

        if (gic_peek_irq(d, GICD_ISENABLER)) {
                pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
                return;
        }

        /*
         * A secondary irq_chip should be in charge of LPI requests;
         * it should not be possible to get here.
         */
        if (WARN_ON(gic_irq(d) >= 8192))
                return;

        /* desc lock should already be held */
        if (gic_irq_in_rdist(d)) {
                u32 idx = gic_get_ppi_index(d);

                /* Tearing down NMI, only switch handler for last NMI */
                if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
                        desc->handle_irq = handle_percpu_devid_irq;
        } else {
                desc->handle_irq = handle_fasteoi_irq;
        }

        gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}

static void gic_eoi_irq(struct irq_data *d)
{
        write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
        isb();
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
        /*
         * No need to deactivate an LPI, or an interrupt that
         * is getting forwarded to a vcpu.
         */
        if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
                return;
        gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        enum gic_intid_range range;
        unsigned int irq = gic_irq(d);
        void __iomem *base;
        u32 offset, index;
        int ret;

        range = get_intid_range(d);

        /* Interrupt configuration for SGIs can't be changed */
        if (range == SGI_RANGE)
                return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

        /* SPIs have restrictions on the supported types */
        if ((range == SPI_RANGE || range == ESPI_RANGE) &&
            type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_data.dist_base;

        offset = convert_offset_index(d, GICD_ICFGR, &index);

        ret = gic_configure_irq(index, type, base + offset, NULL);
        if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
                /* Misconfigured PPIs are usually not fatal */
                pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
                ret = 0;
        }

        return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
        if (get_intid_range(d) == SGI_RANGE)
                return -EINVAL;

        if (vcpu)
                irqd_set_forwarded_to_vcpu(d);
        else
                irqd_clr_forwarded_to_vcpu(d);
        return 0;
}

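/*
 * Pack the MPIDR affinity levels into the 64-bit layout used by
 * GICD_IROUTERn: Aff3 in bits [39:32], Aff2/Aff1/Aff0 in bits [23:0].
 */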
static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
        u64 aff;

        aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        return aff;
}

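/*
 * Complete the handshake for an interrupt we are not going to handle:
 * in EOImode 1 the priority drop has already happened, so only a
 * deactivation is needed (and LPIs have no active state); in EOImode 0
 * a single EOIR write does both.
 */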
static void gic_deactivate_unhandled(u32 irqnr)
{
        if (static_branch_likely(&supports_deactivate_key)) {
                if (irqnr < 8192)
                        gic_write_dir(irqnr);
        } else {
                write_gicreg(irqnr, ICC_EOIR1_EL1);
                isb();
        }
}

/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Deactivate the interrupt when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
        if (static_branch_likely(&supports_deactivate_key))
                write_gicreg(irqnr, ICC_EOIR1_EL1);

        isb();
}

static bool gic_rpr_is_nmi_prio(void)
{
        if (!gic_supports_nmi())
                return false;

        return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
}

static bool gic_irqnr_is_special(u32 irqnr)
{
        return irqnr >= 1020 && irqnr <= 1023;
}

static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
{
        if (gic_irqnr_is_special(irqnr))
                return;

        gic_complete_ack(irqnr);

        if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
                WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
                gic_deactivate_unhandled(irqnr);
        }
}

static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
        if (gic_irqnr_is_special(irqnr))
                return;

        gic_complete_ack(irqnr);

        if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
                WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
                gic_deactivate_unhandled(irqnr);
        }
}

/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
        bool is_nmi;
        u32 irqnr;

        irqnr = gic_read_iar();

        is_nmi = gic_rpr_is_nmi_prio();

        if (is_nmi) {
                nmi_enter();
                __gic_handle_nmi(irqnr, regs);
                nmi_exit();
        }

        if (gic_prio_masking_enabled()) {
                gic_pmr_mask_irqs();
                gic_arch_enable_irqs();
        }

        if (!is_nmi)
                __gic_handle_irq(irqnr, regs);
}

/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
        u64 pmr;
        u32 irqnr;

        /*
         * We were in a context with IRQs disabled. However, the
         * entry code has set PMR to a value that allows any
         * interrupt to be acknowledged, and not just NMIs. This can
         * lead to surprising effects if the NMI has been retired in
         * the meantime and an IRQ is pending. The IRQ would then be
         * taken in NMI context, something that nobody wants to debug
         * twice.
         *
         * Until we sort this, drop PMR again to a level that will
         * actually only allow NMIs before reading IAR, and then
         * restore it to what it was.
         */
        pmr = gic_read_pmr();
        gic_pmr_mask_irqs();
        isb();
        irqnr = gic_read_iar();
        gic_write_pmr(pmr);

        __gic_handle_nmi(irqnr, regs);
}

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
                __gic_handle_irq_from_irqsoff(regs);
        else
                __gic_handle_irq_from_irqson(regs);
}

static u32 gic_get_pribits(void)
{
        u32 pribits;

        pribits = gic_read_ctlr();
        pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
        pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
        pribits++;

        return pribits;
}

static bool gic_has_group0(void)
{
        u32 val;
        u32 old_pmr;

        old_pmr = gic_read_pmr();

        /*
         * Let's find out if Group0 is under control of EL3 or not by
         * setting the highest possible, non-zero priority in PMR.
         *
         * If SCR_EL3.FIQ is set, the priority gets shifted down in
         * order for the CPU interface to set bit 7, and keep the
         * actual priority in the non-secure range. In the process, it
         * loses the least significant bit and the actual priority
         * becomes 0x80. Reading it back returns 0, indicating that
         * we don't have access to Group0.
         */
        gic_write_pmr(BIT(8 - gic_get_pribits()));
        val = gic_read_pmr();

        gic_write_pmr(old_pmr);

        return val != 0;
}

static void __init gic_dist_init(void)
{
        unsigned int i;
        u64 affinity;
        void __iomem *base = gic_data.dist_base;
        u32 val;

        /* Disable the distributor */
        writel_relaxed(0, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        /*
         * Configure SPIs as non-secure Group-1. This will only matter
         * if the GIC only has a single security state. This will not
         * do the right thing if the kernel is running in secure mode,
         * but that's not the intended use case anyway.
         */
        for (i = 32; i < GIC_LINE_NR; i += 32)
                writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

        /* Extended SPI range, not handled by the GICv2/GICv3 common code */
        for (i = 0; i < GIC_ESPI_NR; i += 32) {
                writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
                writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
        }

        for (i = 0; i < GIC_ESPI_NR; i += 32)
                writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

        for (i = 0; i < GIC_ESPI_NR; i += 16)
                writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

        for (i = 0; i < GIC_ESPI_NR; i += 4)
                writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);

        /* Now do the common stuff */
        gic_dist_config(base, GIC_LINE_NR, NULL);

        val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
        if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
                pr_info("Enabling SGIs without active state\n");
                val |= GICD_CTLR_nASSGIreq;
        }

        /* Enable distributor with ARE, Group1, and wait for it to drain */
        writel_relaxed(val, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        /*
         * Set all global interrupts to the boot CPU only. ARE must be
         * enabled.
         */
        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
        for (i = 32; i < GIC_LINE_NR; i++)
                gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);

        for (i = 0; i < GIC_ESPI_NR; i++)
                gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
        int ret = -ENODEV;
        int i;

        for (i = 0; i < gic_data.nr_redist_regions; i++) {
                void __iomem *ptr = gic_data.redist_regions[i].redist_base;
                u64 typer;
                u32 reg;

                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
                if (reg != GIC_PIDR2_ARCH_GICv3 &&
                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
                        pr_warn("No redistributor present @%p\n", ptr);
                        break;
                }

                do {
                        typer = gic_read_typer(ptr + GICR_TYPER);
                        ret = fn(gic_data.redist_regions + i, ptr);
                        if (!ret)
                                return 0;

                        if (gic_data.redist_regions[i].single_redist)
                                break;

                        if (gic_data.redist_stride) {
                                ptr += gic_data.redist_stride;
                        } else {
                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                                if (typer & GICR_TYPER_VLPIS)
                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
                        }
                } while (!(typer & GICR_TYPER_LAST));
        }

        return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
        unsigned long mpidr = cpu_logical_map(smp_processor_id());
        u64 typer;
        u32 aff;

        /*
         * Convert affinity to a 32bit value that can be matched to
         * GICR_TYPER bits [63:32].
         */
        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        typer = gic_read_typer(ptr + GICR_TYPER);
        if ((typer >> 32) == aff) {
                u64 offset = ptr - region->redist_base;
                raw_spin_lock_init(&gic_data_rdist()->rd_lock);
                gic_data_rdist_rd_base() = ptr;
                gic_data_rdist()->phys_base = region->phys_base + offset;

                pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
                        smp_processor_id(), mpidr,
                        (int)(region - gic_data.redist_regions),
                        &gic_data_rdist()->phys_base);
                return 0;
        }

        /* Try next one */
        return 1;
}

static int gic_populate_rdist(void)
{
        if (gic_iterate_rdists(__gic_populate_rdist) == 0)
                return 0;

        /* We couldn't even deal with ourselves... */
        WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
             smp_processor_id(),
             (unsigned long)cpu_logical_map(smp_processor_id()));
        return -ENODEV;
}

static int __gic_update_rdist_properties(struct redist_region *region,
                                         void __iomem *ptr)
{
        u64 typer = gic_read_typer(ptr + GICR_TYPER);
        u32 ctlr = readl_relaxed(ptr + GICR_CTLR);

        /* Boot-time cleanup */
        if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
                u64 val;

                /* Deactivate any present vPE */
                val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
                if (val & GICR_VPENDBASER_Valid)
                        gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
                                              ptr + SZ_128K + GICR_VPENDBASER);

                /* Mark the VPE table as invalid */
                val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
                val &= ~GICR_VPROPBASER_4_1_VALID;
                gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
        }

        gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

        /*
         * TYPER.RVPEID implies some form of DirectLPI, no matter what the
         * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
         * that the ITS driver can make use of for LPIs (and not VLPIs).
         *
         * These are 3 different ways to express the same thing, depending
         * on the revision of the architecture and its relaxations over
         * time. Just group them under the 'direct_lpi' banner.
         */
        gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
        gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
                                           !!(ctlr & GICR_CTLR_IR) |
                                           gic_data.rdists.has_rvpeid);
        gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

        /* Detect nonsensical configurations */
        if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
                gic_data.rdists.has_direct_lpi = false;
                gic_data.rdists.has_vlpis = false;
                gic_data.rdists.has_rvpeid = false;
        }

        gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

        return 1;
}

static void gic_update_rdist_properties(void)
{
        gic_data.ppi_nr = UINT_MAX;
        gic_iterate_rdists(__gic_update_rdist_properties);
        if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
                gic_data.ppi_nr = 0;
        pr_info("GICv3 features: %d PPIs%s%s\n",
                gic_data.ppi_nr,
                gic_data.has_rss ? ", RSS" : "",
                gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");

        if (gic_data.rdists.has_vlpis)
                pr_info("GICv4 features: %s%s%s\n",
                        gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
                        gic_data.rdists.has_rvpeid ? "RVPEID " : "",
                        gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}

/* Check whether it's single security state view */
static inline bool gic_dist_security_disabled(void)
{
        return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static void gic_cpu_sys_reg_init(void)
{
        int i, cpu = smp_processor_id();
        u64 mpidr = cpu_logical_map(cpu);
        u64 need_rss = MPIDR_RS(mpidr);
        bool group0;
        u32 pribits;

        /*
         * Need to check that the SRE bit has actually been set. If
         * not, it means that SRE is disabled at EL2. We're going to
         * die painfully, and there is nothing we can do about it.
         *
         * Kindly inform the luser.
         */
        if (!gic_enable_sre())
                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

        pribits = gic_get_pribits();

        group0 = gic_has_group0();

        /* Set priority mask register */
        if (!gic_prio_masking_enabled()) {
                write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
        } else if (gic_supports_nmi()) {
                /*
                 * Mismatched configuration with the boot CPU; the system is
                 * likely to die as interrupt masking will not work properly
                 * on all CPUs.
                 *
                 * The boot CPU calls this function before enabling NMI support,
                 * and as a result we'll never see this warning in the boot path
                 * for that CPU.
                 */
                if (static_branch_unlikely(&gic_nonsecure_priorities))
                        WARN_ON(!group0 || gic_dist_security_disabled());
                else
                        WARN_ON(group0 && !gic_dist_security_disabled());
        }

        /*
         * Some firmwares hand over to the kernel with the BPR changed from
         * its reset value (and with a value large enough to prevent
         * any pre-emptive interrupts from working at all). Writing a zero
         * to BPR restores its reset value.
         */
        gic_write_bpr1(0);

        if (static_branch_likely(&supports_deactivate_key)) {
                /* EOI drops priority only (mode 1) */
                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
        } else {
                /* EOI deactivates interrupt too (mode 0) */
                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
        }

        /* Always whack Group0 before Group1 */
        if (group0) {
                switch (pribits) {
                case 8:
                case 7:
                        write_gicreg(0, ICC_AP0R3_EL1);
                        write_gicreg(0, ICC_AP0R2_EL1);
                        fallthrough;
                case 6:
                        write_gicreg(0, ICC_AP0R1_EL1);
                        fallthrough;
                case 5:
                case 4:
                        write_gicreg(0, ICC_AP0R0_EL1);
                }

                isb();
        }

        switch (pribits) {
        case 8:
        case 7:
                write_gicreg(0, ICC_AP1R3_EL1);
                write_gicreg(0, ICC_AP1R2_EL1);
                fallthrough;
        case 6:
                write_gicreg(0, ICC_AP1R1_EL1);
                fallthrough;
        case 5:
        case 4:
                write_gicreg(0, ICC_AP1R0_EL1);
        }

        isb();

        /* ... and let's hit the road... */
        gic_write_grpen1(1);

        /* Keep the RSS capability status in per_cpu variable */
        per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

        /* Check that all the CPUs are capable of sending SGIs to other CPUs */
        for_each_online_cpu(i) {
                bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

                need_rss |= MPIDR_RS(cpu_logical_map(i));
                if (need_rss && (!have_rss))
                        pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
                                cpu, (unsigned long)mpidr,
                                i, (unsigned long)cpu_logical_map(i));
        }

        /*
         * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
         * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
         * UNPREDICTABLE choice of:
         *   - The write is ignored.
         *   - The RS field is treated as 0.
         */
        if (need_rss && (!gic_data.has_rss))
                pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
        return kstrtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
        return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
                !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
                !gicv3_nolpi);
}

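/* Per-CPU bring-up of the redistributor and the CPU interface. */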
static void gic_cpu_init(void)
{
        void __iomem *rbase;
        int i;

        /* Register ourselves with the rest of the world */
        if (gic_populate_rdist())
                return;

        gic_enable_redist(true);

        WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
             !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
             "Distributor has extended ranges, but CPU%d doesn't\n",
             smp_processor_id());

        rbase = gic_data_rdist_sgi_base();

        /* Configure SGIs/PPIs as non-secure Group-1 */
        for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
                writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

        gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);

        /* initialise system registers */
        gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)  (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)  ((mpidr) & ~0xFUL)

static int gic_starting_cpu(unsigned int cpu)
{
        gic_cpu_init();

        if (gic_dist_supports_lpis())
                its_cpu_init();

        return 0;
}

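/*
 * Build the 16-bit ICC_SGI1R_EL1 target list covering the CPUs of
 * @mask that share @cluster_id (one bit per Aff0[3:0] value),
 * advancing *base_cpu to the last CPU included in the list.
 */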
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   unsigned long cluster_id)
{
        int next_cpu, cpu = *base_cpu;
        unsigned long mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;

        while (cpu < nr_cpu_ids) {
                tlist |= 1 << (mpidr & 0xf);

                next_cpu = cpumask_next(cpu, mask);
                if (next_cpu >= nr_cpu_ids)
                        goto out;
                cpu = next_cpu;

                mpidr = cpu_logical_map(cpu);

                if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
                        cpu--;
                        goto out;
                }
        }
out:
        *base_cpu = cpu;
        return tlist;
}

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
        (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
                << ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
        u64 val;

        val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)     |
               MPIDR_TO_SGI_AFFINITY(cluster_id, 2)     |
               irq << ICC_SGI1R_SGI_ID_SHIFT            |
               MPIDR_TO_SGI_AFFINITY(cluster_id, 1)     |
               MPIDR_TO_SGI_RS(cluster_id)              |
               tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

        pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
        gic_write_sgi1r(val);
}

static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
        int cpu;

        if (WARN_ON(d->hwirq >= 16))
                return;

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        dsb(ishst);

        for_each_cpu(cpu, mask) {
                u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
                u16 tlist;

                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
                gic_send_sgi(cluster_id, tlist, d->hwirq);
        }

        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
        isb();
}

static void __init gic_smp_init(void)
{
        struct irq_fwspec sgi_fwspec = {
                .fwnode         = gic_data.fwnode,
                .param_count    = 1,
        };
        int base_sgi;

        cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
                                  "irqchip/arm/gicv3:starting",
                                  gic_starting_cpu, NULL);

        /* Register all 8 non-secure SGIs */
        base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
        if (WARN_ON(base_sgi <= 0))
                return;

        set_smp_ipi_range(base_sgi, 8);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu;
        u32 offset, index;
        void __iomem *reg;
        int enabled;
        u64 val;

        if (force)
                cpu = cpumask_first(mask_val);
        else
                cpu = cpumask_any_and(mask_val, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        if (gic_irq_in_rdist(d))
                return -EINVAL;

        /* If interrupt was enabled, disable it first */
        enabled = gic_peek_irq(d, GICD_ISENABLER);
        if (enabled)
                gic_mask_irq(d);

        offset = convert_offset_index(d, GICD_IROUTER, &index);
        reg = gic_dist_base(d) + offset + (index * 8);
        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

        gic_write_irouter(val, reg);

        /*
         * If the interrupt was enabled, enable it again. Otherwise,
         * just wait for the distributor to have digested our changes.
         */
        if (enabled)
                gic_unmask_irq(d);

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity        NULL
#define gic_ipi_send_mask       NULL
#define gic_smp_init()          do { } while (0)
#endif

static int gic_retrigger(struct irq_data *data)
{
        return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}

#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
                               unsigned long cmd, void *v)
{
        if (cmd == CPU_PM_EXIT) {
                if (gic_dist_security_disabled())
                        gic_enable_redist(true);
                gic_cpu_sys_reg_init();
        } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
                gic_write_grpen1(0);
                gic_enable_redist(false);
        }
        return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
        .notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
        cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

static struct irq_chip gic_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
        .irq_retrigger          = gic_retrigger,
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
        .irq_nmi_setup          = gic_irq_nmi_setup,
        .irq_nmi_teardown       = gic_irq_nmi_teardown,
        .ipi_send_mask          = gic_ipi_send_mask,
        .flags                  = IRQCHIP_SET_TYPE_MASKED |
                                  IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_eoimode1_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoimode1_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
        .irq_retrigger          = gic_retrigger,
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
        .irq_set_vcpu_affinity  = gic_irq_set_vcpu_affinity,
        .irq_nmi_setup          = gic_irq_nmi_setup,
        .irq_nmi_teardown       = gic_irq_nmi_teardown,
        .ipi_send_mask          = gic_ipi_send_mask,
        .flags                  = IRQCHIP_SET_TYPE_MASKED |
                                  IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        struct irq_chip *chip = &gic_chip;
        struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));

        if (static_branch_likely(&supports_deactivate_key))
                chip = &gic_eoimode1_chip;

        switch (__get_intid_range(hw)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case EPPI_RANGE:
                irq_set_percpu_devid(irq);
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
                break;

        case SPI_RANGE:
        case ESPI_RANGE:
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                irq_set_probe(irq);
                irqd_set_single_target(irqd);
                break;

        case LPI_RANGE:
                if (!gic_dist_supports_lpis())
                        return -EPERM;
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                break;

        default:
                return -EPERM;
        }

        /* Prevents SW retriggers which mess up the ACK/EOI ordering */
        irqd_set_handle_enforce_irqctx(irqd);
        return 0;
}

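/*
 * A minimal DT sketch, assuming the standard three-cell GIC binding
 * handled below:
 *
 *     interrupts = <0 42 4>;
 *
 * encodes an SPI (type 0), translating to hwirq 42 + 32 = 74 with
 * IRQ_TYPE_LEVEL_HIGH (4) as the trigger.
 */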
1471 static int gic_irq_domain_translate(struct irq_domain *d,
1472                                     struct irq_fwspec *fwspec,
1473                                     unsigned long *hwirq,
1474                                     unsigned int *type)
1475 {
1476         if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
1477                 *hwirq = fwspec->param[0];
1478                 *type = IRQ_TYPE_EDGE_RISING;
1479                 return 0;
1480         }
1481
1482         if (is_of_node(fwspec->fwnode)) {
1483                 if (fwspec->param_count < 3)
1484                         return -EINVAL;
1485
1486                 switch (fwspec->param[0]) {
1487                 case 0:                 /* SPI */
1488                         *hwirq = fwspec->param[1] + 32;
1489                         break;
1490                 case 1:                 /* PPI */
1491                         *hwirq = fwspec->param[1] + 16;
1492                         break;
1493                 case 2:                 /* ESPI */
1494                         *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
1495                         break;
1496                 case 3:                 /* EPPI */
1497                         *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
1498                         break;
1499                 case GIC_IRQ_TYPE_LPI:  /* LPI */
1500                         *hwirq = fwspec->param[1];
1501                         break;
1502                 case GIC_IRQ_TYPE_PARTITION:
1503                         *hwirq = fwspec->param[1];
1504                         if (fwspec->param[1] >= 16)
1505                                 *hwirq += EPPI_BASE_INTID - 16;
1506                         else
1507                                 *hwirq += 16;
1508                         break;
1509                 default:
1510                         return -EINVAL;
1511                 }
1512
1513                 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1514
1515                 /*
1516                  * Make it clear that broken DTs are... broken.
1517                  * Partitioned PPIs are an unfortunate exception.
1518                  */
1519                 WARN_ON(*type == IRQ_TYPE_NONE &&
1520                         fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
1521                 return 0;
1522         }
1523
1524         if (is_fwnode_irqchip(fwspec->fwnode)) {
1525                 if(fwspec->param_count != 2)
1526                         return -EINVAL;
1527
1528                 if (fwspec->param[0] < 16) {
1529                         pr_err(FW_BUG "Illegal GSI%d translation request\n",
1530                                fwspec->param[0]);
1531                         return -EINVAL;
1532                 }
1533
1534                 *hwirq = fwspec->param[0];
1535                 *type = fwspec->param[1];
1536
1537                 WARN_ON(*type == IRQ_TYPE_NONE);
1538                 return 0;
1539         }
1540
1541         return -EINVAL;
1542 }
1543
1544 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1545                                 unsigned int nr_irqs, void *arg)
1546 {
1547         int i, ret;
1548         irq_hw_number_t hwirq;
1549         unsigned int type = IRQ_TYPE_NONE;
1550         struct irq_fwspec *fwspec = arg;
1551
1552         ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
1553         if (ret)
1554                 return ret;
1555
1556         for (i = 0; i < nr_irqs; i++) {
1557                 ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
1558                 if (ret)
1559                         return ret;
1560         }
1561
1562         return 0;
1563 }
1564
1565 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1566                                 unsigned int nr_irqs)
1567 {
1568         int i;
1569
1570         for (i = 0; i < nr_irqs; i++) {
1571                 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1572                 irq_set_handler(virq + i, NULL);
1573                 irq_domain_reset_irq_data(d);
1574         }
1575 }
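
/*
 * Illustrative sketch (values made up) of how a consumer reaches the
 * alloc callback above:
 *
 *	struct irq_fwspec fwspec = {
 *		.fwnode      = gic_data.fwnode,
 *		.param_count = 3,
 *		.param       = { 1, 9, IRQ_TYPE_LEVEL_LOW },	// PPI 9
 *	};
 *	unsigned int virq = irq_create_fwspec_mapping(&fwspec);
 *
 * irq_create_fwspec_mapping() translates the spec (PPI 9 -> hwirq 25),
 * allocates a virq and calls gic_irq_domain_alloc() to bind the two.
 */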
1576
1577 static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
1578                                       irq_hw_number_t hwirq)
1579 {
1580         enum gic_intid_range range;
1581
1582         if (!gic_data.ppi_descs)
1583                 return false;
1584
1585         if (!is_of_node(fwspec->fwnode))
1586                 return false;
1587
1588         if (fwspec->param_count < 4 || !fwspec->param[3])
1589                 return false;
1590
1591         range = __get_intid_range(hwirq);
1592         if (range != PPI_RANGE && range != EPPI_RANGE)
1593                 return false;
1594
1595         return true;
1596 }
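
/*
 * Hypothetical specifier for a partitioned PPI: with
 * #interrupt-cells = <4>, a non-null fourth cell carries a phandle to
 * a partition node, e.g.
 *
 *	interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW &part0>;
 *
 * which makes the helper above return true, steering the mapping to
 * the per-partition domain rather than the main GIC domain.
 */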
1597
1598 static int gic_irq_domain_select(struct irq_domain *d,
1599                                  struct irq_fwspec *fwspec,
1600                                  enum irq_domain_bus_token bus_token)
1601 {
1602         unsigned int type, ret, ppi_idx;
1603         irq_hw_number_t hwirq;
1604
1605         /* Not for us */
1606         if (fwspec->fwnode != d->fwnode)
1607                 return 0;
1608
1609         /* If this is not DT, then we have a single domain */
1610         if (!is_of_node(fwspec->fwnode))
1611                 return 1;
1612
1613         ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
1614         if (WARN_ON_ONCE(ret))
1615                 return 0;
1616
1617         if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
1618                 return d == gic_data.domain;
1619
1620         /*
1621          * If this is a PPI and we have a 4th (non-null) parameter,
1622          * then we need to match the partition domain.
1623          */
1624         ppi_idx = __gic_get_ppi_index(hwirq);
1625         return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
1626 }
1627
1628 static const struct irq_domain_ops gic_irq_domain_ops = {
1629         .translate = gic_irq_domain_translate,
1630         .alloc = gic_irq_domain_alloc,
1631         .free = gic_irq_domain_free,
1632         .select = gic_irq_domain_select,
1633 };
1634
1635 static int partition_domain_translate(struct irq_domain *d,
1636                                       struct irq_fwspec *fwspec,
1637                                       unsigned long *hwirq,
1638                                       unsigned int *type)
1639 {
1640         unsigned long ppi_intid;
1641         struct device_node *np;
1642         unsigned int ppi_idx;
1643         int ret;
1644
1645         if (!gic_data.ppi_descs)
1646                 return -ENOMEM;
1647
1648         np = of_find_node_by_phandle(fwspec->param[3]);
1649         if (WARN_ON(!np))
1650                 return -EINVAL;
1651
1652         ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
1653         if (WARN_ON_ONCE(ret))
1654                 return ret;
1655
1656         ppi_idx = __gic_get_ppi_index(ppi_intid);
1657         ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
1658                                      of_node_to_fwnode(np));
1659         if (ret < 0)
1660                 return ret;
1661
1662         *hwirq = ret;
1663         *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1664
1665         return 0;
1666 }
1667
1668 static const struct irq_domain_ops partition_domain_ops = {
1669         .translate = partition_domain_translate,
1670         .select = gic_irq_domain_select,
1671 };
1672
1673 static bool gic_enable_quirk_msm8996(void *data)
1674 {
1675         struct gic_chip_data *d = data;
1676
1677         d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1678
1679         return true;
1680 }
1681
1682 static bool gic_enable_quirk_cavium_38539(void *data)
1683 {
1684         struct gic_chip_data *d = data;
1685
1686         d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1687
1688         return true;
1689 }
1690
1691 static bool gic_enable_quirk_hip06_07(void *data)
1692 {
1693         struct gic_chip_data *d = data;
1694
1695         /*
1696          * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1697          * not being an actual ARM implementation). The saving grace is
1698          * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1699          * HIP07 doesn't even have a proper IIDR, and still pretends to
1700          * have ESPI. In both cases, put them right.
1701          */
1702         if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1703                 /* Zero both ESPI and the RES0 field next to it... */
1704                 d->rdists.gicd_typer &= ~GENMASK(9, 8);
1705                 return true;
1706         }
1707
1708         return false;
1709 }
1710
1711 static const struct gic_quirk gic_quirks[] = {
1712         {
1713                 .desc   = "GICv3: Qualcomm MSM8996 broken firmware",
1714                 .compatible = "qcom,msm8996-gic-v3",
1715                 .init   = gic_enable_quirk_msm8996,
1716         },
1717         {
1718                 .desc   = "GICv3: HIP06 erratum 161010803",
1719                 .iidr   = 0x0204043b,
1720                 .mask   = 0xffffffff,
1721                 .init   = gic_enable_quirk_hip06_07,
1722         },
1723         {
1724                 .desc   = "GICv3: HIP07 erratum 161010803",
1725                 .iidr   = 0x00000000,
1726                 .mask   = 0xffffffff,
1727                 .init   = gic_enable_quirk_hip06_07,
1728         },
1729         {
1730                 /*
1731                  * Reserved register accesses generate a Synchronous
1732                  * External Abort. This erratum applies to:
1733                  * - ThunderX: CN88xx
1734                  * - OCTEON TX: CN83xx, CN81xx
1735                  * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
1736                  */
1737                 .desc   = "GICv3: Cavium erratum 38539",
1738                 .iidr   = 0xa000034c,
1739                 .mask   = 0xe8f00fff,
1740                 .init   = gic_enable_quirk_cavium_38539,
1741         },
1742         {
1743         }
1744 };
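
/*
 * Matching sketch (simplified): for IIDR-based entries a quirk fires
 * when (GICD_IIDR & mask) == iidr, so the Cavium entry above covers a
 * whole family of parts by masking out variant/revision fields before
 * comparing. The compatible-based MSM8996 entry is instead matched
 * against the GIC's own DT node by gic_enable_of_quirks().
 */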
1745
1746 static void gic_enable_nmi_support(void)
1747 {
1748         int i;
1749
1750         if (!gic_prio_masking_enabled())
1751                 return;
1752
1753         ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
1754         if (!ppi_nmi_refs)
1755                 return;
1756
1757         for (i = 0; i < gic_data.ppi_nr; i++)
1758                 refcount_set(&ppi_nmi_refs[i], 0);
1759
1760         pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
1761                 gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
1762
1763         /*
1764          * How priority values are used by the GIC depends on two things:
1765          * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
1766          * and if Group 0 interrupts can be delivered to Linux in the non-secure
1767          * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
1768          * ICC_PMR_EL1 register and the priority that software assigns to
1769          * interrupts:
1770          *
1771          * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
1772          * -----------------------------------------------------------
1773          *      1       |      -      |  unchanged  |    unchanged
1774          * -----------------------------------------------------------
1775          *      0       |      1      |  non-secure |    non-secure
1776          * -----------------------------------------------------------
1777          *      0       |      0      |  unchanged  |    non-secure
1778          *
1779          * where non-secure means that the value is right-shifted by one and the
1780          * MSB set, to make it fit in the non-secure priority range.
1781          *
1782          * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
1783          * are both either modified or unchanged, we can use the same set of
1784          * priorities.
1785          *
1786          * In the last case, where only the interrupt priorities are modified to
1787          * be in the non-secure range, we use a different PMR value to mask IRQs
1788          * and the rest of the values that we use remain unchanged.
1789          */
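        /*
         * Worked example (illustrative): a priority of 0xa0 written
         * from the non-secure side is presented to the CPU interface
         * as (0xa0 >> 1) | 0x80 = 0xd0 when security is enabled,
         * which is why the last case above needs a different PMR
         * value for masking.
         */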
1790         if (gic_has_group0() && !gic_dist_security_disabled())
1791                 static_branch_enable(&gic_nonsecure_priorities);
1792
1793         static_branch_enable(&supports_pseudo_nmis);
1794
1795         if (static_branch_likely(&supports_deactivate_key))
1796                 gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1797         else
1798                 gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1799 }
1800
1801 static int __init gic_init_bases(void __iomem *dist_base,
1802                                  struct redist_region *rdist_regs,
1803                                  u32 nr_redist_regions,
1804                                  u64 redist_stride,
1805                                  struct fwnode_handle *handle)
1806 {
1807         u32 typer;
1808         int err;
1809
1810         if (!is_hyp_mode_available())
1811                 static_branch_disable(&supports_deactivate_key);
1812
1813         if (static_branch_likely(&supports_deactivate_key))
1814                 pr_info("GIC: Using split EOI/Deactivate mode\n");
1815
1816         gic_data.fwnode = handle;
1817         gic_data.dist_base = dist_base;
1818         gic_data.redist_regions = rdist_regs;
1819         gic_data.nr_redist_regions = nr_redist_regions;
1820         gic_data.redist_stride = redist_stride;
1821
1822         /*
1823          * Find out how many interrupts are supported.
1824          */
1825         typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
1826         gic_data.rdists.gicd_typer = typer;
1827
1828         gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
1829                           gic_quirks, &gic_data);
1830
1831         pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
1832         pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
1833
1834         /*
1835          * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
1836          * architecture spec (which says that reserved registers are RES0).
1837          */
1838         if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
1839                 gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
1840
1841         gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
1842                                                  &gic_data);
1843         gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
1844         gic_data.rdists.has_rvpeid = true;
1845         gic_data.rdists.has_vlpis = true;
1846         gic_data.rdists.has_direct_lpi = true;
1847         gic_data.rdists.has_vpend_valid_dirty = true;
1848
1849         if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
1850                 err = -ENOMEM;
1851                 goto out_free;
1852         }
1853
1854         irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
1855
1856         gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
1857
1858         if (typer & GICD_TYPER_MBIS) {
1859                 err = mbi_init(handle, gic_data.domain);
1860                 if (err)
1861                         pr_err("Failed to initialize MBIs\n");
1862         }
1863
1864         set_handle_irq(gic_handle_irq);
1865
1866         gic_update_rdist_properties();
1867
1868         gic_dist_init();
1869         gic_cpu_init();
1870         gic_smp_init();
1871         gic_cpu_pm_init();
1872
1873         if (gic_dist_supports_lpis()) {
1874                 its_init(handle, &gic_data.rdists, gic_data.domain);
1875                 its_cpu_init();
1876                 its_lpi_memreserve_init();
1877         } else {
1878                 if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
1879                         gicv2m_init(handle, gic_data.domain);
1880         }
1881
1882         gic_enable_nmi_support();
1883
1884         return 0;
1885
1886 out_free:
1887         if (gic_data.domain)
1888                 irq_domain_remove(gic_data.domain);
1889         free_percpu(gic_data.rdists.rdist);
1890         return err;
1891 }
1892
1893 static int __init gic_validate_dist_version(void __iomem *dist_base)
1894 {
1895         u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1896
1897         if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
1898                 return -ENODEV;
1899
1900         return 0;
1901 }
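
/*
 * Illustrative: GICD_PIDR2 bits [7:4] hold the architecture revision,
 * so a hypothetical read of 0x3b masks down to 0x30, i.e. GICv3, while
 * 0x4x would mean GICv4; anything else makes us bail out.
 */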
1902
1903 /* Create all possible partitions at boot time */
1904 static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1905 {
1906         struct device_node *parts_node, *child_part;
1907         int part_idx = 0, i;
1908         int nr_parts;
1909         struct partition_affinity *parts;
1910
1911         parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
1912         if (!parts_node)
1913                 return;
1914
1915         gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
1916         if (!gic_data.ppi_descs)
1917                 goto out_put_node;
1918
1919         nr_parts = of_get_child_count(parts_node);
1920
1921         if (!nr_parts)
1922                 goto out_put_node;
1923
1924         parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
1925         if (WARN_ON(!parts))
1926                 goto out_put_node;
1927
1928         for_each_child_of_node(parts_node, child_part) {
1929                 struct partition_affinity *part;
1930                 int n;
1931
1932                 part = &parts[part_idx];
1933
1934                 part->partition_id = of_node_to_fwnode(child_part);
1935
1936                 pr_info("PPI partition %pOFn[%d] { ",
1937                         child_part, part_idx);
1938
1939                 n = of_property_count_elems_of_size(child_part, "affinity",
1940                                                     sizeof(u32));
1941                 WARN_ON(n <= 0);
1942
1943                 for (i = 0; i < n; i++) {
1944                         int err, cpu;
1945                         u32 cpu_phandle;
1946                         struct device_node *cpu_node;
1947
1948                         err = of_property_read_u32_index(child_part, "affinity",
1949                                                          i, &cpu_phandle);
1950                         if (WARN_ON(err))
1951                                 continue;
1952
1953                         cpu_node = of_find_node_by_phandle(cpu_phandle);
1954                         if (WARN_ON(!cpu_node))
1955                                 continue;
1956
1957                         cpu = of_cpu_node_to_id(cpu_node);
1958                         if (WARN_ON(cpu < 0)) {
1959                                 of_node_put(cpu_node);
1960                                 continue;
1961                         }
1962
1963                         pr_cont("%pOF[%d] ", cpu_node, cpu);
1964
1965                         cpumask_set_cpu(cpu, &part->mask);
1966                         of_node_put(cpu_node);
1967                 }
1968
1969                 pr_cont("}\n");
1970                 part_idx++;
1971         }
1972
1973         for (i = 0; i < gic_data.ppi_nr; i++) {
1974                 unsigned int irq;
1975                 struct partition_desc *desc;
1976                 struct irq_fwspec ppi_fwspec = {
1977                         .fwnode         = gic_data.fwnode,
1978                         .param_count    = 3,
1979                         .param          = {
1980                                 [0]     = GIC_IRQ_TYPE_PARTITION,
1981                                 [1]     = i,
1982                                 [2]     = IRQ_TYPE_NONE,
1983                         },
1984                 };
1985
1986                 irq = irq_create_fwspec_mapping(&ppi_fwspec);
1987                 if (WARN_ON(!irq))
1988                         continue;
1989                 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
1990                                              irq, &partition_domain_ops);
1991                 if (WARN_ON(!desc))
1992                         continue;
1993
1994                 gic_data.ppi_descs[i] = desc;
1995         }
1996
1997 out_put_node:
1998         of_node_put(parts_node);
1999 }
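
/*
 * Hypothetical DT fragment (modelled on the arm,gic-v3 binding
 * example) that this parser consumes, with &cpu0/&cpu2 standing in
 * for real CPU node phandles:
 *
 *	ppi-partitions {
 *		part0: interrupt-partition-0 {
 *			affinity = <&cpu0 &cpu2>;
 *		};
 *	};
 *
 * Each child node becomes one struct partition_affinity whose mask
 * holds the CPUs listed in "affinity".
 */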
2000
2001 static void __init gic_of_setup_kvm_info(struct device_node *node)
2002 {
2003         int ret;
2004         struct resource r;
2005         u32 gicv_idx;
2006
2007         gic_v3_kvm_info.type = GIC_V3;
2008
2009         gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
2010         if (!gic_v3_kvm_info.maint_irq)
2011                 return;
2012
2013         if (of_property_read_u32(node, "#redistributor-regions",
2014                                  &gicv_idx))
2015                 gicv_idx = 1;
2016
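        /*
         * Worked example, assuming one redistributor region: the reg
         * entries are GICD (0), GICR (1), GICC (2), GICH (3) and
         * GICV (4), so gicv_idx goes from 1 to 4 below.
         */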
2017         gicv_idx += 3;  /* Also skip GICD, GICC, GICH */
2018         ret = of_address_to_resource(node, gicv_idx, &r);
2019         if (!ret)
2020                 gic_v3_kvm_info.vcpu = r;
2021
2022         gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2023         gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2024         vgic_set_kvm_info(&gic_v3_kvm_info);
2025 }
2026
2027 static void gic_request_region(resource_size_t base, resource_size_t size,
2028                                const char *name)
2029 {
2030         if (!request_mem_region(base, size, name))
2031                 pr_warn_once(FW_BUG "%s region %pa overlaps an existing region\n",
2032                              name, &base);
2033 }
2034
2035 static void __iomem *gic_of_iomap(struct device_node *node, int idx,
2036                                   const char *name, struct resource *res)
2037 {
2038         void __iomem *base;
2039         int ret;
2040
2041         ret = of_address_to_resource(node, idx, res);
2042         if (ret)
2043                 return IOMEM_ERR_PTR(ret);
2044
2045         gic_request_region(res->start, resource_size(res), name);
2046         base = of_iomap(node, idx);
2047
2048         return base ?: IOMEM_ERR_PTR(-ENOMEM);
2049 }
2050
2051 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
2052 {
2053         void __iomem *dist_base;
2054         struct redist_region *rdist_regs;
2055         struct resource res;
2056         u64 redist_stride;
2057         u32 nr_redist_regions;
2058         int err, i;
2059
2060         dist_base = gic_of_iomap(node, 0, "GICD", &res);
2061         if (IS_ERR(dist_base)) {
2062                 pr_err("%pOF: unable to map gic dist registers\n", node);
2063                 return PTR_ERR(dist_base);
2064         }
2065
2066         err = gic_validate_dist_version(dist_base);
2067         if (err) {
2068                 pr_err("%pOF: no distributor detected, giving up\n", node);
2069                 goto out_unmap_dist;
2070         }
2071
2072         if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
2073                 nr_redist_regions = 1;
2074
2075         rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
2076                              GFP_KERNEL);
2077         if (!rdist_regs) {
2078                 err = -ENOMEM;
2079                 goto out_unmap_dist;
2080         }
2081
2082         for (i = 0; i < nr_redist_regions; i++) {
2083                 rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
2084                 if (IS_ERR(rdist_regs[i].redist_base)) {
2085                         pr_err("%pOF: couldn't map region %d\n", node, i);
2086                         err = -ENODEV;
2087                         goto out_unmap_rdist;
2088                 }
2089                 rdist_regs[i].phys_base = res.start;
2090         }
2091
2092         if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
2093                 redist_stride = 0;
2094
2095         gic_enable_of_quirks(node, gic_quirks, &gic_data);
2096
2097         err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
2098                              redist_stride, &node->fwnode);
2099         if (err)
2100                 goto out_unmap_rdist;
2101
2102         gic_populate_ppi_partitions(node);
2103
2104         if (static_branch_likely(&supports_deactivate_key))
2105                 gic_of_setup_kvm_info(node);
2106         return 0;
2107
2108 out_unmap_rdist:
2109         for (i = 0; i < nr_redist_regions; i++)
2110                 if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
2111                         iounmap(rdist_regs[i].redist_base);
2112         kfree(rdist_regs);
2113 out_unmap_dist:
2114         iounmap(dist_base);
2115         return err;
2116 }
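
/*
 * Minimal, hypothetical node matched by this initcall (addresses made
 * up; see the arm,gic-v3 DT binding for the authoritative layout):
 *
 *	gic: interrupt-controller@2f000000 {
 *		compatible = "arm,gic-v3";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x2f000000 0x10000>,	// GICD, reg index 0
 *		      <0x2f100000 0x200000>;	// GICR, reg index 1
 *	};
 */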
2117
2118 IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
2119
2120 #ifdef CONFIG_ACPI
2121 static struct
2122 {
2123         void __iomem *dist_base;
2124         struct redist_region *redist_regs;
2125         u32 nr_redist_regions;
2126         bool single_redist;
2127         int enabled_rdists;
2128         u32 maint_irq;
2129         int maint_irq_mode;
2130         phys_addr_t vcpu_base;
2131 } acpi_data __initdata;
2132
2133 static void __init
2134 gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
2135 {
2136         static int count;
2137
2138         acpi_data.redist_regs[count].phys_base = phys_base;
2139         acpi_data.redist_regs[count].redist_base = redist_base;
2140         acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
2141         count++;
2142 }
2143
2144 static int __init
2145 gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
2146                            const unsigned long end)
2147 {
2148         struct acpi_madt_generic_redistributor *redist =
2149                         (struct acpi_madt_generic_redistributor *)header;
2150         void __iomem *redist_base;
2151
2152         redist_base = ioremap(redist->base_address, redist->length);
2153         if (!redist_base) {
2154                 pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
2155                 return -ENOMEM;
2156         }
2157         gic_request_region(redist->base_address, redist->length, "GICR");
2158
2159         gic_acpi_register_redist(redist->base_address, redist_base);
2160         return 0;
2161 }
2162
2163 static int __init
2164 gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
2165                          const unsigned long end)
2166 {
2167         struct acpi_madt_generic_interrupt *gicc =
2168                                 (struct acpi_madt_generic_interrupt *)header;
2169         u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
2170         u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
2171         void __iomem *redist_base;
2172
2173         /* A GICC entry which has !ACPI_MADT_ENABLED is not usable, so skip it */
2174         if (!(gicc->flags & ACPI_MADT_ENABLED))
2175                 return 0;
2176
2177         redist_base = ioremap(gicc->gicr_base_address, size);
2178         if (!redist_base)
2179                 return -ENOMEM;
2180         gic_request_region(gicc->gicr_base_address, size, "GICR");
2181
2182         gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
2183         return 0;
2184 }
2185
2186 static int __init gic_acpi_collect_gicr_base(void)
2187 {
2188         acpi_tbl_entry_handler redist_parser;
2189         enum acpi_madt_type type;
2190
2191         if (acpi_data.single_redist) {
2192                 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
2193                 redist_parser = gic_acpi_parse_madt_gicc;
2194         } else {
2195                 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
2196                 redist_parser = gic_acpi_parse_madt_redist;
2197         }
2198
2199         /* Collect redistributor base addresses in GICR entries */
2200         if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
2201                 return 0;
2202
2203         pr_info("No valid GICR entries exist\n");
2204         return -ENODEV;
2205 }
2206
2207 static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
2208                                       const unsigned long end)
2209 {
2210         /* Subtable presence means a redistributor exists; that's all we need */
2211         return 0;
2212 }
2213
2214 static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
2215                                       const unsigned long end)
2216 {
2217         struct acpi_madt_generic_interrupt *gicc =
2218                                 (struct acpi_madt_generic_interrupt *)header;
2219
2220         /*
2221          * If the GICC entry is enabled and has a valid GICR base address,
2222          * the GICR base is presented via GICC.
2223          */
2224         if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
2225                 acpi_data.enabled_rdists++;
2226                 return 0;
2227         }
2228
2229         /*
2230          * It's perfectly valid firmware can pass disabled GICC entry, driver
2231          * It is perfectly valid for firmware to pass a disabled GICC entry.
2232          * Don't treat that as an error; skip the entry instead of failing the probe.
2233         if (!(gicc->flags & ACPI_MADT_ENABLED))
2234                 return 0;
2235
2236         return -ENODEV;
2237 }
2238
2239 static int __init gic_acpi_count_gicr_regions(void)
2240 {
2241         int count;
2242
2243         /*
2244          * Count how many redistributor regions we have. It is not allowed
2245          * to mix redistributor descriptions: GICR and GICC subtables have to be
2246          * mutually exclusive.
2247          */
2248         count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
2249                                       gic_acpi_match_gicr, 0);
2250         if (count > 0) {
2251                 acpi_data.single_redist = false;
2252                 return count;
2253         }
2254
2255         count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2256                                       gic_acpi_match_gicc, 0);
2257         if (count > 0) {
2258                 acpi_data.single_redist = true;
2259                 count = acpi_data.enabled_rdists;
2260         }
2261
2262         return count;
2263 }
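
/*
 * Illustrative outcomes: an MADT with GICR subtables yields one region
 * per subtable and single_redist = false; an MADT that describes the
 * redistributors via per-CPU GICC entries instead yields one
 * single-redist region per enabled CPU, e.g. four enabled GICCs with a
 * GICR base give a count of 4.
 */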
2264
2265 static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
2266                                            struct acpi_probe_entry *ape)
2267 {
2268         struct acpi_madt_generic_distributor *dist;
2269         int count;
2270
2271         dist = (struct acpi_madt_generic_distributor *)header;
2272         if (dist->version != ape->driver_data)
2273                 return false;
2274
2275         /* We need to do this exercise anyway, and the sooner the better */
2276         count = gic_acpi_count_gicr_regions();
2277         if (count <= 0)
2278                 return false;
2279
2280         acpi_data.nr_redist_regions = count;
2281         return true;
2282 }
2283
2284 static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
2285                                                 const unsigned long end)
2286 {
2287         struct acpi_madt_generic_interrupt *gicc =
2288                 (struct acpi_madt_generic_interrupt *)header;
2289         int maint_irq_mode;
2290         static bool first_madt = true;
2291
2292         /* Skip unusable CPUs */
2293         if (!(gicc->flags & ACPI_MADT_ENABLED))
2294                 return 0;
2295
2296         maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
2297                 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
2298
2299         if (first_madt) {
2300                 first_madt = false;
2301
2302                 acpi_data.maint_irq = gicc->vgic_interrupt;
2303                 acpi_data.maint_irq_mode = maint_irq_mode;
2304                 acpi_data.vcpu_base = gicc->gicv_base_address;
2305
2306                 return 0;
2307         }
2308
2309         /*
2310          * The maintenance interrupt and GICV should be the same for every CPU
2311          */
2312         if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
2313             (acpi_data.maint_irq_mode != maint_irq_mode) ||
2314             (acpi_data.vcpu_base != gicc->gicv_base_address))
2315                 return -EINVAL;
2316
2317         return 0;
2318 }
2319
2320 static bool __init gic_acpi_collect_virt_info(void)
2321 {
2322         int count;
2323
2324         count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2325                                       gic_acpi_parse_virt_madt_gicc, 0);
2326
2327         return (count > 0);
2328 }
2329
2330 #define ACPI_GICV3_DIST_MEM_SIZE        (SZ_64K)
2331 #define ACPI_GICV2_VCTRL_MEM_SIZE       (SZ_4K)
2332 #define ACPI_GICV2_VCPU_MEM_SIZE        (SZ_8K)
2333
2334 static void __init gic_acpi_setup_kvm_info(void)
2335 {
2336         int irq;
2337
2338         if (!gic_acpi_collect_virt_info()) {
2339                 pr_warn("Unable to get hardware information used for virtualization\n");
2340                 return;
2341         }
2342
2343         gic_v3_kvm_info.type = GIC_V3;
2344
2345         irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
2346                                 acpi_data.maint_irq_mode,
2347                                 ACPI_ACTIVE_HIGH);
2348         if (irq <= 0)
2349                 return;
2350
2351         gic_v3_kvm_info.maint_irq = irq;
2352
2353         if (acpi_data.vcpu_base) {
2354                 struct resource *vcpu = &gic_v3_kvm_info.vcpu;
2355
2356                 vcpu->flags = IORESOURCE_MEM;
2357                 vcpu->start = acpi_data.vcpu_base;
2358                 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
2359         }
2360
2361         gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2362         gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2363         vgic_set_kvm_info(&gic_v3_kvm_info);
2364 }
2365
2366 static struct fwnode_handle *gsi_domain_handle;
2367
2368 static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
2369 {
2370         return gsi_domain_handle;
2371 }
2372
2373 static int __init
2374 gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
2375 {
2376         struct acpi_madt_generic_distributor *dist;
2377         size_t size;
2378         int i, err;
2379
2380         /* Get distributor base address */
2381         dist = (struct acpi_madt_generic_distributor *)header;
2382         acpi_data.dist_base = ioremap(dist->base_address,
2383                                       ACPI_GICV3_DIST_MEM_SIZE);
2384         if (!acpi_data.dist_base) {
2385                 pr_err("Unable to map GICD registers\n");
2386                 return -ENOMEM;
2387         }
2388         gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
2389
2390         err = gic_validate_dist_version(acpi_data.dist_base);
2391         if (err) {
2392                 pr_err("No distributor detected at @%p, giving up\n",
2393                        acpi_data.dist_base);
2394                 goto out_dist_unmap;
2395         }
2396
2397         size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
2398         acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
2399         if (!acpi_data.redist_regs) {
2400                 err = -ENOMEM;
2401                 goto out_dist_unmap;
2402         }
2403
2404         err = gic_acpi_collect_gicr_base();
2405         if (err)
2406                 goto out_redist_unmap;
2407
2408         gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
2409         if (!gsi_domain_handle) {
2410                 err = -ENOMEM;
2411                 goto out_redist_unmap;
2412         }
2413
2414         err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
2415                              acpi_data.nr_redist_regions, 0, gsi_domain_handle);
2416         if (err)
2417                 goto out_fwhandle_free;
2418
2419         acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
2420
2421         if (static_branch_likely(&supports_deactivate_key))
2422                 gic_acpi_setup_kvm_info();
2423
2424         return 0;
2425
2426 out_fwhandle_free:
2427         irq_domain_free_fwnode(gsi_domain_handle);
2428 out_redist_unmap:
2429         for (i = 0; i < acpi_data.nr_redist_regions; i++)
2430                 if (acpi_data.redist_regs[i].redist_base)
2431                         iounmap(acpi_data.redist_regs[i].redist_base);
2432         kfree(acpi_data.redist_regs);
2433 out_dist_unmap:
2434         iounmap(acpi_data.dist_base);
2435         return err;
2436 }
2437 IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2438                      acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
2439                      gic_acpi_init);
2440 IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2441                      acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
2442                      gic_acpi_init);
2443 IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2444                      acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
2445                      gic_acpi_init);
2446 #endif