// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 John Crispin <john@phrozen.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */
8 #include <linux/interrupt.h>
9 #include <linux/ioport.h>
10 #include <linux/sched.h>
11 #include <linux/irqdomain.h>
12 #include <linux/of_platform.h>
13 #include <linux/of_address.h>
14 #include <linux/of_irq.h>
16 #include <asm/bootinfo.h>
17 #include <asm/irq_cpu.h>
19 #include <lantiq_soc.h>
22 /* register definitions - internal irqs */
23 #define LTQ_ICU_IM0_ISR 0x0000
24 #define LTQ_ICU_IM0_IER 0x0008
25 #define LTQ_ICU_IM0_IOSR 0x0010
26 #define LTQ_ICU_IM0_IRSR 0x0018
27 #define LTQ_ICU_IM0_IMR 0x0020
28 #define LTQ_ICU_IM1_ISR 0x0028
29 #define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)
31 /* register definitions - external irqs */
32 #define LTQ_EIU_EXIN_C 0x0000
33 #define LTQ_EIU_EXIN_INIC 0x0004
34 #define LTQ_EIU_EXIN_INC 0x0008
35 #define LTQ_EIU_EXIN_INEN 0x000C
37 /* number of external interrupts */
40 /* the performance counter */
41 #define LTQ_PERF_IRQ (INT_NUM_IM4_IRL0 + 31)
44 * irqs generated by devices attached to the EBU need to be acked in
47 #define LTQ_ICU_EBU_IRQ 22
49 #define ltq_icu_w32(m, x, y) ltq_w32((x), ltq_icu_membase[m] + (y))
50 #define ltq_icu_r32(m, x) ltq_r32(ltq_icu_membase[m] + (x))
52 #define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y))
53 #define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x))
55 /* our 2 ipi interrupts for VSMP */
56 #define MIPS_CPU_IPI_RESCHED_IRQ 0
57 #define MIPS_CPU_IPI_CALL_IRQ 1
59 /* we have a cascade of 8 irqs */
60 #define MIPS_CPU_IRQ_CASCADE 8
62 static int exin_avail;
63 static u32 ltq_eiu_irq[MAX_EIU];
64 static void __iomem *ltq_icu_membase[MAX_IM];
65 static void __iomem *ltq_eiu_membase;
66 static struct irq_domain *ltq_domain;
67 static int ltq_perfcount_irq;
69 int ltq_eiu_get_irq(int exin)
71 if (exin < exin_avail)
72 return ltq_eiu_irq[exin];
76 void ltq_disable_irq(struct irq_data *d)
78 u32 ier = LTQ_ICU_IM0_IER;
79 int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
80 int im = offset / INT_NUM_IM_OFFSET;
82 offset %= INT_NUM_IM_OFFSET;
83 ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
86 void ltq_mask_and_ack_irq(struct irq_data *d)
88 u32 ier = LTQ_ICU_IM0_IER;
89 u32 isr = LTQ_ICU_IM0_ISR;
90 int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
91 int im = offset / INT_NUM_IM_OFFSET;
93 offset %= INT_NUM_IM_OFFSET;
94 ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
95 ltq_icu_w32(im, BIT(offset), isr);
98 static void ltq_ack_irq(struct irq_data *d)
100 u32 isr = LTQ_ICU_IM0_ISR;
101 int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
102 int im = offset / INT_NUM_IM_OFFSET;
104 offset %= INT_NUM_IM_OFFSET;
105 ltq_icu_w32(im, BIT(offset), isr);
108 void ltq_enable_irq(struct irq_data *d)
110 u32 ier = LTQ_ICU_IM0_IER;
111 int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
112 int im = offset / INT_NUM_IM_OFFSET;
114 offset %= INT_NUM_IM_OFFSET;
115 ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
118 static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
122 for (i = 0; i < exin_avail; i++) {
123 if (d->hwirq == ltq_eiu_irq[i]) {
128 case IRQF_TRIGGER_NONE:
130 case IRQF_TRIGGER_RISING:
134 case IRQF_TRIGGER_FALLING:
138 case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
142 case IRQF_TRIGGER_HIGH:
145 case IRQF_TRIGGER_LOW:
149 pr_err("invalid type %d for irq %ld\n",
155 irq_set_handler(d->hwirq, handle_edge_irq);
157 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
158 (val << (i * 4)), LTQ_EIU_EXIN_C);
165 static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
170 for (i = 0; i < exin_avail; i++) {
171 if (d->hwirq == ltq_eiu_irq[i]) {
172 /* by default we are low level triggered */
173 ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
174 /* clear all pending */
175 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
178 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
187 static void ltq_shutdown_eiu_irq(struct irq_data *d)
192 for (i = 0; i < exin_avail; i++) {
193 if (d->hwirq == ltq_eiu_irq[i]) {
195 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
202 static struct irq_chip ltq_irq_type = {
204 .irq_enable = ltq_enable_irq,
205 .irq_disable = ltq_disable_irq,
206 .irq_unmask = ltq_enable_irq,
207 .irq_ack = ltq_ack_irq,
208 .irq_mask = ltq_disable_irq,
209 .irq_mask_ack = ltq_mask_and_ack_irq,
212 static struct irq_chip ltq_eiu_type = {
214 .irq_startup = ltq_startup_eiu_irq,
215 .irq_shutdown = ltq_shutdown_eiu_irq,
216 .irq_enable = ltq_enable_irq,
217 .irq_disable = ltq_disable_irq,
218 .irq_unmask = ltq_enable_irq,
219 .irq_ack = ltq_ack_irq,
220 .irq_mask = ltq_disable_irq,
221 .irq_mask_ack = ltq_mask_and_ack_irq,
222 .irq_set_type = ltq_eiu_settype,
225 static void ltq_hw_irq_handler(struct irq_desc *desc)
227 int module = irq_desc_get_irq(desc) - 2;
231 irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
236 * silicon bug causes only the msb set to 1 to be valid. all
237 * other bits might be bogus
240 hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
241 generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
243 /* if this is a EBU irq, we need to ack it or get a deadlock */
244 if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
245 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
249 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
251 struct irq_chip *chip = <q_irq_type;
254 if (hw < MIPS_CPU_IRQ_CASCADE)
257 for (i = 0; i < exin_avail; i++)
258 if (hw == ltq_eiu_irq[i])
259 chip = <q_eiu_type;
261 irq_set_chip_and_handler(irq, chip, handle_level_irq);
266 static const struct irq_domain_ops irq_domain_ops = {
267 .xlate = irq_domain_xlate_onetwocell,
271 int __init icu_of_init(struct device_node *node, struct device_node *parent)
273 struct device_node *eiu_node;
277 for (i = 0; i < MAX_IM; i++) {
278 if (of_address_to_resource(node, i, &res))
279 panic("Failed to get icu memory range");
281 if (!request_mem_region(res.start, resource_size(&res),
283 pr_err("Failed to request icu memory");
285 ltq_icu_membase[i] = ioremap_nocache(res.start,
286 resource_size(&res));
287 if (!ltq_icu_membase[i])
288 panic("Failed to remap icu memory");
291 /* turn off all irqs by default */
292 for (i = 0; i < MAX_IM; i++) {
293 /* make sure all irqs are turned off by default */
294 ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
295 /* clear all possibly pending interrupts */
296 ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
301 for (i = 0; i < MAX_IM; i++)
302 irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
304 ltq_domain = irq_domain_add_linear(node,
305 (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
308 /* tell oprofile which irq to use */
309 ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
311 /* the external interrupts are optional and xway only */
312 eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
313 if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
314 /* find out how many external irq sources we have */
315 exin_avail = of_property_count_u32_elems(eiu_node,
318 if (exin_avail > MAX_EIU)
319 exin_avail = MAX_EIU;
321 ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
322 ltq_eiu_irq, exin_avail);
324 panic("failed to load external irq resources");
326 if (!request_mem_region(res.start, resource_size(&res),
328 pr_err("Failed to request eiu memory");
330 ltq_eiu_membase = ioremap_nocache(res.start,
331 resource_size(&res));
332 if (!ltq_eiu_membase)
333 panic("Failed to remap eiu memory");
339 int get_c0_perfcount_int(void)
341 return ltq_perfcount_irq;
343 EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
345 unsigned int get_c0_compare_int(void)
347 return CP0_LEGACY_COMPARE_IRQ;
350 static struct of_device_id __initdata of_irq_ids[] = {
351 { .compatible = "lantiq,icu", .data = icu_of_init },
355 void __init arch_init_irq(void)
357 of_irq_init(of_irq_ids);