// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
 * Loongson Local IO Interrupt Controller support
 */
7 #include <linux/errno.h>
8 #include <linux/init.h>
9 #include <linux/types.h>
10 #include <linux/interrupt.h>
11 #include <linux/ioport.h>
12 #include <linux/irqchip.h>
13 #include <linux/of_address.h>
14 #include <linux/of_irq.h>
16 #include <linux/smp.h>
17 #include <linux/irqchip/chained_irq.h>
22 #include <asm/loongson.h>
/* Number of interrupt lines handled by one liointc instance */
#define LIOINTC_CHIP_IRQ	32
/* Up to four parent INTx outputs feed the CPU interrupt controller */
#define LIOINTC_NUM_PARENT	4
/* Interrupt status can be observed by up to four cores */
#define LIOINTC_NUM_CORES	4

/* Offset of the control-register block inside the liointc MMIO window */
#define LIOINTC_INTC_CHIP_START	0x20

#define LIOINTC_REG_INTC_STATUS	(LIOINTC_INTC_CHIP_START + 0x20)
#define LIOINTC_REG_INTC_EN_STATUS	(LIOINTC_INTC_CHIP_START + 0x04)
#define LIOINTC_REG_INTC_ENABLE	(LIOINTC_INTC_CHIP_START + 0x08)
#define LIOINTC_REG_INTC_DISABLE	(LIOINTC_INTC_CHIP_START + 0x0c)
#define LIOINTC_REG_INTC_POL	(LIOINTC_INTC_CHIP_START + 0x10)
#define LIOINTC_REG_INTC_EDGE	(LIOINTC_INTC_CHIP_START + 0x14)

/* Parent INTx selection lives in the upper nibble of each route byte */
#define LIOINTC_SHIFT_INTx	4

/* IRQ line blamed when the LPC-IRQ hardware erratum fires (see handler) */
#define LIOINTC_ERRATA_IRQ	10

/*
 * Per-architecture way to read the current core number; the original
 * conditional was truncated, restore the #else/#endif so only one
 * definition is ever active.
 */
#if defined(CONFIG_MIPS)
#define liointc_core_id get_ebase_cpunum()
#else
#define liointc_core_id get_csr_cpuid()
#endif
48 struct liointc_handler_data {
49 struct liointc_priv *priv;
54 struct irq_chip_generic *gc;
55 struct liointc_handler_data handler[LIOINTC_NUM_PARENT];
56 void __iomem *core_isr[LIOINTC_NUM_CORES];
57 u8 map_cache[LIOINTC_CHIP_IRQ];
58 bool has_lpc_irq_errata;
61 struct fwnode_handle *liointc_handle;
63 static void liointc_chained_handle_irq(struct irq_desc *desc)
65 struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
66 struct irq_chip *chip = irq_desc_get_chip(desc);
67 struct irq_chip_generic *gc = handler->priv->gc;
68 int core = liointc_core_id % LIOINTC_NUM_CORES;
71 chained_irq_enter(chip, desc);
73 pending = readl(handler->priv->core_isr[core]);
76 /* Always blame LPC IRQ if we have that bug */
77 if (handler->priv->has_lpc_irq_errata &&
78 (handler->parent_int_map & gc->mask_cache &
79 BIT(LIOINTC_ERRATA_IRQ)))
80 pending = BIT(LIOINTC_ERRATA_IRQ);
86 int bit = __ffs(pending);
88 generic_handle_domain_irq(gc->domain, bit);
92 chained_irq_exit(chip, desc);
95 static void liointc_set_bit(struct irq_chip_generic *gc,
100 writel(readl(gc->reg_base + offset) | mask,
101 gc->reg_base + offset);
103 writel(readl(gc->reg_base + offset) & ~mask,
104 gc->reg_base + offset);
107 static int liointc_set_type(struct irq_data *data, unsigned int type)
109 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
110 u32 mask = data->mask;
113 irq_gc_lock_irqsave(gc, flags);
115 case IRQ_TYPE_LEVEL_HIGH:
116 liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
117 liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
119 case IRQ_TYPE_LEVEL_LOW:
120 liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
121 liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
123 case IRQ_TYPE_EDGE_RISING:
124 liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
125 liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
127 case IRQ_TYPE_EDGE_FALLING:
128 liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
129 liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
132 irq_gc_unlock_irqrestore(gc, flags);
135 irq_gc_unlock_irqrestore(gc, flags);
137 irqd_set_trigger_type(data, type);
141 static void liointc_resume(struct irq_chip_generic *gc)
143 struct liointc_priv *priv = gc->private;
147 irq_gc_lock_irqsave(gc, flags);
148 /* Disable all at first */
149 writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE);
150 /* Restore map cache */
151 for (i = 0; i < LIOINTC_CHIP_IRQ; i++)
152 writeb(priv->map_cache[i], gc->reg_base + i);
153 /* Restore mask cache */
154 writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
155 irq_gc_unlock_irqrestore(gc, flags);
/*
 * Parent IRQ numbers and per-parent routing bitmaps, filled in by the
 * DT (liointc_of_init) or ACPI (liointc_acpi_init) probe path before
 * liointc_init() consumes them.
 */
static int parent_irq[LIOINTC_NUM_PARENT];
static u32 parent_int_map[LIOINTC_NUM_PARENT];
/* Names of the parent interrupts / per-core ISR regs as they appear in DT */
static const char *const parent_names[] = {"int0", "int1", "int2", "int3"};
static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"};
163 static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
164 const u32 *intspec, unsigned int intsize,
165 unsigned long *out_hwirq, unsigned int *out_type)
167 if (WARN_ON(intsize < 1))
169 *out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ;
172 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
174 *out_type = IRQ_TYPE_NONE;
179 static const struct irq_domain_ops acpi_irq_gc_ops = {
180 .map = irq_map_generic_chip,
181 .unmap = irq_unmap_generic_chip,
182 .xlate = liointc_domain_xlate,
185 static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
186 struct fwnode_handle *domain_handle, struct device_node *node)
190 struct irq_chip_type *ct;
191 struct irq_chip_generic *gc;
192 struct irq_domain *domain;
193 struct liointc_priv *priv;
195 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
199 base = ioremap(addr, size);
203 for (i = 0; i < LIOINTC_NUM_CORES; i++)
204 priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
206 for (i = 0; i < LIOINTC_NUM_PARENT; i++)
207 priv->handler[i].parent_int_map = parent_int_map[i];
210 for (i = 0; i < LIOINTC_NUM_CORES; i++) {
211 int index = of_property_match_string(node,
212 "reg-names", core_reg_names[i]);
217 priv->core_isr[i] = of_iomap(node, index);
220 if (!priv->core_isr[0])
224 /* Setup IRQ domain */
226 domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
227 &acpi_irq_gc_ops, priv);
229 domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
230 &irq_generic_chip_ops, priv);
232 pr_err("loongson-liointc: cannot add IRQ domain\n");
236 err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1,
237 (node ? node->full_name : "LIOINTC"),
238 handle_level_irq, 0, IRQ_NOPROBE, 0);
240 pr_err("loongson-liointc: unable to register IRQ domain\n");
241 goto out_free_domain;
245 /* Disable all IRQs */
246 writel(0xffffffff, base + LIOINTC_REG_INTC_DISABLE);
247 /* Set to level triggered */
248 writel(0x0, base + LIOINTC_REG_INTC_EDGE);
250 /* Generate parent INT part of map cache */
251 for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
252 u32 pending = priv->handler[i].parent_int_map;
255 int bit = __ffs(pending);
257 priv->map_cache[bit] = BIT(i) << LIOINTC_SHIFT_INTx;
258 pending &= ~BIT(bit);
262 for (i = 0; i < LIOINTC_CHIP_IRQ; i++) {
263 /* Generate core part of map cache */
264 priv->map_cache[i] |= BIT(loongson_sysconf.boot_cpu_id);
265 writeb(priv->map_cache[i], base + i);
268 gc = irq_get_domain_generic_chip(domain, 0);
272 gc->resume = liointc_resume;
275 ct->regs.enable = LIOINTC_REG_INTC_ENABLE;
276 ct->regs.disable = LIOINTC_REG_INTC_DISABLE;
277 ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
278 ct->chip.irq_mask = irq_gc_mask_disable_reg;
279 ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
280 ct->chip.irq_set_type = liointc_set_type;
285 for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
286 if (parent_irq[i] <= 0)
289 priv->handler[i].priv = priv;
290 irq_set_chained_handler_and_data(parent_irq[i],
291 liointc_chained_handle_irq, &priv->handler[i]);
294 liointc_handle = domain_handle;
298 irq_domain_remove(domain);
309 static int __init liointc_of_init(struct device_node *node,
310 struct device_node *parent)
312 bool have_parent = FALSE;
313 int sz, i, index, revision, err = 0;
316 if (!of_device_is_compatible(node, "loongson,liointc-2.0")) {
320 index = of_property_match_string(node, "reg-names", "main");
324 if (of_address_to_resource(node, index, &res))
327 for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
328 parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
329 if (parent_irq[i] > 0)
335 sz = of_property_read_variable_u32_array(node,
336 "loongson,parent_int_map",
341 pr_err("loongson-liointc: No parent_int_map\n");
345 err = liointc_init(res.start, resource_size(&res),
346 revision, of_node_to_fwnode(node), node);
/* DT match table: all three liointc generations probe via liointc_of_init() */
IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init);
IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init);
IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init);
360 static int __init htintc_parse_madt(union acpi_subtable_headers *header,
361 const unsigned long end)
363 struct acpi_madt_ht_pic *htintc_entry = (struct acpi_madt_ht_pic *)header;
364 struct irq_domain *parent = irq_find_matching_fwnode(liointc_handle, DOMAIN_BUS_ANY);
366 return htvec_acpi_init(parent, htintc_entry);
369 static int __init acpi_cascade_irqdomain_init(void)
373 r = acpi_table_parse_madt(ACPI_MADT_TYPE_HT_PIC, htintc_parse_madt, 0);
/*
 * liointc_acpi_init() - bring up a liointc described by an ACPI MADT
 * LIO_PIC entry.
 * @parent:	IRQ domain of the parent (CPU) interrupt controller
 * @acpi_liointc:	MADT description of this controller
 *
 * NOTE(review): this chunk is truncated — the declaration of 'ret',
 * several braces/returns and the function's tail are not visible here;
 * only comments are added below, the code is left as-is.
 */
int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc)
	struct fwnode_handle *domain_handle;

	/* Routing bitmaps for the two cascade inputs come straight from MADT */
	parent_int_map[0] = acpi_liointc->cascade_map[0];
	parent_int_map[1] = acpi_liointc->cascade_map[1];

	/* Map both cascade inputs in the parent domain */
	parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
	parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);

	/* The fwnode is keyed on the controller's MMIO address */
	domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
	if (!domain_handle) {
		pr_err("Unable to allocate domain handle\n");

	/* Revision 1 register layout; no DT node on the ACPI path */
	ret = liointc_init(acpi_liointc->address, acpi_liointc->size,
			1, domain_handle, NULL);
	/* presumably only on success — TODO confirm against upstream */
	ret = acpi_cascade_irqdomain_init();
	/* presumably only on failure the fwnode is released — TODO confirm */
	irq_domain_free_fwnode(domain_handle);