/*
 * Copyright (C) 2016 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#define pr_fmt(fmt) "GIC-ODMI: " fmt

#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>

#define GICP_ODMIN_SET			0x40
#define   GICP_ODMI_INT_NUM_SHIFT	12
#define GICP_ODMIN_GM_EP_R0		0x110
#define GICP_ODMIN_GM_EP_R1		0x114
#define GICP_ODMIN_GM_EA_R0		0x108
#define GICP_ODMIN_GM_EA_R1		0x118

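/*
 * GICP_ODMIN_SET is the per-frame doorbell register: the MSI message
 * composed below points endpoints at it, and the written value carries
 * the interrupt number shifted by GICP_ODMI_INT_NUM_SHIFT. The GM_EP/
 * GM_EA registers appear to belong to the group-event mode, which this
 * driver leaves unused (see the comment below).
 */
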
/*
 * We don't support the group events, so we simply have 8 interrupts
 * per frame.
 */
#define NODMIS_SHIFT		3
#define NODMIS_PER_FRAME	(1 << NODMIS_SHIFT)
#define NODMIS_MASK		(NODMIS_PER_FRAME - 1)

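/*
 * A hwirq in the inner domain encodes both the frame and the interrupt
 * within the frame: the frame index is hwirq >> NODMIS_SHIFT and the
 * per-frame interrupt number is hwirq & NODMIS_MASK.
 */
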
struct odmi_data {
	struct resource res;
	void __iomem *base;
	unsigned int spi_base;
};

static struct odmi_data *odmis;
static unsigned long *odmis_bm;
static unsigned int odmis_count;

/* Protects odmis_bm */
static DEFINE_SPINLOCK(odmis_bm_lock);

static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct odmi_data *odmi;
	phys_addr_t addr;
	unsigned int odmin;

	if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
		return;

	odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
	odmin = d->hwirq & NODMIS_MASK;

	addr = odmi->res.start + GICP_ODMIN_SET;

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);
	msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
}

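/*
 * For example, if frame 1's register window starts at 0xf0301000 (a
 * made-up address), hwirq 11 maps to frame 1, interrupt 3, so the
 * composed message tells the endpoint to write 0x3000 (3 << 12) to
 * 0xf0301040 (frame base + GICP_ODMIN_SET).
 */
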
static struct irq_chip odmi_irq_chip = {
	.name			= "ODMI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= odmi_compose_msi_msg,
};

static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct odmi_data *odmi = NULL;
	struct irq_fwspec fwspec;
	struct irq_data *d;
	unsigned int hwirq, odmin;
	int ret;

	spin_lock(&odmis_bm_lock);
	hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
	if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
		spin_unlock(&odmis_bm_lock);
		return -ENOSPC;
	}

	__set_bit(hwirq, odmis_bm);
	spin_unlock(&odmis_bm_lock);

	odmi = &odmis[hwirq >> NODMIS_SHIFT];
	odmin = hwirq & NODMIS_MASK;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 3;
	fwspec.param[0] = GIC_SPI;
	/* GIC SPI numbers in a fwspec are relative to SPI 32 */
	fwspec.param[1] = odmi->spi_base - 32 + odmin;
	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret) {
		pr_err("Cannot allocate parent IRQ\n");
		/* Release the hwirq we reserved in the bitmap above */
		spin_lock(&odmis_bm_lock);
		__clear_bit(hwirq, odmis_bm);
		spin_unlock(&odmis_bm_lock);
		return ret;
	}

	/* Configure the interrupt line to be edge */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);

	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
				      &odmi_irq_chip, NULL);

	return 0;
}

static void odmi_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
		pr_err("Failed to teardown msi. Invalid hwirq %lu\n", d->hwirq);
		return;
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);

	/* Actually free the MSI */
	spin_lock(&odmis_bm_lock);
	__clear_bit(d->hwirq, odmis_bm);
	spin_unlock(&odmis_bm_lock);
}

static const struct irq_domain_ops odmi_domain_ops = {
	.alloc	= odmi_irq_domain_alloc,
	.free	= odmi_irq_domain_free,
};

static struct irq_chip odmi_msi_irq_chip = {
	.name	= "ODMI",
};

static struct msi_domain_ops odmi_msi_ops = {
};

static struct msi_domain_info odmi_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &odmi_msi_ops,
	.chip	= &odmi_msi_irq_chip,
};

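/*
 * Illustrative device tree node for this controller (the addresses,
 * sizes and SPI numbers below are made up for the example, not taken
 * from a real board):
 *
 *	odmi: odmi@300000 {
 *		compatible = "marvell,odmi-controller";
 *		msi-controller;
 *		marvell,odmi-frames = <4>;
 *		reg = <0x300000 0x4000>, <0x304000 0x4000>,
 *		      <0x308000 0x4000>, <0x30c000 0x4000>;
 *		marvell,spi-base = <128>, <136>, <144>, <152>;
 *	};
 *
 * mvebu_odmi_init() below reads "marvell,odmi-frames", then maps one
 * "reg" region and reads one "marvell,spi-base" entry per frame.
 */
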
static int __init mvebu_odmi_init(struct device_node *node,
				  struct device_node *parent)
{
	struct irq_domain *inner_domain, *plat_domain;
	int ret, i;

	if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
		return -EINVAL;

	odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
	if (!odmis)
		return -ENOMEM;

	odmis_bm = bitmap_zalloc(odmis_count * NODMIS_PER_FRAME, GFP_KERNEL);
	if (!odmis_bm) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	for (i = 0; i < odmis_count; i++) {
		struct odmi_data *odmi = &odmis[i];

		ret = of_address_to_resource(node, i, &odmi->res);
		if (ret)
			goto err_unmap;

		odmi->base = of_io_request_and_map(node, i, "odmi");
		if (IS_ERR(odmi->base)) {
			ret = PTR_ERR(odmi->base);
			goto err_unmap;
		}

		if (of_property_read_u32_index(node, "marvell,spi-base",
					       i, &odmi->spi_base)) {
			ret = -EINVAL;
			goto err_unmap;
		}
	}

	inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
						odmis_count * NODMIS_PER_FRAME,
						&odmi_domain_ops, NULL);
	if (!inner_domain) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	inner_domain->parent = irq_find_host(parent);

	plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
						     &odmi_msi_domain_info,
						     inner_domain);
	if (!plat_domain) {
		ret = -ENOMEM;
		goto err_remove_inner;
	}

	return 0;

err_remove_inner:
	irq_domain_remove(inner_domain);
err_unmap:
	for (i = 0; i < odmis_count; i++) {
		struct odmi_data *odmi = &odmis[i];

		if (odmi->base && !IS_ERR(odmi->base))
			iounmap(odmis[i].base);
	}
	bitmap_free(odmis_bm);
err_alloc:
	kfree(odmis);
	return ret;
}

IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);