// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/soc/fsl/qe/qe_ic.c
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc.  All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * QUICC ENGINE Interrupt Controller
 */

#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/qe.h>

#define NR_QE_IC_INTS           64

/* QE IC registers offset */
#define QEIC_CICR               0x00
#define QEIC_CIVEC              0x04
#define QEIC_CIPXCC             0x10
#define QEIC_CIPYCC             0x14
#define QEIC_CIPWCC             0x18
#define QEIC_CIPZCC             0x1c
#define QEIC_CIMR               0x20
#define QEIC_CRIMR              0x24
#define QEIC_CIPRTA             0x30
#define QEIC_CIPRTB             0x34
#define QEIC_CHIVEC             0x60

struct qe_ic {
        /* Control registers base */
        __be32 __iomem *regs;

        /* The remapper for this QEIC */
        struct irq_domain *irqhost;

        /* The "linux" controller struct */
        struct irq_chip hc_irq;

        /* VIRQ numbers of QE high/low irqs */
        unsigned int virq_high;
        unsigned int virq_low;
};

/*
 * QE interrupt controller internal structure
 */
struct qe_ic_info {
        /* Location of this source's bit in the mask register */
        u32     mask;

        /* Mask register offset */
        u32     mask_reg;

        /*
         * For grouped interrupt sources - the interrupt code as it
         * appears in the group priority register
         */
        u8      pri_code;

        /* Group priority register offset */
        u32     pri_reg;
};

static DEFINE_RAW_SPINLOCK(qe_ic_lock);

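/*
 * Per-source configuration, indexed by QE interrupt source number.
 * Entries left zero-initialized are reserved sources and are rejected
 * in qe_ic_host_map().
 */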
static struct qe_ic_info qe_ic_info[] = {
        [1] = {
               .mask = 0x00008000,
               .mask_reg = QEIC_CIMR,
               .pri_code = 0,
               .pri_reg = QEIC_CIPWCC,
               },
        [2] = {
               .mask = 0x00004000,
               .mask_reg = QEIC_CIMR,
               .pri_code = 1,
               .pri_reg = QEIC_CIPWCC,
               },
        [3] = {
               .mask = 0x00002000,
               .mask_reg = QEIC_CIMR,
               .pri_code = 2,
               .pri_reg = QEIC_CIPWCC,
               },
        [10] = {
                .mask = 0x00000040,
                .mask_reg = QEIC_CIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPZCC,
                },
        [11] = {
                .mask = 0x00000020,
                .mask_reg = QEIC_CIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPZCC,
                },
        [12] = {
                .mask = 0x00000010,
                .mask_reg = QEIC_CIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPZCC,
                },
        [13] = {
                .mask = 0x00000008,
                .mask_reg = QEIC_CIMR,
                .pri_code = 4,
                .pri_reg = QEIC_CIPZCC,
                },
        [14] = {
                .mask = 0x00000004,
                .mask_reg = QEIC_CIMR,
                .pri_code = 5,
                .pri_reg = QEIC_CIPZCC,
                },
        [15] = {
                .mask = 0x00000002,
                .mask_reg = QEIC_CIMR,
                .pri_code = 6,
                .pri_reg = QEIC_CIPZCC,
                },
        [20] = {
                .mask = 0x10000000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPRTA,
                },
        [25] = {
                .mask = 0x00800000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 0,
                .pri_reg = QEIC_CIPRTB,
                },
        [26] = {
                .mask = 0x00400000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPRTB,
                },
        [27] = {
                .mask = 0x00200000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPRTB,
                },
        [28] = {
                .mask = 0x00100000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPRTB,
                },
        [32] = {
                .mask = 0x80000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 0,
                .pri_reg = QEIC_CIPXCC,
                },
        [33] = {
                .mask = 0x40000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPXCC,
                },
        [34] = {
                .mask = 0x20000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPXCC,
                },
        [35] = {
                .mask = 0x10000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPXCC,
                },
        [36] = {
                .mask = 0x08000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 4,
                .pri_reg = QEIC_CIPXCC,
                },
        [40] = {
                .mask = 0x00800000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 0,
                .pri_reg = QEIC_CIPYCC,
                },
        [41] = {
                .mask = 0x00400000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPYCC,
                },
        [42] = {
                .mask = 0x00200000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPYCC,
                },
        [43] = {
                .mask = 0x00100000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPYCC,
                },
};

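/*
 * Register accessors: @reg is a byte offset (one of the QEIC_* values
 * above) converted to a __be32 word index; the QEIC registers are
 * big-endian.
 */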
static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
{
        return ioread32be(base + (reg >> 2));
}

static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
                               u32 value)
{
        iowrite32be(value, base + (reg >> 2));
}

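/* Retrieve the qe_ic instance stored as chip data on a Linux interrupt */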
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
        return irq_get_chip_data(virq);
}

static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
{
        return irq_data_get_irq_chip_data(d);
}

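/* Enable a source by setting its bit in the corresponding mask register */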
static void qe_ic_unmask_irq(struct irq_data *d)
{
        struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&qe_ic_lock, flags);

        temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
        qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
                    temp | qe_ic_info[src].mask);

        raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}

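/* Disable a source by clearing its bit in the corresponding mask register */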
static void qe_ic_mask_irq(struct irq_data *d)
{
        struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
        unsigned int src = irqd_to_hwirq(d);
        unsigned long flags;
        u32 temp;

        raw_spin_lock_irqsave(&qe_ic_lock, flags);

        temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
        qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
                    temp & ~qe_ic_info[src].mask);

        /* Flush the above write before enabling interrupts; otherwise,
         * spurious interrupts will sometimes happen.  To be 100% sure
         * that the write has reached the device before interrupts are
         * enabled, the mask register would have to be read back; however,
         * this is not required for correctness, only to avoid wasting
         * time on a large number of spurious interrupts.  In testing,
         * a sync reduced the observed spurious interrupts to zero.
         */
        mb();

        raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}

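/*
 * All QEIC sources are handled as level interrupts, so masking a source
 * also serves as its ack.
 */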
static struct irq_chip qe_ic_irq_chip = {
        .name = "QEIC",
        .irq_unmask = qe_ic_unmask_irq,
        .irq_mask = qe_ic_mask_irq,
        .irq_mask_ack = qe_ic_mask_irq,
};

static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
                            enum irq_domain_bus_token bus_token)
{
        /* Exact match, unless qe_ic node is NULL */
        struct device_node *of_node = irq_domain_get_of_node(h);
        return of_node == NULL || of_node == node;
}

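/*
 * Map a QEIC source to a Linux interrupt: reserved sources (those with
 * no mask bit in qe_ic_info[]) cannot be mapped; everything else is set
 * up as a level interrupt handled by the QEIC chip.
 */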
static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct qe_ic *qe_ic = h->host_data;
        struct irq_chip *chip;

        if (hw >= ARRAY_SIZE(qe_ic_info)) {
                pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
                return -EINVAL;
        }

        if (qe_ic_info[hw].mask == 0) {
                pr_err("Can't map reserved IRQ\n");
                return -EINVAL;
        }
        /* Default chip */
        chip = &qe_ic->hc_irq;

        irq_set_chip_data(virq, qe_ic);
        irq_set_status_flags(virq, IRQ_LEVEL);

        irq_set_chip_and_handler(virq, chip, handle_level_irq);

        return 0;
}

static const struct irq_domain_ops qe_ic_host_ops = {
        .match = qe_ic_host_match,
        .map = qe_ic_host_map,
        .xlate = irq_domain_xlate_onetwocell,
};

/* Return an interrupt vector or 0 if no interrupt is pending. */
static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{
        int irq;

        BUG_ON(qe_ic == NULL);

        /* get the interrupt source vector. */
        irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;

        if (irq == 0)
                return 0;

        return irq_linear_revmap(qe_ic->irqhost, irq);
}

/* Return an interrupt vector or 0 if no interrupt is pending. */
static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{
        int irq;

        BUG_ON(qe_ic == NULL);

        /* get the interrupt source vector. */
        irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;

        if (irq == 0)
                return 0;

        return irq_linear_revmap(qe_ic->irqhost, irq);
}

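/*
 * Chained handlers for the two QEIC output interrupts.  Each output is
 * wired to an interrupt of the parent controller; when it fires, the
 * pending QEIC source is looked up through CIVEC/CHIVEC and handled.
 */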
static void qe_ic_cascade_low(struct irq_desc *desc)
{
        struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
        unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
        struct irq_chip *chip = irq_desc_get_chip(desc);

        if (cascade_irq != 0)
                generic_handle_irq(cascade_irq);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}

static void qe_ic_cascade_high(struct irq_desc *desc)
{
        struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
        unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
        struct irq_chip *chip = irq_desc_get_chip(desc);

        if (cascade_irq != 0)
                generic_handle_irq(cascade_irq);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}

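/*
 * Cascade handler used when both QEIC outputs are muxed onto a single
 * parent interrupt: check the high-priority vector first, then fall
 * back to the low-priority one.
 */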
static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
{
        struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
        unsigned int cascade_irq;
        struct irq_chip *chip = irq_desc_get_chip(desc);

        cascade_irq = qe_ic_get_high_irq(qe_ic);
        if (cascade_irq == 0)
                cascade_irq = qe_ic_get_low_irq(qe_ic);

        if (cascade_irq != 0)
                generic_handle_irq(cascade_irq);

        chip->irq_eoi(&desc->irq_data);
}

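/*
 * Map the QEIC registers, create the interrupt domain and install the
 * chained handlers on the parent interrupt(s) described by @node.
 */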
static void __init qe_ic_init(struct device_node *node)
{
        void (*low_handler)(struct irq_desc *desc);
        void (*high_handler)(struct irq_desc *desc);
        struct qe_ic *qe_ic;
        struct resource res;
        int ret;

        ret = of_address_to_resource(node, 0, &res);
        if (ret)
                return;

        qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
        if (qe_ic == NULL)
                return;

        qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
                                               &qe_ic_host_ops, qe_ic);
        if (qe_ic->irqhost == NULL) {
                kfree(qe_ic);
                return;
        }

        qe_ic->regs = ioremap(res.start, resource_size(&res));

        qe_ic->hc_irq = qe_ic_irq_chip;

        qe_ic->virq_high = irq_of_parse_and_map(node, 0);
        qe_ic->virq_low = irq_of_parse_and_map(node, 1);

        if (!qe_ic->virq_low) {
                pr_err("Failed to map QE_IC low IRQ\n");
                kfree(qe_ic);
                return;
        }
        if (qe_ic->virq_high != qe_ic->virq_low) {
                low_handler = qe_ic_cascade_low;
                high_handler = qe_ic_cascade_high;
        } else {
                low_handler = qe_ic_cascade_muxed_mpic;
                high_handler = NULL;
        }

        qe_ic_write(qe_ic->regs, QEIC_CICR, 0);

        irq_set_handler_data(qe_ic->virq_low, qe_ic);
        irq_set_chained_handler(qe_ic->virq_low, low_handler);

        if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
                irq_set_handler_data(qe_ic->virq_high, qe_ic);
                irq_set_chained_handler(qe_ic->virq_high, high_handler);
        }
}

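/*
 * Find the QEIC device tree node, by compatible string or by the legacy
 * "qeic" device_type, and initialize it at subsys_initcall time.
 */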
static int __init qe_ic_of_init(void)
{
        struct device_node *np;

        np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
        if (!np) {
                np = of_find_node_by_type(NULL, "qeic");
                if (!np)
                        return -ENODEV;
        }
        qe_ic_init(np);
        of_node_put(np);
        return 0;
}
subsys_initcall(qe_ic_of_init);