drivers/irqchip/irq-mips-cpu.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 *
 * Copyright (C) 2001 Ralf Baechle
 * Copyright (C) 2005  MIPS Technologies, Inc.  All rights reserved.
 *      Author: Maciej W. Rozycki <macro@mips.com>
 *
 * This file defines the irq handler for MIPS CPU interrupts.
 */

/*
 * Almost all MIPS CPUs define 8 interrupt sources.  They are typically
 * level triggered (i.e., cannot be cleared from CPU; must be cleared from
 * device).
 *
 * The first two are software interrupts (i.e. not exposed as pins) which
 * may be used for IPIs in multi-threaded single-core systems.
 *
 * The last one is usually the CPU timer interrupt if the counter register
 * is present, or for old CPUs with an external FPU by convention it's the
 * FPU exception interrupt.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>

#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/setup.h>

static struct irq_domain *irq_domain;
static struct irq_domain *ipi_domain;

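/*
 * hwirq 0..7 maps directly to bits IM0..IM7 of the CP0 Status register, so
 * masking/unmasking a line is a matter of clearing/setting IE_SW0 << hwirq.
 * The hazard barrier ensures the Status update has taken effect before we
 * return.
 */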
static inline void unmask_mips_irq(struct irq_data *d)
{
        set_c0_status(IE_SW0 << d->hwirq);
        irq_enable_hazard();
}

static inline void mask_mips_irq(struct irq_data *d)
{
        clear_c0_status(IE_SW0 << d->hwirq);
        irq_disable_hazard();
}

static struct irq_chip mips_cpu_irq_controller = {
        .name           = "MIPS",
        .irq_ack        = mask_mips_irq,
        .irq_mask       = mask_mips_irq,
        .irq_mask_ack   = mask_mips_irq,
        .irq_unmask     = unmask_mips_irq,
        .irq_eoi        = unmask_mips_irq,
        .irq_disable    = mask_mips_irq,
        .irq_enable     = unmask_mips_irq,
};

/*
 * Basically the same as above but taking care of all the MT stuff
 */

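/*
 * Clear any pending software interrupt for this line (with all VPEs
 * temporarily quiesced via dvpe/evpe) before unmasking it.
 */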
static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
        unsigned int vpflags = dvpe();

        clear_c0_cause(C_SW0 << d->hwirq);
        evpe(vpflags);
        unmask_mips_irq(d);
        return 0;
}

/*
 * While we ack the interrupt, interrupts are disabled and thus we don't need
 * to deal with concurrency issues.  Same for mips_cpu_irq_end.
 */
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
        unsigned int vpflags = dvpe();

        clear_c0_cause(C_SW0 << d->hwirq);
        evpe(vpflags);
        mask_mips_irq(d);
}

#ifdef CONFIG_GENERIC_IRQ_IPI

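/*
 * Raise an IPI by setting the corresponding software interrupt bit in the
 * target VPE's Cause register.  This only works for VPEs sharing the core
 * with the sender, which the WARN_ON below asserts.
 */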
static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        int vpflags;

        local_irq_save(flags);

        /* We can only send IPIs to VPEs within the local core */
        WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu));

        vpflags = dvpe();
        settc(cpu_vpe_id(&cpu_data[cpu]));
        write_vpe_c0_cause(read_vpe_c0_cause() | (C_SW0 << hwirq));
        evpe(vpflags);

        local_irq_restore(flags);
}

#endif /* CONFIG_GENERIC_IRQ_IPI */

static struct irq_chip mips_mt_cpu_irq_controller = {
        .name           = "MIPS",
        .irq_startup    = mips_mt_cpu_irq_startup,
        .irq_ack        = mips_mt_cpu_irq_ack,
        .irq_mask       = mask_mips_irq,
        .irq_mask_ack   = mips_mt_cpu_irq_ack,
        .irq_unmask     = unmask_mips_irq,
        .irq_eoi        = unmask_mips_irq,
        .irq_disable    = mask_mips_irq,
        .irq_enable     = unmask_mips_irq,
#ifdef CONFIG_GENERIC_IRQ_IPI
        .ipi_send_single = mips_mt_send_ipi,
#endif
};

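/*
 * Default (weak) dispatch routine: service the interrupt lines that are both
 * raised in Cause and enabled in Status, highest-numbered bit first.  Hardware
 * interrupts go to the CPU irq domain; the two software interrupts are routed
 * to the IPI domain when CONFIG_GENERIC_IRQ_IPI is enabled.
 */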
asmlinkage void __weak plat_irq_dispatch(void)
{
        unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
        int irq;

        if (!pending) {
                spurious_interrupt();
                return;
        }

        pending >>= CAUSEB_IP;
        while (pending) {
                struct irq_domain *d;

                irq = fls(pending) - 1;
                if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2)
                        d = ipi_domain;
                else
                        d = irq_domain;

                do_domain_IRQ(d, irq);
                pending &= ~BIT(irq);
        }
}

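/*
 * Map a CPU interrupt: the two software interrupts get the MT-aware chip on
 * MT/CMT systems (where they double as IPIs), everything else uses the plain
 * chip.  With vectored interrupts the common dispatch routine is installed as
 * the per-vector handler.
 */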
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
                             irq_hw_number_t hw)
{
        struct irq_chip *chip;

        if (hw < 2 && cpu_has_mipsmt) {
                /* Software interrupts are used for MT/CMT IPI */
                chip = &mips_mt_cpu_irq_controller;
        } else {
                chip = &mips_cpu_irq_controller;
        }

        if (cpu_has_vint)
                set_vi_handler(hw, plat_irq_dispatch);

        irq_set_chip_and_handler(irq, chip, handle_percpu_irq);

        return 0;
}

static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
        .map = mips_cpu_intc_map,
        .xlate = irq_domain_xlate_onecell,
};

#ifdef CONFIG_GENERIC_IRQ_IPI

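/* Tracks which of the two software interrupt lines are allocated as IPIs */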
struct cpu_ipi_domain_state {
        DECLARE_BITMAP(allocated, 2);
};

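/*
 * Allocate IPIs: hand out one of the two software interrupt hwirqs per
 * requested virq, wiring up the MT chip in both this domain and the parent
 * CPU interrupt domain.
 */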
static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
                              unsigned int nr_irqs, void *arg)
{
        struct cpu_ipi_domain_state *state = domain->host_data;
        unsigned int i, hwirq;
        int ret;

        for (i = 0; i < nr_irqs; i++) {
                hwirq = find_first_zero_bit(state->allocated, 2);
                if (hwirq == 2)
                        return -EBUSY;
                bitmap_set(state->allocated, hwirq, 1);

                ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq,
                                                    &mips_mt_cpu_irq_controller,
                                                    NULL);
                if (ret)
                        return ret;

                ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
                                                    &mips_mt_cpu_irq_controller,
                                                    NULL);
                if (ret)
                        return ret;

                ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
                if (ret)
                        return ret;
        }

        return 0;
}

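/*
 * Only match requests that explicitly ask for an IPI domain and, when a
 * device node is given, target our own node.
 */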
static int mips_cpu_ipi_match(struct irq_domain *d, struct device_node *node,
                              enum irq_domain_bus_token bus_token)
{
        bool is_ipi;

        switch (bus_token) {
        case DOMAIN_BUS_IPI:
                is_ipi = d->bus_token == bus_token;
                return (!node || (to_of_node(d->fwnode) == node)) && is_ipi;
        default:
                return 0;
        }
}

static const struct irq_domain_ops mips_cpu_ipi_chip_ops = {
        .alloc  = mips_cpu_ipi_alloc,
        .match  = mips_cpu_ipi_match,
};

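/*
 * Create the IPI domain as a hierarchy child of the CPU interrupt domain,
 * covering the two software interrupts, and tag it with the IPI bus token so
 * it can be found via mips_cpu_ipi_match().
 */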
static void mips_cpu_register_ipi_domain(struct device_node *of_node)
{
        struct cpu_ipi_domain_state *ipi_domain_state;

        ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
        if (!ipi_domain_state)
                panic("Failed to allocate MIPS CPU IPI domain state");
        ipi_domain = irq_domain_add_hierarchy(irq_domain,
                                              IRQ_DOMAIN_FLAG_IPI_SINGLE,
                                              2, of_node,
                                              &mips_cpu_ipi_chip_ops,
                                              ipi_domain_state);
        if (!ipi_domain)
                panic("Failed to add MIPS CPU IPI domain");
        irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
}

#else /* !CONFIG_GENERIC_IRQ_IPI */

static inline void mips_cpu_register_ipi_domain(struct device_node *of_node) {}

#endif /* !CONFIG_GENERIC_IRQ_IPI */

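/*
 * Common init path: mask the eight CPU interrupt lines and clear pending
 * software interrupts, register a legacy domain for them starting at
 * MIPS_CPU_IRQ_BASE, and, on MT-capable CPUs, stack the software interrupt
 * IPI domain on top.
 */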
static void __init __mips_cpu_irq_init(struct device_node *of_node)
{
        /* Mask interrupts. */
        clear_c0_status(ST0_IM);
        clear_c0_cause(CAUSEF_IP);

        irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
                                           &mips_cpu_intc_irq_domain_ops,
                                           NULL);
        if (!irq_domain)
                panic("Failed to add irqdomain for MIPS CPU");

        /*
         * Only proceed to register the software interrupt IPI implementation
         * for CPUs which implement the MIPS MT (multi-threading) ASE.
         */
        if (cpu_has_mipsmt)
                mips_cpu_register_ipi_domain(of_node);
}

void __init mips_cpu_irq_init(void)
{
        __mips_cpu_irq_init(NULL);
}

int __init mips_cpu_irq_of_init(struct device_node *of_node,
                                struct device_node *parent)
{
        __mips_cpu_irq_init(of_node);
        return 0;
}
IRQCHIP_DECLARE(cpu_intc, "mti,cpu-interrupt-controller", mips_cpu_irq_of_init);