csky: Add arch_show_interrupts for IPI interrupts
arch/csky/kernel/smp.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif

enum ipi_message_type {
	IPI_EMPTY,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_IRQ_WORK,
	IPI_MAX
};

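/*
 * Per-cpu IPI bookkeeping: @bits is the mask of IPI operations pending on
 * the CPU, @stats counts how many of each type have been handled and is
 * reported via arch_show_interrupts().  The ____cacheline_aligned markers
 * keep the remotely-written pending mask and the locally-updated counters
 * in separate cache lines.
 */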
struct ipi_data_struct {
	unsigned long bits ____cacheline_aligned;
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);

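/*
 * Per-cpu IPI handler: atomically grab and clear the pending-operation mask,
 * service each requested operation while bumping its counter, and loop in
 * case new bits were set while we were working.  Any bit at or above IPI_MAX
 * indicates a corrupted mask, hence the BUG_ON().
 */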
static irqreturn_t handle_ipi(int irq, void *dev)
{
	unsigned long *stats = this_cpu_ptr(&ipi_data)->stats;

	while (true) {
		unsigned long ops;

		ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

		BUG_ON((ops >> IPI_MAX) != 0);
	}

	return IRQ_HANDLED;
}

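/*
 * The interrupt controller driver registers its IPI trigger callback and the
 * per-cpu IPI irq number here (the first caller wins); setup_smp_ipi() later
 * requests that irq with handle_ipi() as the handler.
 */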
static void (*send_arch_ipi)(const struct cpumask *mask);

static int ipi_irq;
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
	if (send_arch_ipi)
		return;

	send_arch_ipi = func;
	ipi_irq = irq;
}

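/*
 * Mark the requested operation as pending on every target CPU, then raise
 * the hardware IPI.  The smp_mb() orders the set_bit() stores before the
 * interrupt is triggered, so the receiving handle_ipi() sees the new bits.
 */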
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	for_each_cpu(i, to_whom)
		set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

	smp_mb();
	send_arch_ipi(to_whom);
}

static const char * const ipi_names[] = {
	[IPI_EMPTY]		= "Empty interrupts",
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_IRQ_WORK]		= "Irq work interrupts",
};

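/*
 * Hook called from the generic /proc/interrupts code: print one "IPIn" row
 * per IPI type with the per-cpu counts collected in handle_ipi(), followed
 * by the human-readable name from ipi_names[].
 */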
int arch_show_interrupts(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ",
				per_cpu_ptr(&ipi_data, cpu)->stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}

	return 0;
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void ipi_stop(void *unused)
{
	while (1);
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

static int ipi_dummy_dev;

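/*
 * Request the per-cpu IPI interrupt registered by the irqchip driver via
 * set_send_ipi() and enable it on the boot CPU; secondary CPUs enable their
 * copy in csky_start_secondary().
 */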
void __init setup_smp_ipi(void)
{
	int rc;

	if (ipi_irq == 0)
		return;

	rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
				&ipi_dummy_dev);
	if (rc)
		panic("%s IRQ request failed\n", __func__);

	enable_percpu_irq(ipi_irq, 0);
}

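/*
 * Walk the devicetree cpu nodes and mark every available CPU whose "reg" id
 * is below NR_CPUS as possible and present, so the rest of the SMP code
 * knows about it before any secondary is actually started.
 */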
void __init setup_smp(void)
{
	struct device_node *node = NULL;
	int cpu;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		if (of_property_read_u32(node, "reg", &cpu))
			continue;

		if (cpu >= NR_CPUS)
			continue;

		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}

extern void _start_smp_secondary(void);

volatile unsigned int secondary_hint;
volatile unsigned int secondary_hint2;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;

unsigned long secondary_msa1;

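/*
 * Bring up one secondary CPU: publish its idle-thread stack and the boot
 * CPU's control-register values through the secondary_* globals, flush them
 * out of the cache, then either release the CPU from the SMP reset control
 * register or, if it is already running, kick it with an IPI.  Spin until
 * the new CPU has marked itself online.
 */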
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	unsigned long mask = 1 << cpu;

	secondary_stack =
		(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
	secondary_hint = mfcr("cr31");
	secondary_hint2 = mfcr("cr<21, 1>");
	secondary_ccr  = mfcr("cr18");
	secondary_msa1 = read_mmu_msa1();

	/*
	 * The other CPUs are still held in reset, so flush the data set up
	 * above from the cache out to memory; the secondary CPU picks it up
	 * in csky_start_secondary().
	 */
	mtcr("cr17", 0x22);

	if (mask & mfcr("cr<29, 0>")) {
		send_arch_ipi(cpumask_of(cpu));
	} else {
		/* Enable cpu in SMP reset ctrl reg */
		mask |= mfcr("cr<29, 0>");
		mtcr("cr<29, 0>", mask);
	}

	/* Wait for the cpu online */
	while (!cpu_online(cpu));

	secondary_stack = 0;

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

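/*
 * C entry point for a secondary CPU, reached from the low-level startup
 * code: restore the control registers saved by __cpu_up(), set up the MMU,
 * FPU and per-cpu IPI, attach to init_mm, then mark the CPU online and fall
 * into the idle loop.
 */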
void csky_start_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	mtcr("cr31", secondary_hint);
	mtcr("cr<21, 1>", secondary_hint2);
	mtcr("cr18", secondary_ccr);

	mtcr("vbr", vec_base);

	flush_tlb_all();
	write_mmu_pagemask(0);
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

	enable_percpu_irq(ipi_irq, 0);

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	irq_migrate_all_off_this_cpu();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: shutdown failed\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);
}

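/*
 * Called on the dying CPU from the idle loop: report death to __cpu_die(),
 * then loop in arch_cpu_idle() until __cpu_up() publishes a fresh
 * secondary_stack, at which point we switch to that stack and re-enter
 * csky_start_secondary() as if the CPU had just been released from reset.
 */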
void arch_cpu_idle_dead(void)
{
	idle_task_exit();

	cpu_report_death();

	while (!secondary_stack)
		arch_cpu_idle();

	local_irq_disable();

	asm volatile(
		"mov	sp, %0\n"
		"mov	r8, %0\n"
		"jmpi	csky_start_secondary"
		:
		: "r" (secondary_stack));
}
#endif