/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/cacheflush.h>
#include <asm/coprocessor.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif

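/*
 * S32C1I ("store 32-bit compare conditional") is the instruction the
 * Xtensa atomic and locking primitives are built on; without it there
 * is no way to implement the atomics that SMP requires.
 */
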
static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size);

/* IPI (Inter-Processor Interrupt) */

#define IPI_IRQ	0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);

void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);

	if (request_irq(irq, ipi_interrupt, IRQF_PERCPU, "ipi", NULL))
		pr_err("Failed to request irq %u (ipi)\n", irq);
}

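/*
 * Note: IPI_IRQ is a hwirq number on the MX interrupt distributor.
 * Passing a NULL domain to irq_create_mapping() maps it through the
 * default irq domain, which the MX irqchip driver (irq-xtensa-mx)
 * registers via irq_set_default_host() at init time.
 */
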
static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);

	return ((syscfgid >> 18) & 0xf) + 1;
}

static inline int get_core_id(void)
{
	/* Bits 0..13 of SYSCFGID contain the core id. */
	unsigned int core_id = get_er(SYSCFGID);

	return core_id & 0x3fff;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

	for_each_possible_cpu(i)
		set_cpu_present(i, true);
}

void __init smp_init_cpus(void)
{
	unsigned i;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	if (ncpus > NR_CPUS) {
		ncpus = NR_CPUS;
		pr_info("%s: limiting core count to %d\n", __func__, ncpus);
	}

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	BUG_ON(cpu != 0);
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);

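/*
 * Entry point for secondary cores, reached from the secondary reset
 * vector path in head.S.  The boot CPU blocks on cpu_running in
 * __cpu_up() until this function has brought the new CPU online and
 * signalled the completion.
 */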
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_MISC
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			 __func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		 __func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */
	secondary_trap_init();

	/* All kernel threads share the same mm context. */
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	trace_hardirqs_off();
	calibrate_delay();
	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);
	set_cpu_online(cpu, true);
	local_irq_enable();

	complete(&cpu_running);
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void mx_cpu_start(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}

static void mx_cpu_stop(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}

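/*
 * MPSCORE is the MX RunStall register: while bit N is set, core N is
 * stalled; clearing it lets the core run.  Both helpers above are only
 * ever run on the boot CPU (all callers use
 * smp_call_function_single(0, ...)), so there is a single writer and
 * the read-modify-write sequence needs no extra locking.
 */
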
#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;

static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	WRITE_ONCE(cpu_start_id, cpu);
	/* Pairs with the third memw in cpu_restart */
	mb();
	system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
					     sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		WRITE_ONCE(cpu_start_ccount, ccount);
		do {
			/* Pairs with the first two memws in .Lboot_secondary */
			mb();
			ccount = READ_ONCE(cpu_start_ccount);
		} while (ccount && time_before(jiffies, timeout));

		if (ccount) {
			smp_call_function_single(0, mx_cpu_stop,
						 (void *)cpu, 1);
			WRITE_ONCE(cpu_start_ccount, 0);
			return -EIO;
		}
	}
	return 0;
}

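/*
 * Boot handshake, as implemented above: the boot CPU releases the
 * secondary from RunStall, publishes a non-zero cycle count in
 * cpu_start_ccount and waits for the secondary's reset code
 * (.Lboot_secondary in head.S) to acknowledge it by storing 0 back.
 * The exchange runs twice; the intent is that the secondary loads the
 * second value into its own CCOUNT register, so both cores continue
 * with roughly matching cycle counters.
 */
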
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();
	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
		 __func__, cpu, idle, start_info.stack);

	init_completion(&cpu_running);
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));
		if (!cpu_online(cpu))
			ret = -EIO;
	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);
	return ret;
}

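/*
 * start_info is consumed by the secondary boot code and carries the
 * waking CPU's initial stack pointer; the wmb() in __cpu_up() above
 * publishes it to memory before boot_secondary() releases the core
 * from reset.
 */
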
#ifdef CONFIG_HOTPLUG_CPU

/* __cpu_disable runs on the processor to be shut down. */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline.  Once we clear this, we can't
	 * return, and we must not schedule until we're ready to
	 * give up the cpu.
	 */
	set_cpu_online(cpu, false);

#if XTENSA_HAVE_COPROCESSORS
	/* Flush coprocessor contexts active on the current CPU. */
	local_coprocessors_flush_release_all();
#endif
	/* OK - migrate IRQs away from this CPU */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove
	 * this CPU from the vm mask set of all processes.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	invalidate_page_directory();
	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static void platform_cpu_kill(unsigned int cpu)
{
	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	while (time_before(jiffies, timeout)) {
		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
					       sizeof(cpu_start_id));
		/* Pairs with the second memw in cpu_restart */
		mb();
		if (READ_ONCE(cpu_start_id) == -cpu) {
			platform_cpu_kill(cpu);
			return;
		}
	}
	pr_err("CPU%u: unable to kill\n", cpu);
}

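/*
 * A dying CPU jumps to cpu_restart (head.S), which negates
 * cpu_start_id once the core is safely parked.  __cpu_die() above
 * takes an observed value of -cpu as proof that the CPU has reached
 * its reset code and may finally be stalled via platform_cpu_kill().
 */
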
void arch_cpu_idle_dead(void)
{
	cpu_die();
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	idle_task_exit();
	local_irq_disable();
	__asm__ __volatile__(
			"	movi	a2, cpu_restart\n"
			"	jx	a2\n");
}

#endif /* CONFIG_HOTPLUG_CPU */

enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);

static void send_ipi_message(const struct cpumask *callmask,
			     enum ipi_msg_type msg_id)
{
	int index;
	unsigned long mask = 0;

	for_each_cpu(index, callmask)
		mask |= 1 << index;
	set_er(mask, MIPISET(msg_id));
}

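/*
 * MIPISET(msg_id) and MIPICAUSE(cpu) are MX external registers:
 * writing a CPU mask to MIPISET raises IPI message msg_id on every
 * core whose bit is set; each pending message appears as a bit in the
 * receiving core's MIPICAUSE, and writing that bit back clears it
 * (see ipi_interrupt() below).
 */
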
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi_message(&targets, IPI_CPU_STOP);
}

static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}

irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

	for (;;) {
		unsigned int msg = get_er(MIPICAUSE(cpu));

		set_er(msg, MIPICAUSE(cpu));
		if (!msg)
			break;

		if (msg & (1 << IPI_CALL_FUNC)) {
			++ipi->ipi_count[IPI_CALL_FUNC];
			generic_smp_call_function_interrupt();
		}
		if (msg & (1 << IPI_RESCHEDULE)) {
			++ipi->ipi_count[IPI_RESCHEDULE];
			scheduler_ipi();
		}
		if (msg & (1 << IPI_CPU_STOP)) {
			++ipi->ipi_count[IPI_CPU_STOP];
			ipi_cpu_stop(cpu);
		}
	}
	return IRQ_HANDLED;
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
				   per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, " %s\n", ipi_text[i].long_text);
	}
}

int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}

/* TLB flush functions */

struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

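/*
 * The SMP flush operations below all follow the same pattern: pack
 * the arguments into a flush_data on the caller's stack and run the
 * corresponding local_* flush on every online CPU via on_each_cpu().
 * The final argument of 1 makes on_each_cpu() wait until all handlers
 * have run, which also keeps the on-stack flush_data valid for the
 * duration of the call.
 */
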
static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}

static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};
	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}

static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}

static void ipi_flush_tlb_kernel_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
}

/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}

static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};
	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}

static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}

static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
EXPORT_SYMBOL(flush_icache_range);

/* ------------------------------------------------------------------------- */

static void ipi_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}

static void ipi_flush_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}

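/*
 * The system_*_dcache_range() helpers above exist for the hotplug
 * handshake: cpu_start_id is also accessed by a core whose caches are
 * not yet (or no longer) participating normally, so every other CPU
 * must write back or discard its own cached copy around each access.
 * See the callers in boot_secondary() and __cpu_die().
 */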