/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif
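
/*
 * Note: S32C1I is the Xtensa conditional-store (compare-and-swap)
 * instruction; the kernel's SMP atomics and spinlocks are built on it,
 * hence the hard error above when it is not configured.
 */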

static void system_invalidate_dcache_range(unsigned long start,
					   unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
						 unsigned long size);

/* IPI (Inter Processor Interrupt) */

static irqreturn_t ipi_interrupt(int irq, void *dev_id);

void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);

	if (request_irq(irq, ipi_interrupt, IRQF_PERCPU, "ipi", NULL))
		pr_err("Failed to request irq %u (ipi)\n", irq);
}

static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);

	return ((syscfgid >> 18) & 0xf) + 1;
}

static inline int get_core_id(void)
{
	/* Bits 0..13 of SYSCFGID contain the core id. */
	unsigned int core_id = get_er(SYSCFGID);

	return core_id & 0x3fff;
}
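
/*
 * Worked example (SYSCFGID value assumed purely for illustration): if
 * get_er(SYSCFGID) returned 0x000c0001, bits 18..21 would hold 0x3, so
 * get_core_count() = 3 + 1 = 4, and bits 0..13 would hold 0x0001, so
 * get_core_id() = 1.
 */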

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

	for_each_possible_cpu(i)
		set_cpu_present(i, true);
}

void __init smp_init_cpus(void)
{
	unsigned i;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	if (ncpus > NR_CPUS) {
		ncpus = NR_CPUS;
		pr_info("%s: limiting core count to %d\n", __func__, ncpus);
	}

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	BUG_ON(cpu != 0);
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);

void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_MISC
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			 __func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		 __func__, boot_secondary_processors, cpu);
#endif

	secondary_trap_init();

	/* All kernel threads share the same mm context. */
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
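
/*
 * MPSCORE is the MX RunStall control register: a set bit stalls the
 * corresponding core, a cleared bit lets it run. Both helpers below are
 * meant to execute on core 0 (callers route them there with
 * smp_call_function_single()), since a stalled core cannot unstall
 * itself.
 */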

static void mx_cpu_start(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}

static void mx_cpu_stop(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}

#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;
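
/*
 * Boot handshake, as implied by the pairing-memw comments below: core 0
 * publishes a nonzero ccount in cpu_start_ccount, and the secondary's
 * reset code (.Lboot_secondary) is expected to clear it once it is
 * demonstrably fetching and executing instructions. boot_secondary()
 * requires this exchange to succeed twice within the timeout before the
 * CPU is considered up.
 */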

static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	WRITE_ONCE(cpu_start_id, cpu);
	/* Pairs with the third memw in cpu_restart. */
	mb();
	system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
					     sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);
		WRITE_ONCE(cpu_start_ccount, ccount);
		do {
			/*
			 * Pairs with the first two memws in
			 * .Lboot_secondary.
			 */
			mb();
			ccount = READ_ONCE(cpu_start_ccount);
		} while (ccount && time_before(jiffies, timeout));

		if (ccount) {
			smp_call_function_single(0, mx_cpu_stop,
						 (void *)cpu, 1);
			WRITE_ONCE(cpu_start_ccount, 0);
			return -EIO;
		}
	}
	return 0;
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();

	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
		 __func__, cpu, idle, start_info.stack);

	init_completion(&cpu_running);
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));
		if (!cpu_online(cpu))
			ret = -EIO;
	}
	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/* OK - migrate IRQs away from this CPU */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	invalidate_page_directory();

	clear_tasks_mm_cpumask(cpu);
	return 0;
}

static void platform_cpu_kill(unsigned int cpu)
{
	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}
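
/*
 * The restart code on the dying CPU is expected to negate cpu_start_id
 * once it has parked at the reset vector (see the pairing-memw comments
 * around cpu_restart); observing cpu_start_id == -cpu therefore means
 * the CPU can safely be stalled via platform_cpu_kill().
 */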

/*
 * Called on the thread which is asking for a CPU to be shut down;
 * waits until shutdown has completed, or it has timed out.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
					       sizeof(cpu_start_id));
		/* Pairs with the second memw in cpu_restart. */
		mb();
		if (READ_ONCE(cpu_start_id) == -cpu) {
			platform_cpu_kill(cpu);
			return;
		}
	}
	pr_err("CPU%u: unable to kill\n", cpu);
}

void arch_cpu_idle_dead(void)
{
	cpu_die();
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	idle_task_exit();
	local_irq_disable();
	__asm__ __volatile__(
			"	movi	a2, cpu_restart\n"
			"	jx	a2\n");
}

#endif /* CONFIG_HOTPLUG_CPU */

enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);
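
/*
 * MIPISET(msg_id) is the MX register that raises IPI bit msg_id on
 * every core whose bit is set in the value written, so sending an IPI
 * to any set of CPUs is a single register write with a CPU bitmask.
 */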

static void send_ipi_message(const struct cpumask *callmask,
			     enum ipi_msg_type msg_id)
{
	int index;
	unsigned long mask = 0;

	for_each_cpu(index, callmask)
		mask |= 1 << index;
	set_er(mask, MIPISET(msg_id));
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi_message(&targets, IPI_CPU_STOP);
}

static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}
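
/*
 * Reading MIPICAUSE(cpu) returns the pending IPI bits for this core,
 * and writing the same value back acknowledges (clears) exactly those
 * bits; the loop below repeats until no new bits arrived between the
 * read and the ack.
 */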

irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

	for (;;) {
		unsigned int msg;

		msg = get_er(MIPICAUSE(cpu));
		set_er(msg, MIPICAUSE(cpu));
		if (!msg)
			break;
		if (msg & (1 << IPI_CALL_FUNC)) {
			++ipi->ipi_count[IPI_CALL_FUNC];
			generic_smp_call_function_interrupt();
		}
		if (msg & (1 << IPI_RESCHEDULE)) {
			++ipi->ipi_count[IPI_RESCHEDULE];
			scheduler_ipi();
		}
		if (msg & (1 << IPI_CPU_STOP)) {
			++ipi->ipi_count[IPI_CPU_STOP];
			ipi_cpu_stop(cpu);
		}
	}
	return IRQ_HANDLED;
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
				   per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, " %s\n", ipi_text[i].long_text);
	}
}

int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}

/* TLB flush functions */
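
/*
 * Shared argument block for the IPI flush helpers below. addr1/addr2
 * are overloaded per call site: start/end of a range for the range
 * flushes, address/pfn for flush_cache_page(), and start/size for the
 * system_*_dcache_range() helpers at the bottom of this file.
 */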

struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}

static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};

	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}

static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}

static void ipi_flush_tlb_kernel_range(void *arg)
{
	struct flush_data *fd = arg;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
}
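
/*
 * Passing pointers to on-stack flush_data to remote CPUs, as the
 * helpers above and below do, is safe only because the final
 * on_each_cpu() argument (wait = 1) blocks the caller until every CPU
 * has finished the handler, so fd cannot go out of scope while a remote
 * CPU is still using it.
 */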

/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}

static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;

	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};

	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}

static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;

	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}

static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;

	local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
EXPORT_SYMBOL(flush_icache_range);
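
/*
 * flush_icache_range() is the only flush in this file that is exported,
 * presumably so that modules which generate or patch code can make it
 * visible to the instruction fetch path on all cores.
 */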

/* ------------------------------------------------------------------------- */

static void ipi_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;

	__invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_invalidate_dcache_range(unsigned long start,
					   unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};

	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}

static void ipi_flush_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;

	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_flush_invalidate_dcache_range(unsigned long start,
						 unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};

	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}