/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/time.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cpc.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_running);

/*
 * A logical CPU mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);
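
/*
 * Note: cpu_foreign_map[cpu] ends up holding the online CPUs that are not
 * siblings of that CPU, reduced to one VPE per foreign core, so cross-CPU
 * cache/TLB maintenance can target a single representative VPE per remote
 * core (see calculate_cpu_foreign_map() below).
 */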

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;
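
/*
 * CPUs known to be coherent with the rest of the system; the IPI send path
 * below uses this to decide whether a target core may first need to be
 * powered up via the CPC before an IPI can be delivered to it.
 */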
cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
/* Cached IRQ descriptors for the two IPIs, used by the fast send path */
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
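/*
 * Two passes: first build a temporary mask containing one representative
 * (the first online sibling) per core, then give every online CPU that mask
 * minus its own siblings.
 */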
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
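
/*
 * Illustrative usage (not from this file): platform setup code registers its
 * ops early during boot, e.g. a vSMP/MT platform would do something like
 *
 *	register_smp_ops(&vsmp_smp_ops);
 *
 * before smp_prepare_cpus() dereferences mp_ops.
 */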

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;
	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;
	default:
		BUG();
	}
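
	/*
	 * An IPI can only be taken by a core that is powered up and coherent.
	 * If the Cluster Power Controller is present, poke any target core
	 * whose CPUs are not yet in cpu_coherent_mask with a PWRUP command
	 * until it comes up and can receive the interrupt.
	 */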
	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

static void smp_ipi_init_one(unsigned int virq,
			     struct irqaction *action)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = setup_irq(virq, action);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs, so
	 * fail loudly if that is the case. Otherwise simply return, skipping
	 * IPI setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	sched_virq = virq;
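
	/*
	 * With a per-CPU IPI domain each target CPU gets its own virq
	 * (call_virq + cpu / sched_virq + cpu), so one irqaction is hooked up
	 * per CPU; otherwise a single virq serves the whole mask.
	 */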
	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, &irq_call);
			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
		}
	} else {
		smp_ipi_init_one(call_virq, &irq_call);
		smp_ipi_init_one(sched_virq, &irq_resched);
	}

	return 0;
}

int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			remove_irq(call_virq + cpu, &irq_call);
			remove_irq(sched_virq + cpu, &irq_resched);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);

	return 0;
}

static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);
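
	/*
	 * Cache the IRQ descriptors so that mips_smp_send_ipi_mask() can use
	 * __ipi_send_mask() directly, without looking up the descriptor on
	 * every IPI.
	 */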
	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif /* CONFIG_GENERIC_IRQ_IPI */

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);
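
	/*
	 * calculate_cpu_foreign_map() below walks the online mask, so this
	 * CPU must be marked online first for it to be taken into account.
	 */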
	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	/* Notify the boot CPU that we're up and counters can be synchronised */
	complete(&cpu_running);
	synchronise_count_slave(cpu);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * may have bad effects as the counter may not yet be stable.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}
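
/*
 * Boot-CPU side of bringing a secondary online: kick the CPU through the
 * platform ops, then wait for start_secondary() on the new CPU to signal
 * cpu_running before synchronising the cycle counters with it.
 */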
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/*
	 * We must check for timeout here, as the CPU will not be marked
	 * online until the counters are synchronised.
	 */
	if (!wait_for_completion_timeout(&cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus is invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-cpu interrupts have to be sent.
 * Another case where inter-cpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */
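
/*
 * Clearing cpu_context(cpu, mm) is what forces get_new_mmu_context() to hand
 * out a fresh ASID the next time switch_mm() runs the mm on that CPU, which
 * makes the stale TLB entries unreachable without an immediate IPI.
 */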

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * the ASID without it appearing to has_valid_asid()
			 * as if the mm has been completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = !exec;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate the ASID without it appearing to
			 * has_valid_asid() as if the mm has been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 1;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
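
/*
 * tick_broadcast_count acts as a simple "IPI already in flight" flag: only
 * the 0 -> 1 transition queues the per-CPU csd, and the receiving CPU resets
 * the counter once it has handled the broadcast, so the csd is never queued
 * a second time while still pending.
 */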

void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
	call_single_data_t *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */