/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
        IPI_WAKEUP,
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_IRQ_WORK,
        IPI_COMPLETION,
        IPI_CPU_BACKTRACE,
        /*
         * SGI8-15 can be reserved by secure firmware, and thus may
         * not be usable by the kernel. Please keep the above limited
         * to at most 8 entries.
         */
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

void __init smp_set_ops(const struct smp_operations *ops)
{
        if (ops)
                smp_ops = *ops;
}

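/*
 * Page directory value in the form the secondary boot code expects:
 * with LPAE this is a PFN, so the value fits in 32 bits even when
 * physical addresses are wider; otherwise it is the physical address.
 */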
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
        return __phys_to_pfn(virt_to_phys(pgd));
#else
        return virt_to_phys(pgd);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        if (!smp_ops.smp_boot_secondary)
                return -ENOSYS;

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
        secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
        secondary_data.pgdir = virt_to_phys(idmap_pgd);
        secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
        sync_cache_w(&secondary_data);

        /*
         * Now bring the CPU into our world.
         */
        ret = smp_ops.smp_boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        memset(&secondary_data, 0, sizeof(secondary_data));
        return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
        if (smp_ops.smp_init_cpus)
                smp_ops.smp_init_cpus();
}

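/*
 * Whether the platform registered a way to boot secondary CPUs, i.e.
 * whether an smp_boot_secondary() method is available.
 */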
int platform_can_secondary_boot(void)
{
        return !!smp_ops.smp_boot_secondary;
}

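/*
 * CPU hotplug is only advertised when CONFIG_HOTPLUG_CPU is enabled and
 * the platform supplies a cpu_kill() method.
 */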
int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        if (smp_ops.cpu_kill)
                return 1;
#endif

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
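/*
 * Platform hooks for CPU offlining: cpu_kill() is invoked on the
 * requesting CPU once the dying CPU has signalled completion, and
 * cpu_disable() lets the platform veto offlining a particular CPU.
 */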
static int platform_cpu_kill(unsigned int cpu)
{
        if (smp_ops.cpu_kill)
                return smp_ops.cpu_kill(cpu);
        return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
        if (smp_ops.cpu_disable)
                return smp_ops.cpu_disable(cpu);

        return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
        /* cpu_die must be specified to support hotplug */
        if (!smp_ops.cpu_die)
                return 0;

        if (smp_ops.cpu_can_disable)
                return smp_ops.cpu_can_disable(cpu);

        /*
         * By default, allow disabling all CPUs except the first one,
         * since this is special on a lot of platforms, e.g. because
         * of clock tick interrupts.
         */
        return cpu != 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         *
         * Caches are flushed to the Level of Unification Inner Shareable
         * to write-back dirty lines to unified caches shared by all CPUs.
         */
        flush_cache_louis();
        local_flush_tlb_all();

        clear_tasks_mm_cpumask(cpu);

        return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it times out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_debug("CPU%u: shutdown\n", cpu);

        /*
         * platform_cpu_kill() is generally expected to do the powering off
         * and/or cutting of clocks to the dying CPU.  Optionally, this may
         * be done by the CPU which is dying in preference to supporting
         * this call, but that means there is _no_ synchronisation between
         * the requesting CPU and the dying CPU actually losing power.
         */
        if (!platform_cpu_kill(cpu))
                pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /*
         * Flush the data out of the L1 cache for this CPU.  This must be
         * before the completion to ensure that data is safely written out
         * before platform_cpu_kill() gets called - which may disable
         * *this* CPU and power down its cache.
         */
        flush_cache_louis();

        /*
         * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
         * this returns, power and/or clocks can be removed at any point
         * from this CPU and its cache by platform_cpu_kill().
         */
        complete(&cpu_died);

        /*
         * Ensure that the cache lines associated with that completion are
         * written out.  This covers the case where _this_ CPU is doing the
         * powering down, to ensure that the completion is visible to the
         * CPU waiting for this one.
         */
        flush_cache_louis();

        /*
         * The actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.  This may remove power, or it may simply spin.
         *
         * Platforms are generally expected *NOT* to return from this call,
         * although there are some which do because they have no way to
         * power down the CPU.  These platforms are the _only_ reason we
         * have a return path which uses the fragment of assembly below.
         *
         * The return path should not be used for platforms which can
         * power off the CPU.
         */
        if (smp_ops.cpu_die)
                smp_ops.cpu_die(cpu);

        pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
                cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
        cpu_info->cpuid = read_cpuid_id();

        store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;

        /*
         * The identity mapping is uncached (strongly ordered), so
         * switch away from it before attempting any exclusive accesses.
         */
        cpu_switch_mm(mm->pgd, mm);
        local_flush_bp_all();
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        cpu = smp_processor_id();
        mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        cpu_init();

#ifndef CONFIG_MMU
        setup_vectors_base();
#endif
        pr_debug("CPU%u: Booted secondary processor\n", cpu);

        preempt_disable();
        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        if (smp_ops.smp_secondary_init)
                smp_ops.smp_secondary_init(cpu);

        notify_cpu_starting(cpu);

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        local_irq_enable();
        local_fiq_enable();
        local_abt_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

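/*
 * Called once all requested CPUs have been brought up: report the
 * combined BogoMIPS rating of the online CPUs and check that they all
 * entered the kernel in the same (HYP vs SVC) exception mode.
 */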
void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);

        hyp_mode_check();
}

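/*
 * Record the boot CPU's per-cpu data offset so that per-cpu accesses
 * resolve correctly before any secondary CPU is started.
 */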
void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time. A platform should
                 * re-initialise the map in the platform's smp_prepare_cpus()
                 * if present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(cpu_possible_mask);

                /*
                 * Initialise the SCU if there is more than one CPU
                 * and let them know where to start.
                 */
                if (smp_ops.smp_prepare_cpus)
                        smp_ops.smp_prepare_cpus(max_cpus);
        }
}

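/*
 * The low-level function used to raise an IPI, registered by the
 * interrupt controller driver; only the first registration is kept.
 */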
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        if (!__smp_cross_call)
                __smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)  [x] = s
        S(IPI_WAKEUP, "CPU wakeup interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
        S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
}

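/*
 * Print one row of per-CPU counts for each IPI type, in the style used
 * for /proc/interrupts.
 */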
void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}

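/*
 * Total number of IPIs handled by @cpu, summed over all IPI types.
 */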
u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}

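/* Helpers raising a specific IPI type on a target CPU or cpumask. */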
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
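/*
 * Kick the local CPU with IPI_IRQ_WORK so that pending irq_work runs in
 * interrupt context; this is a no-op when no interrupt can be raised
 * for IPIs.
 */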
void arch_irq_work_raise(void)
{
        if (arch_irq_work_has_interrupt())
                smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state <= SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                pr_crit("CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

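/*
 * IPI_COMPLETION support: a caller registers a completion for a target
 * CPU and raises the IPI; the receiving CPU completes it from
 * ipi_complete().
 */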
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
        per_cpu(cpu_completion, cpu) = completion;
        return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
        complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
        handle_IPI(ipinr, regs);
}

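/*
 * Demultiplex an incoming IPI: account for it, then dispatch to the
 * handler for its type.
 */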
void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if ((unsigned)ipinr < NR_IPI) {
                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }

        switch (ipinr) {
        case IPI_WAKEUP:
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
                tick_receive_broadcast();
                irq_exit();
                break;
#endif

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                ipi_cpu_stop(cpu);
                irq_exit();
                break;

#ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
                irq_enter();
                irq_work_run();
                irq_exit();
                break;
#endif

        case IPI_COMPLETION:
                irq_enter();
                ipi_complete(cpu);
                irq_exit();
                break;

        case IPI_CPU_BACKTRACE:
                printk_nmi_enter();
                irq_enter();
                nmi_cpu_backtrace(regs);
                irq_exit();
                printk_nmi_exit();
                break;

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n",
                        cpu, ipinr);
                break;
        }

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

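/*
 * Stop all other CPUs (e.g. on reboot or panic): send IPI_CPU_STOP to
 * every other online CPU and wait up to a second for them to go
 * offline.
 */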
void smp_send_stop(void)
{
        unsigned long timeout;
        struct cpumask mask;

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        if (!cpumask_empty(&mask))
                smp_cross_call(&mask, IPI_CPU_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

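/*
 * The delay loops are calibrated against loops_per_jiffy, which is only
 * valid at the frequency it was measured at.  Rescale the global and
 * per-CPU values around cpufreq transitions, unless the loop is
 * frequency-invariant (CPUFREQ_CONST_LOOPS).
 */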
#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
                                        unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        int cpu = freq->cpu;

        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;

        if (!per_cpu(l_p_j_ref, cpu)) {
                per_cpu(l_p_j_ref, cpu) =
                        per_cpu(cpu_data, cpu).loops_per_jiffy;
                per_cpu(l_p_j_ref_freq, cpu) = freq->old;
                if (!global_l_p_j_ref) {
                        global_l_p_j_ref = loops_per_jiffy;
                        global_l_p_j_ref_freq = freq->old;
                }
        }

        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
                                                global_l_p_j_ref_freq,
                                                freq->new);
                per_cpu(cpu_data, cpu).loops_per_jiffy =
                        cpufreq_scale(per_cpu(l_p_j_ref, cpu),
                                        per_cpu(l_p_j_ref_freq, cpu),
                                        freq->new);
        }
        return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
        .notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
        return cpufreq_register_notifier(&cpufreq_notifier,
                                                CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

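/*
 * Backtrace requests from nmi_trigger_cpumask_backtrace() are delivered
 * with an ordinary IPI (IPI_CPU_BACKTRACE) rather than a true NMI.
 */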
static void raise_nmi(cpumask_t *mask)
{
        smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}