// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
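
/*
 * Note: the secondary boot path in arch/arm/kernel/head.S reads
 * secondary_data with the MMU (and possibly the caches) still off,
 * so __cpu_up() below must push these writes out to the point of
 * coherency with sync_cache_w() before waking the new core.
 */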
enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	NR_IPI,
	/*
	 * CPU_BACKTRACE is special and not included in NR_IPI
	 * or traceable with trace_ipi_*
	 */
	IPI_CPU_BACKTRACE = NR_IPI,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
	MAX_IPI
};
static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;

static void ipi_setup(int cpu);
static void ipi_teardown(int cpu);
static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}
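
/*
 * Illustrative sketch, not taken from this file: a platform typically
 * provides its own smp_operations and hooks them up from its (here
 * hypothetical) machine descriptor, e.g.
 *
 *	static const struct smp_operations foo_smp_ops __initconst = {
 *		.smp_prepare_cpus	= foo_smp_prepare_cpus,
 *		.smp_boot_secondary	= foo_boot_secondary,
 *	};
 *
 *	DT_MACHINE_START(FOO_DT, "Foo board")
 *		.smp	= smp_ops(foo_smp_ops),
 *	MACHINE_END
 *
 * which ends up here via smp_set_ops(). DT platforms can instead use
 * CPU_METHOD_OF_DECLARE() and the "enable-method" property.
 */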
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}
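
/*
 * With LPAE the page tables may live above 4GB, so the physical address
 * of the pgd need not fit in an unsigned long; handing over the PFN
 * instead keeps the value within 32 bits for the secondary's boot code.
 */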
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
	if (!cpu_vtable[cpu])
		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

	return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

static void secondary_biglittle_init(void)
{
	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
	return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	ret = secondary_biglittle_prepare(cpu);
	if (ret)
		return ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}
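
/*
 * __cpu_up() is invoked from the generic hotplug core (bringup_cpu()
 * in kernel/cpu.c) with the freshly allocated idle thread for the
 * target CPU; the completion it waits on above is signalled by
 * secondary_start_kernel() once the new CPU has marked itself online.
 */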
/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
	remove_cpu_topology(cpu);
#endif

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);
	ipi_teardown(cpu);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	return 0;
}
/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	clear_tasks_mm_cpumask(cpu);
	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	(void)cpu_report_death();

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
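
/*
 * To summarise the hotplug flow: the requesting CPU runs __cpu_die()
 * while the dying CPU runs __cpu_disable() and then, from the idle
 * loop, arch_cpu_idle_dead(); platform_cpu_kill() finally cuts power
 * or clocks from whichever side the platform supports.
 */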
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
	check_cpu_icache_size(cpuid);
}
/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	secondary_biglittle_init();

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

#ifndef CONFIG_MMU
	setup_vectors_base();
#endif
	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	ipi_setup(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);

	check_other_bugs();

	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
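
/*
 * Ordering matters in the hand-over above: the CPU must be marked
 * online before complete(&cpu_running) is issued, since __cpu_up()
 * treats "completion timed out or CPU still offline" as a boot failure.
 */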
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}
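
/*
 * max_cpus reflects the "maxcpus=" command line cap: the platform's
 * smp_prepare_cpus() callback only runs when at least one secondary
 * may actually be brought up.
 */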
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};
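
/*
 * These strings are what show_ipi_list() below prints next to the
 * per-CPU counts in /proc/interrupts; IPI_CPU_BACKTRACE deliberately
 * has no entry, since it sits outside NR_IPI.
 */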
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif
static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state <= SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1) {
		cpu_relax();
		wfe();
	}
}
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}
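
/*
 * The completion IPI lets one CPU hand a struct completion to another:
 * a caller registers its completion against the target CPU, arranges
 * for that CPU to receive IPI_COMPLETION, and blocks until the handler
 * signals it (the big.LITTLE switcher is one user of this interface).
 */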
/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}
static void do_handle_IPI(int ipinr)
{
	unsigned int cpu = smp_processor_id();

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		tick_receive_broadcast();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop(cpu);
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_work_run();
		break;
#endif

	case IPI_COMPLETION:
		ipi_complete(cpu);
		break;

	case IPI_CPU_BACKTRACE:
		printk_nmi_enter();
		nmi_cpu_backtrace(get_irq_regs());
		printk_nmi_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
}
/* Legacy version, should go away once all irqchips have been converted */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	do_handle_IPI(ipinr);
	irq_exit();

	set_irq_regs(old_regs);
}

static irqreturn_t ipi_handler(int irq, void *data)
{
	do_handle_IPI(irq - ipi_irq_base);
	return IRQ_HANDLED;
}
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
	__ipi_send_mask(ipi_desc[ipinr], target);
}
static void ipi_setup(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		enable_percpu_irq(ipi_irq_base + i, 0);
}

static void ipi_teardown(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		disable_percpu_irq(ipi_irq_base + i);
}
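
/*
 * Under this scheme IPIs are ordinary per-CPU interrupts: they are
 * requested once, globally, in set_smp_ipi_range() below, and then only
 * enabled or disabled on each CPU as it comes and goes.
 */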
void __init set_smp_ipi_range(int ipi_base, int n)
{
	int i;

	WARN_ON(n < MAX_IPI);
	nr_ipi = min(n, MAX_IPI);

	for (i = 0; i < nr_ipi; i++) {
		int err;

		err = request_percpu_irq(ipi_base + i, ipi_handler,
					 "IPI", &irq_stat);
		WARN_ON(err);

		ipi_desc[i] = irq_to_desc(ipi_base + i);
		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
	}

	ipi_irq_base = ipi_base;

	/* Setup the boot CPU immediately */
	ipi_setup(smp_processor_id());
}
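
/*
 * The caller here is the irqchip driver: the GIC drivers, for example,
 * map their SGIs and then hand them over with set_smp_ipi_range(base, 8)
 * once the IPI domain is in place.
 */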
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}
/* In case panic() is called at the same time on CPU1 and CPU2, and
 * CPU1 calls panic_smp_self_stop() before CPU2 calls
 * crash_smp_send_stop(), CPU1 can't receive the IPIs from CPU2 and
 * stays online forever, so kdump fails. Hence split out
 * panic_smp_self_stop() and add set_cpu_online(smp_processor_id(),
 * false).
 */
void panic_smp_self_stop(void)
{
	pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
		 smp_processor_id());
	set_cpu_online(smp_processor_id(), false);
	while (1)
		cpu_relax();
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpumask *cpus = freq->policy->cpus;
	int cpu, first = cpumask_first(cpus);
	unsigned int lpj;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, first)) {
		for_each_cpu(cpu, cpus) {
			per_cpu(l_p_j_ref, cpu) =
				per_cpu(cpu_data, cpu).loops_per_jiffy;
			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		}

		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);

		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
				    per_cpu(l_p_j_ref_freq, first), freq->new);
		for_each_cpu(cpu, cpus)
			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif
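
/*
 * cpufreq_scale() rescales linearly: lpj_new ~= lpj_ref * freq_new /
 * freq_ref. For example, a CPU calibrated to loops_per_jiffy = 2500000
 * at 500000 kHz gets loops_per_jiffy = 5000000 when it is clocked up
 * to 1000000 kHz, keeping udelay() roughly accurate across transitions.
 */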
static void raise_nmi(cpumask_t *mask)
{
	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}
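
/*
 * Since IPI_CPU_BACKTRACE sits beyond NR_IPI, raising it here bypasses
 * the ipi_types[] accounting and the trace_ipi_* tracepoints, matching
 * the note on the enum definition above.
 */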