// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>
#include <linux/kexec.h>
#include <linux/kvm_host.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
/*
 * As from 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its SVC
 * stack.
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
static int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP,
	NR_IPI
};
static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;

static void ipi_setup(int cpu);

#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu);
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return 0;
}
#endif
/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops->cpu_boot)
		return ops->cpu_boot(cpu);

	return -EOPNOTSUPP;
}
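/*
 * Completed by an incoming CPU at the end of secondary_start_kernel() once it
 * has marked itself online; __cpu_up() waits on it below.
 */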
static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.task = idle;
	secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
	update_cpu_boot_status(CPU_MMU_OFF);
	dcache_clean_inval_poc((unsigned long)&secondary_data,
			       (unsigned long)&secondary_data +
			       sizeof(secondary_data));

	/* Now bring the CPU into our world */
	ret = boot_secondary(cpu, idle);
	if (ret) {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
		return ret;
	}

	/*
	 * CPU was successfully started, wait for it to come online or
	 * time out.
	 */
	wait_for_completion_timeout(&cpu_running,
				    msecs_to_jiffies(5000));
	if (cpu_online(cpu))
		return 0;

	pr_crit("CPU%u: failed to come online\n", cpu);
	secondary_data.task = NULL;
	secondary_data.stack = NULL;
	dcache_clean_inval_poc((unsigned long)&secondary_data,
			       (unsigned long)&secondary_data +
			       sizeof(secondary_data));
	status = READ_ONCE(secondary_data.status);
	if (status == CPU_MMU_OFF)
		status = READ_ONCE(__early_cpu_boot_status);

	switch (status & CPU_BOOT_STATUS_MASK) {
	default:
		pr_err("CPU%u: failed in unknown state : 0x%lx\n",
		       cpu, status);
		cpus_stuck_in_kernel++;
		break;
	case CPU_KILL_ME:
		if (!op_cpu_kill(cpu)) {
			pr_crit("CPU%u: died during early boot\n", cpu);
			break;
		}
		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
		fallthrough;
	case CPU_STUCK_IN_KERNEL:
		pr_crit("CPU%u: is stuck in kernel\n", cpu);
		if (status & CPU_STUCK_REASON_52_BIT_VA)
			pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
		if (status & CPU_STUCK_REASON_NO_GRAN) {
			pr_crit("CPU%u: does not support %luK granule\n",
				cpu, PAGE_SIZE / SZ_1K);
		}
		cpus_stuck_in_kernel++;
		break;
	case CPU_PANIC_KERNEL:
		panic("CPU%u detected unsupported configuration\n", cpu);
	}

	return -EIO;
}
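/*
 * With pseudo-NMI support, regular interrupts are masked via the GIC PMR
 * rather than PSTATE.I. Prime the PMR on this CPU while PSTATE.I/F still
 * mask everything, so that no interrupt can be taken with the PMR in an
 * unknown state.
 */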
static void init_gic_priority_masking(void)
{
	u32 cpuflags;

	if (WARN_ON(!gic_enable_sre()))
		return;

	cpuflags = read_sysreg(daif);

	WARN_ON(!(cpuflags & PSR_I_BIT));
	WARN_ON(!(cpuflags & PSR_F_BIT));

	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}
/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	struct mm_struct *mm = &init_mm;
	const struct cpu_operations *ops;
	unsigned int cpu;

	cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	if (system_uses_irq_prio_masking())
		init_gic_priority_masking();

	rcu_cpu_starting(cpu);
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	check_local_cpu_capabilities();

	ops = get_cpu_ops(cpu);
	if (ops->cpu_postboot)
		ops->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	ipi_setup(cpu);

	store_cpu_topology(cpu);
	numa_add_cpu(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
		cpu, (unsigned long)mpidr,
		read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_daif_restore(DAIF_PROCCTX);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
280 #ifdef CONFIG_HOTPLUG_CPU
281 static int op_cpu_disable(unsigned int cpu)
283 const struct cpu_operations *ops = get_cpu_ops(cpu);
286 * If we don't have a cpu_die method, abort before we reach the point
287 * of no return. CPU0 may not have an cpu_ops, so test for it.
289 if (!ops || !ops->cpu_die)
293 * We may need to abort a hot unplug for some other mechanism-specific
296 if (ops->cpu_disable)
297 return ops->cpu_disable(cpu);
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	remove_cpu_topology(cpu);
	numa_remove_cpu(cpu);

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);
	ipi_teardown(cpu);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}
static int op_cpu_kill(unsigned int cpu)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!ops->cpu_kill)
		return 0;

	return ops->cpu_kill(cpu);
}
/*
 * Called on the thread which is asking for a CPU to be shut down;
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
}
/*
 * Called from the idle thread for the CPU which has been shut down.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	idle_task_exit();

	local_daif_mask();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	ops->cpu_die(cpu);

	BUG();
}
#endif
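/*
 * Best-effort variant of ops->cpu_die() for paths (early boot failure,
 * crash) that must also work when CONFIG_HOTPLUG_CPU=n or when no cpu_die
 * method is available; in those cases it does nothing.
 */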
static void __cpu_try_die(int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_die)
		ops->cpu_die(cpu);
#endif
}
/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);
	rcu_report_dead(cpu);

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		update_cpu_boot_status(CPU_KILL_ME);
		__cpu_try_die(cpu);
	}

	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}
static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
		kvm_compute_layout();
		kvm_apply_hyp_relocations();
	}
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
	mark_linear_text_alias_ro();
}
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();

	/*
	 * We now know enough about the boot CPU to apply the
	 * alternatives that cannot wait until interrupt handling
	 * and/or scheduling is enabled.
	 */
	apply_boot_alternatives();

	/* Conditionally switch to GIC PMR for interrupt masking */
	if (system_uses_irq_prio_masking())
		init_gic_priority_masking();

	kasan_init_hw_tags();
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%pOF: missing reg property\n", dn);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%pOF: invalid reg property\n", dn);
		return INVALID_HWID;
	}
	return hwid;
}
/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}
/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	const struct cpu_operations *ops;

	if (init_cpu_ops(cpu))
		return -ENODEV;

	ops = get_cpu_ops(cpu);
	if (ops->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}
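/*
 * Logical id 0 is reserved for the boot CPU, so enumeration of secondaries
 * starts at 1; bootcpu_valid records whether the boot CPU was found in the
 * firmware tables at all.
 */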
static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
	return &cpu_madt_gicc[cpu];
}
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		cpu_madt_gicc[0] = *processor;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	set_cpu_logical_map(cpu_count, hwid);

	cpu_madt_gicc[cpu_count] = *processor;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (i.e. a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}
static int __init
acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
static void __init acpi_parse_and_init_cpus(void)
{
	int i;

	/*
	 * do a walk of MADT to determine how many CPUs
	 * we have including disabled CPUs, and get information
	 * we need for SMP init.
	 */
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
			      acpi_parse_gic_cpu_interface, 0);

	/*
	 * In ACPI, SMP and CPU NUMA information is provided in separate
	 * static tables, namely the MADT and the SRAT.
	 *
	 * Thus, it is simpler to first create the cpu logical map through
	 * an MADT walk and then map the logical cpus to their node ids
	 * as separate steps.
	 */
	acpi_map_cpus_to_nodes();

	for (i = 0; i < nr_cpu_ids; i++)
		early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...)	do { } while (0)
#endif
/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn;

	for_each_of_cpu_node(dn) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%pOF: duplicate cpu reg properties in the DT\n",
				dn);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%pOF: duplicate boot cpu reg property in DT\n",
					dn);
				goto next;
			}

			bootcpu_valid = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		set_cpu_logical_map(cpu_count, hwid);

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}
/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		acpi_parse_and_init_cpus();

	if (cpu_count > nr_cpu_ids)
		pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
			cpu_count, nr_cpu_ids);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				set_cpu_logical_map(i, INVALID_HWID);
		}
	}
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	const struct cpu_operations *ops;
	int err;
	unsigned int cpu;
	unsigned int this_cpu;

	init_cpu_topology();

	this_cpu = smp_processor_id();
	store_cpu_topology(this_cpu);
	numa_store_cpu_info(this_cpu);
	numa_add_cpu(this_cpu);

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
	 * secondary CPUs present.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {

		per_cpu(cpu_number, cpu) = cpu;

		if (cpu == smp_processor_id())
			continue;

		ops = get_cpu_ops(cpu);
		if (!ops)
			continue;

		err = ops->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}
static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_WAKEUP]		= "CPU wake-up interrupts",
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

unsigned long irq_err_count;

int arch_show_interrupts(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}

	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
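/*
 * Take the calling CPU out of the online mask, mask interrupts and SDEI
 * events, and park it in a low-power loop; used for IPI_CPU_STOP and for
 * parallel panic().
 */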
static void local_cpu_stop(void)
{
	set_cpu_online(smp_processor_id(), false);

	local_daif_mask();
	sdei_mask_local_cpu();
	cpu_park_loop();
}
/*
 * We need to implement panic_smp_self_stop() for parallel panic() calls, so
 * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
 * CPUs that have already stopped themselves.
 */
void panic_smp_self_stop(void)
{
	local_cpu_stop();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif
static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();
	sdei_mask_local_cpu();

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		__cpu_try_die(cpu);

	/* just in case */
	cpu_park_loop();
#endif
}
/*
 * Main handler for inter-processor interrupts
 */
static void do_handle_IPI(int ipinr)
{
	unsigned int cpu = smp_processor_id();

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		local_cpu_stop();
		break;

	case IPI_CPU_CRASH_STOP:
		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
			ipi_cpu_crash_stop(cpu, get_irq_regs());
			unreachable();
		}
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		tick_receive_broadcast();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_work_run();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
}
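/*
 * IPIs occupy a contiguous range of per-CPU IRQs; translate the Linux IRQ
 * number back into an IPI index before dispatching.
 */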
static irqreturn_t ipi_handler(int irq, void *data)
{
	do_handle_IPI(irq - ipi_irq_base);
	return IRQ_HANDLED;
}
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__ipi_send_mask(ipi_desc[ipinr], target);
}
static void ipi_setup(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		enable_percpu_irq(ipi_irq_base + i, 0);
}
#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		disable_percpu_irq(ipi_irq_base + i);
}
#endif
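/*
 * Called by the interrupt controller driver to donate a contiguous range of
 * n per-CPU IRQs for use as IPIs. Only the first NR_IPI of them are used;
 * they are requested here and enabled on each CPU by ipi_setup().
 */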
void __init set_smp_ipi_range(int ipi_base, int n)
{
	int i;

	WARN_ON(n < NR_IPI);
	nr_ipi = min(n, NR_IPI);

	for (i = 0; i < nr_ipi; i++) {
		int err;

		err = request_percpu_irq(ipi_base + i, ipi_handler,
					 "IPI", &cpu_number);
		WARN_ON(err);

		ipi_desc[i] = irq_to_desc(ipi_base + i);
		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
	}

	ipi_irq_base = ipi_base;

	/* Setup the boot CPU immediately */
	ipi_setup(smp_processor_id());
}
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif
/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_other_online_cpus()) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_other_online_cpus() && timeout--)
		udelay(1);

	if (num_other_online_cpus())
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));

	sdei_mask_local_cpu();
}
#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/*
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
	if (num_other_online_cpus() == 0) {
		sdei_mask_local_cpu();
		return;
	}

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));

	sdei_mask_local_cpu();
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif
/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
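/*
 * Whether the platform has a cpu_die method, i.e. a way for offlined CPUs to
 * leave the kernel entirely. Without one (e.g. spin-table), offline
 * secondaries keep spinning in kernel text.
 */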
static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();
	const struct cpu_operations *ops = get_cpu_ops(any_cpu);

	if (ops && ops->cpu_die)
		return true;
#endif
	return false;
}
bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables;
}