/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}

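/*
 * Example (illustrative, not part of the original file): booting with
 * smt-enabled=2 on a machine with 8 threads per core leaves threads 0
 * and 1 of each core bootable and inhibits threads 2-7 until the system
 * reaches SYSTEM_RUNNING.
 */
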
#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_mb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_NMI_IPI] = "nmi ipi",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}

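/*
 * Usage sketch (illustrative; virq_base is a hypothetical value that an
 * interrupt controller driver would obtain via its irq domain mapping):
 *
 *	int msg;
 *
 *	for (msg = 0; msg <= PPC_MSG_NMI_IPI; msg++)
 *		smp_request_message_ipi(virq_base + msg, msg);
 */
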
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif

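/*
 * Worked example of the byte<->bit mapping (assuming 64-bit longs):
 * smp_muxed_ipi_set_message() stores 1 to message[msg], i.e. byte msg of
 * info->messages. For msg = PPC_MSG_RESCHEDULE (1), the low bit of that
 * byte is bit 48 on big-endian (1uL << ((64 - 8) - 8 * 1)) and bit 8 on
 * little-endian (1uL << (8 * 1)), which is exactly what IPI_MESSAGE(1)
 * yields, so the demux below can test the xchg'ed word per message type.
 */
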
irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * NMI IPIs are globally single threaded. No more than one in progress at
 * any time.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then the call returns.
 *
 * No new NMI can be initiated until targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static int nmi_ipi_busy_count = 0;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *);
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (!nmi_ipi_busy_count)
		goto out;
	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
		goto out;

	fn = nmi_ipi_function;
	if (!fn)
		goto out;

	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	ret = 1;

	fn(regs);

	nmi_ipi_lock();
	nmi_ipi_busy_count--;
out:
	nmi_ipi_unlock_end(&flags);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu)
{
	if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   enter the handler, == 0 specifies indefinite delay.
 */
static int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(nmi_ipi_busy_count == 0);
		nmi_ipi_lock_start(&flags);
	}

	nmi_ipi_function = fn;

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		/* cpumask starts clear */
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	do_smp_send_nmi_ipi(cpu);

	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	nmi_ipi_lock();
	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Could not gather all CPUs */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count--;
	nmi_ipi_unlock_end(&flags);

	return ret;
}
#endif /* CONFIG_NMI_IPI */

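/*
 * Usage sketch for smp_send_nmi_ipi(): callers pass a callback to run on
 * each target and a timeout in microseconds (0 waits indefinitely), as
 * the debugger and kexec crash paths below do, e.g.:
 *
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
 */
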
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt
	 * to be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU.
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be left over from a
	 * CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

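/*
 * Illustrative device tree fragment (hypothetical values): a cpu node
 * such as
 *
 *	cpu@20 {
 *		device_type = "cpu";
 *		reg = <0x20>;
 *	};
 *
 * would make cpu_to_core_id() return 0x20 for the matching logical cpu.
 */
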
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

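/*
 * Worked example (assuming threads_per_core = 8, i.e. threads_shift = 3):
 * cpu_core_index_of_thread(13) == 1 (13 >> 3), and
 * cpu_first_thread_of_core(1) == 8 (1 << 3).
 */
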
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}

	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static __init long smp_setup_cpu_workfn(void *data __always_unused)
{
	smp_ops->setup_cpu(boot_cpuid);
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We want the setup_cpu() here to be called on the boot CPU, but
	 * init might run on any CPU, so make sure it's invoked on the boot
	 * CPU.
	 */
	if (smp_ops && smp_ops->setup_cpu)
		work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}
#endif