/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/vdso_datapage.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];
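
/*
 * How many SMT threads to bring up at boot; checked by
 * smp_generic_cpu_bootable() below. On 64-bit this is normally set from
 * the "smt-enabled=" command line option during early boot (assumption
 * based on the generic ppc64 boot code, not visible in this file).
 */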
int smt_enabled_at_boot = 1;

/* Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
        /* Special case - we inhibit secondary thread startup
         * during boot if the user requests it.
         */
        if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
                if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
                        return 0;
                if (smt_enabled_at_boot
                    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
                        return 0;
        }

        return 1;
}

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero. After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        if (!paca[nr].cpu_start) {
                paca[nr].cpu_start = 1;
                smp_mb();
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Ok, it's not there, so it might be soft-unplugged; let's
         * try to bring it back.
         */
        generic_set_cpu_up(nr);
        smp_wmb();
        smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

        return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
        scheduler_ipi();
        return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
        tick_broadcast_ipi_handler();
        return IRQ_HANDLED;
}

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
        smp_handle_nmi_ipi(get_irq_regs());
        return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] = call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
        [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#ifdef CONFIG_NMI_IPI
        [PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] = "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
        [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
        [PPC_MSG_NMI_IPI] = "nmi ipi",
};
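
/*
 * Note: these strings are the names handed to request_irq() by
 * smp_request_message_ipi() below, so they are also what shows up in
 * /proc/interrupts for the per-message IPI lines.
 */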
/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
        int err;

        if (msg < 0 || msg > PPC_MSG_NMI_IPI)
                return -EINVAL;
#ifndef CONFIG_NMI_IPI
        if (msg == PPC_MSG_NMI_IPI)
                return 1;
#endif

        err = request_irq(virq, smp_ipi_action[msg],
                          IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
                          smp_ipi_name[msg], NULL);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
             virq, smp_ipi_name[msg], err);

        return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
        long messages;                  /* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;

        /*
         * Order previous accesses before accesses in the IPI handler.
         */
        smp_mb();
        message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
        smp_muxed_ipi_set_message(cpu, msg);

        /*
         * cause_ipi functions are required to include a full barrier
         * before doing whatever causes the IPI.
         */
        smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
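
/*
 * Worked example (illustrative): smp_muxed_ipi_set_message() above stores
 * a 1 into byte 'msg' of the per-cpu 'messages' word, and IPI_MESSAGE(msg)
 * selects the low-order bit of that same byte lane. On a 64-bit kernel,
 * message number 1 maps to bit 8 (1uL << 8) on little-endian, and to
 * bit 48 (1uL << (64 - 8 - 8)) on big-endian.
 */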
irqreturn_t smp_ipi_demux(void)
{
        mb();   /* order any irq clear */

        return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
        struct cpu_messages *info;
        unsigned long all;

        info = this_cpu_ptr(&ipi_message);
        do {
                all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
                /*
                 * Must check for PPC_MSG_RM_HOST_ACTION messages
                 * before PPC_MSG_CALL_FUNCTION messages because when
                 * a VM is destroyed, we call kick_all_cpus_sync()
                 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
                 * messages have completed before we free any VCPUs.
                 */
                if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
                        kvmppc_xics_ipi_action();
#endif
                if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
                        generic_smp_call_function_interrupt();
                if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
                        scheduler_ipi();
                if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
                        tick_broadcast_ipi_handler();
#ifdef CONFIG_NMI_IPI
                if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
                        nmi_ipi_action(0, NULL);
#endif
        } while (info->messages);

        return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
        if (smp_ops->message_pass)
                smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
        else
                smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
        do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * NMI IPIs are globally single threaded. No more than one is in progress at
 * any time.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then the call returns.
 *
 * No new NMI can be initiated until targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */
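
/*
 * In this file the NMI IPI machinery is used by smp_send_debugger_break()
 * and crash_send_ipi() further below.
 */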

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static int nmi_ipi_busy_count = 0;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
        raw_local_irq_save(*flags);
        hard_irq_disable();
        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
                raw_local_irq_restore(*flags);
                cpu_relax();
                raw_local_irq_save(*flags);
                hard_irq_disable();
        }
}

static void nmi_ipi_lock(void)
{
        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
                cpu_relax();
}

static void nmi_ipi_unlock(void)
{
        smp_mb();
        WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
        atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
        nmi_ipi_unlock();
        raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
        void (*fn)(struct pt_regs *);
        unsigned long flags;
        int me = raw_smp_processor_id();
        int ret = 0;

        /*
         * Unexpected NMIs are possible here because the interrupt may not
         * be able to distinguish NMI IPIs from other types of NMIs, or
         * because the caller may have timed out.
         */
        nmi_ipi_lock_start(&flags);
        if (!nmi_ipi_busy_count)
                goto out;
        if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
                goto out;

        fn = nmi_ipi_function;

        cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
        nmi_ipi_busy_count++;
        nmi_ipi_unlock();

        ret = 1;

        fn(regs);

        nmi_ipi_lock();
        nmi_ipi_busy_count--;
out:
        nmi_ipi_unlock_end(&flags);

        return ret;
}

static void do_smp_send_nmi_ipi(int cpu)
{
        if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
                return;

        if (cpu >= 0) {
                do_message_pass(cpu, PPC_MSG_NMI_IPI);
        } else {
                int c;

                for_each_online_cpu(c) {
                        if (c == raw_smp_processor_id())
                                continue;
                        do_message_pass(c, PPC_MSG_NMI_IPI);
                }
        }
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   enter the handler, == 0 specifies indefinite delay.
 */
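/*
 * Example (see smp_send_debugger_break() below): send to all other CPUs
 * and wait up to one second for them to enter the callback:
 *
 *      smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
 */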
static int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
        unsigned long flags;
        int me = raw_smp_processor_id();
        int ret = 1;

        BUG_ON(cpu == me);
        BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

        if (unlikely(!smp_ops))
                return 0;

        /* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
        nmi_ipi_lock_start(&flags);
        while (nmi_ipi_busy_count) {
                nmi_ipi_unlock_end(&flags);
                cpu_relax();
                nmi_ipi_lock_start(&flags);
        }

        nmi_ipi_function = fn;

        if (cpu < 0) {
                /* ALL_OTHERS */
                cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
                cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
        } else {
                /* cpumask starts clear */
                cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
        }
        nmi_ipi_busy_count++;
        nmi_ipi_unlock();

        do_smp_send_nmi_ipi(cpu);

        while (!cpumask_empty(&nmi_ipi_pending_mask)) {
                udelay(1);
                if (delay_us) {
                        delay_us--;
                        if (!delay_us)
                                break;
                }
        }

        nmi_ipi_lock();
        if (!cpumask_empty(&nmi_ipi_pending_mask)) {
                /* Could not gather all CPUs */
                ret = 0;
                cpumask_clear(&nmi_ipi_pending_mask);
        }
        nmi_ipi_busy_count--;
        nmi_ipi_unlock_end(&flags);

        return ret;
}
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
        debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
}
#endif

static void stop_this_cpu(void *dummy)
{
        /* Remove this CPU */
        set_cpu_online(smp_processor_id(), false);

        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        /* Third argument 0: don't wait for the other CPUs to finish */
        smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
        per_cpu(next_tlbcam_idx, id)
                = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up yet, but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        for_each_possible_cpu(cpu) {
                zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                /*
                 * numa_node_id() works after this.
                 */
                if (cpu_present(cpu)) {
                        set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
                        set_cpu_numa_mem(cpu,
                                local_memory_node(numa_cpu_lookup_table[cpu]));
                }
        }

        cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
        cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

        if (smp_ops && smp_ops->probe)
                smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
#endif
        /* Update affinity of all IRQs previously aimed at this CPU */
        irq_migrate_all_off_this_cpu();

        /*
         * Depending on the details of the interrupt controller, it's possible
         * that one of the interrupts we just migrated away from this CPU is
         * actually already pending on this CPU. If we leave it in that state
         * the interrupt will never be EOI'ed, and will never fire again. So
         * temporarily enable interrupts here, to allow any pending interrupt
         * to be received (and EOI'ed), before we take this CPU offline.
         */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();

        return 0;
}

void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (is_cpu_dead(cpu))
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
        return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
        return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
        return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()         0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
        struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
        paca[cpu].__current = idle;
        paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
        ti->cpu = cpu;
        secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc, c;

        /*
         * Don't allow secondary threads to come online if inhibited.
         */
        if (threads_per_core > 1 && secondaries_inhibited() &&
            cpu_thread_in_subcore(cpu))
                return -EBUSY;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        cpu_idle_thread_init(cpu, tidle);

        /*
         * The platform might need to allocate resources prior to bringing
         * up the CPU.
         */
        if (smp_ops->prepare_cpu) {
                rc = smp_ops->prepare_cpu(cpu);
                if (rc)
                        return rc;
        }

        /* Make sure the callin-map entry is 0 (can be left over from a
         * previous CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        rc = smp_ops->kick_cpu(cpu);
        if (rc) {
                pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
                return rc;
        }

        /*
         * Wait to see if the cpu made a callin (is actually up).
         * Use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case. Wait five seconds.
                 */
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        msleep(1);
#endif

        if (!cpu_callin_map[cpu]) {
                printk(KERN_ERR "Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        DBG("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online & active maps */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
        struct device_node *np;
        const __be32 *reg;
        int id = -1;

        np = of_get_cpu_node(cpu, NULL);
        if (!np)
                goto out;

        reg = of_get_property(np, "reg", NULL);
        if (!reg)
                goto out;

        id = be32_to_cpup(reg);
out:
        of_node_put(np);
        return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
        return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
        return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
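
/*
 * Illustrative example (assuming 8 threads per core, i.e. threads_shift
 * == 3): CPUs 0-7 are the threads of core 0, cpu_core_index_of_thread(9)
 * == 1, and cpu_first_thread_of_core(1) == 8.
 */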
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
        const struct cpumask *mask;
        struct device_node *np;
        int i, plen;
        const __be32 *prop;

        mask = add ? cpu_online_mask : cpu_present_mask;
        for_each_cpu(i, mask) {
                np = of_get_cpu_node(i, NULL);
                if (!np)
                        continue;
                prop = of_get_property(np, "ibm,chip-id", &plen);
                if (prop && plen == sizeof(int) &&
                    of_read_number(prop, 1) == chipid) {
                        if (add) {
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                        } else {
                                cpumask_clear_cpu(cpu, cpu_core_mask(i));
                                cpumask_clear_cpu(i, cpu_core_mask(cpu));
                        }
                }
                of_node_put(np);
        }
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
        struct device_node *np;
        struct device_node *cache;

        if (!cpu_present(cpu))
                return NULL;

        np = of_get_cpu_node(cpu, NULL);
        if (np == NULL)
                return NULL;

        cache = of_find_next_cache_node(np);

        of_node_put(np);

        return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
        struct device_node *l2_cache, *np;
        const struct cpumask *mask;
        int i, chip, plen;
        const __be32 *prop;

        /* First see if we have ibm,chip-id properties in cpu nodes */
        np = of_get_cpu_node(cpu, NULL);
        if (np) {
                chip = -1;
                prop = of_get_property(np, "ibm,chip-id", &plen);
                if (prop && plen == sizeof(int))
                        chip = of_read_number(prop, 1);
                of_node_put(np);
                if (chip >= 0) {
                        traverse_siblings_chip_id(cpu, add, chip);
                        return;
                }
        }

        l2_cache = cpu_to_l2cache(cpu);
        mask = add ? cpu_online_mask : cpu_present_mask;
        for_each_cpu(i, mask) {
                np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        if (add) {
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                        } else {
                                cpumask_clear_cpu(cpu, cpu_core_mask(i));
                                cpumask_clear_cpu(i, cpu_core_mask(cpu));
                        }
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();
        int i, base;

        mmgrab(&init_mm);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        secondary_cpu_time_init();

#ifdef CONFIG_PPC64
        if (system_state == SYSTEM_RUNNING)
                vdso_data->processorCount++;
#endif
        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                if (cpu_is_offline(base + i) && (cpu != base + i))
                        continue;
                cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

                /* cpu_core_map should be a superset of
                 * cpu_sibling_map even if we don't have cache
                 * information, so update the former here, too.
                 */
                cpumask_set_cpu(cpu, cpu_core_mask(base + i));
                cpumask_set_cpu(base + i, cpu_core_mask(cpu));
        }
        traverse_core_siblings(cpu, true);

        set_numa_node(numa_cpu_lookup_table[cpu]);
        set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);

        local_irq_enable();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

        BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
        int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
                flags |= SD_ASYM_PACKING;
        }

        return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};

static __init long smp_setup_cpu_workfn(void *data __always_unused)
{
        smp_ops->setup_cpu(boot_cpuid);
        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        /*
         * We want the setup_cpu() here to be called on the boot CPU, but
         * init might run on any CPU, so make sure it's invoked on the boot
         * CPU.
         */
        if (smp_ops && smp_ops->setup_cpu)
                work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);

        if (smp_ops && smp_ops->bringup_done)
                smp_ops->bringup_done();

        dump_numa_cpu_topology();

        set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        int cpu = smp_processor_id();
        int base, i;
        int err;

        if (!smp_ops->cpu_disable)
                return -ENOSYS;

        err = smp_ops->cpu_disable();
        if (err)
                return err;

        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
                cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
                cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
        }
        traverse_core_siblings(cpu, false);

        return 0;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
        if (ppc_md.cpu_die)
                ppc_md.cpu_die();

        /* If we return, we re-enter start_secondary */
        start_secondary_resume();
}
#endif