2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
4 * This code is licenced under the GPL.
6 #include <linux/sched/mm.h>
7 #include <linux/proc_fs.h>
9 #include <linux/init.h>
10 #include <linux/notifier.h>
11 #include <linux/sched/signal.h>
12 #include <linux/sched/hotplug.h>
13 #include <linux/sched/isolation.h>
14 #include <linux/sched/task.h>
15 #include <linux/sched/smt.h>
16 #include <linux/unistd.h>
17 #include <linux/cpu.h>
18 #include <linux/oom.h>
19 #include <linux/rcupdate.h>
20 #include <linux/delay.h>
21 #include <linux/export.h>
22 #include <linux/bug.h>
23 #include <linux/kthread.h>
24 #include <linux/stop_machine.h>
25 #include <linux/mutex.h>
26 #include <linux/gfp.h>
27 #include <linux/suspend.h>
28 #include <linux/lockdep.h>
29 #include <linux/tick.h>
30 #include <linux/irq.h>
31 #include <linux/nmi.h>
32 #include <linux/smpboot.h>
33 #include <linux/relay.h>
34 #include <linux/slab.h>
35 #include <linux/scs.h>
36 #include <linux/percpu-rwsem.h>
37 #include <linux/cpuset.h>
38 #include <linux/random.h>
39 #include <linux/cc_platform.h>
41 #include <trace/events/power.h>
42 #define CREATE_TRACE_POINTS
43 #include <trace/events/cpuhp.h>
48 * struct cpuhp_cpu_state - Per cpu hotplug state storage
49 * @state: The current cpu state
50 * @target: The target state
51 * @fail: Current CPU hotplug callback state
52 * @thread: Pointer to the hotplug thread
53 * @should_run: Thread should execute
54 * @rollback: Perform a rollback
55 * @single: Single callback invocation
56 * @bringup: Single callback bringup or teardown selector
58 * @node: Remote CPU node; for multi-instance, do a
59 * single entry callback for install/remove
60 * @last: For multi-instance rollback, remember how far we got
61 * @cb_state: The state for a single callback (install/uninstall)
62 * @result: Result of the operation
63 * @ap_sync_state: State for AP synchronization
64 * @done_up: Signal completion to the issuer of the task for cpu-up
65 * @done_down: Signal completion to the issuer of the task for cpu-down
67 struct cpuhp_cpu_state {
68 enum cpuhp_state state;
69 enum cpuhp_state target;
70 enum cpuhp_state fail;
72 struct task_struct *thread;
77 struct hlist_node *node;
78 struct hlist_node *last;
79 enum cpuhp_state cb_state;
81 atomic_t ap_sync_state;
82 struct completion done_up;
83 struct completion done_down;
87 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
88 .fail = CPUHP_INVALID,
92 cpumask_t cpus_booted_once_mask;
95 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
96 static struct lockdep_map cpuhp_state_up_map =
97 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
98 static struct lockdep_map cpuhp_state_down_map =
99 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
102 static inline void cpuhp_lock_acquire(bool bringup)
104 lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
107 static inline void cpuhp_lock_release(bool bringup)
109 lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
113 static inline void cpuhp_lock_acquire(bool bringup) { }
114 static inline void cpuhp_lock_release(bool bringup) { }
119 * struct cpuhp_step - Hotplug state machine step
120 * @name: Name of the step
121 * @startup: Startup function of the step
122 * @teardown: Teardown function of the step
123 * @cant_stop: Bringup/teardown can't be stopped at this step
124 * @multi_instance: State has multiple instances which get added afterwards
129 int (*single)(unsigned int cpu);
130 int (*multi)(unsigned int cpu,
131 struct hlist_node *node);
134 int (*single)(unsigned int cpu);
135 int (*multi)(unsigned int cpu,
136 struct hlist_node *node);
139 struct hlist_head list;
145 static DEFINE_MUTEX(cpuhp_state_mutex);
146 static struct cpuhp_step cpuhp_hp_states[];
148 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
150 return cpuhp_hp_states + state;
153 static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
155 return bringup ? !step->startup.single : !step->teardown.single;
159 * cpuhp_invoke_callback - Invoke the callbacks for a given state
160 * @cpu: The cpu for which the callback should be invoked
161 * @state: The state to do callbacks for
162 * @bringup: True if the bringup callback should be invoked
163 * @node: For multi-instance, do a single entry callback for install/remove
164 * @lastp: For multi-instance rollback, remember how far we got
166 * Called from cpu hotplug and from the state register machinery.
168 * Return: %0 on success or a negative errno code
170 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
171 bool bringup, struct hlist_node *node,
172 struct hlist_node **lastp)
174 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
175 struct cpuhp_step *step = cpuhp_get_step(state);
176 int (*cbm)(unsigned int cpu, struct hlist_node *node);
177 int (*cb)(unsigned int cpu);
180 if (st->fail == state) {
181 st->fail = CPUHP_INVALID;
185 if (cpuhp_step_empty(bringup, step)) {
190 if (!step->multi_instance) {
191 WARN_ON_ONCE(lastp && *lastp);
192 cb = bringup ? step->startup.single : step->teardown.single;
194 trace_cpuhp_enter(cpu, st->target, state, cb);
196 trace_cpuhp_exit(cpu, st->state, state, ret);
199 cbm = bringup ? step->startup.multi : step->teardown.multi;
201 /* Single invocation for instance add/remove */
203 WARN_ON_ONCE(lastp && *lastp);
204 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
205 ret = cbm(cpu, node);
206 trace_cpuhp_exit(cpu, st->state, state, ret);
210 /* State transition. Invoke on all instances */
212 hlist_for_each(node, &step->list) {
213 if (lastp && node == *lastp)
216 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
217 ret = cbm(cpu, node);
218 trace_cpuhp_exit(cpu, st->state, state, ret);
232 /* Rollback the instances if one failed */
233 cbm = !bringup ? step->startup.multi : step->teardown.multi;
237 hlist_for_each(node, &step->list) {
241 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
242 ret = cbm(cpu, node);
243 trace_cpuhp_exit(cpu, st->state, state, ret);
245 * Rollback must not fail,
253 static bool cpuhp_is_ap_state(enum cpuhp_state state)
256 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
257 * purposes as that state is handled explicitly in cpu_down.
259 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
262 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
264 struct completion *done = bringup ? &st->done_up : &st->done_down;
265 wait_for_completion(done);
268 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
270 struct completion *done = bringup ? &st->done_up : &st->done_down;
275 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
277 static bool cpuhp_is_atomic_state(enum cpuhp_state state)
279 return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
282 /* Synchronization state management */
283 enum cpuhp_sync_state {
286 SYNC_STATE_SHOULD_DIE,
288 SYNC_STATE_SHOULD_ONLINE,
292 #ifdef CONFIG_HOTPLUG_CORE_SYNC
294 * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
295 * @state: The synchronization state to set
297 * No synchronization point. Just update of the synchronization state, but implies
298 * a full barrier so that the AP changes are visible before the control CPU proceeds.
300 static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
302 atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
304 (void)atomic_xchg(st, state);
307 void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }
309 static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
310 enum cpuhp_sync_state next_state)
312 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
313 ktime_t now, end, start = ktime_get();
316 end = start + 10ULL * NSEC_PER_SEC;
318 sync = atomic_read(st);
321 if (!atomic_try_cmpxchg(st, &sync, next_state))
328 /* Timeout. Leave the state unchanged */
330 } else if (now - start < NSEC_PER_MSEC) {
331 /* Poll for one millisecond */
332 arch_cpuhp_sync_state_poll();
334 usleep_range_state(USEC_PER_MSEC, 2 * USEC_PER_MSEC, TASK_UNINTERRUPTIBLE);
336 sync = atomic_read(st);
340 #else /* CONFIG_HOTPLUG_CORE_SYNC */
341 static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
342 #endif /* !CONFIG_HOTPLUG_CORE_SYNC */
344 #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
346 * cpuhp_ap_report_dead - Update synchronization state to DEAD
348 * No synchronization point. Just update of the synchronization state.
350 void cpuhp_ap_report_dead(void)
352 cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
355 void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
358 * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down
359 * because the AP cannot issue complete() at this stage.
361 static void cpuhp_bp_sync_dead(unsigned int cpu)
363 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
364 int sync = atomic_read(st);
367 /* CPU can have reported dead already. Don't overwrite that! */
368 if (sync == SYNC_STATE_DEAD)
370 } while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));
372 if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
373 /* CPU reached dead state. Invoke the cleanup function */
374 arch_cpuhp_cleanup_dead_cpu(cpu);
378 /* No further action possible. Emit message and give up. */
379 pr_err("CPU%u failed to report dead state\n", cpu);
381 #else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */
382 static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
383 #endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */
385 #ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
387 * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
389 * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
390 * for the BP to release it.
392 void cpuhp_ap_sync_alive(void)
394 atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
396 cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);
398 /* Wait for the control CPU to release it. */
399 while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
403 static bool cpuhp_can_boot_ap(unsigned int cpu)
405 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
406 int sync = atomic_read(st);
410 case SYNC_STATE_DEAD:
411 /* CPU is properly dead */
413 case SYNC_STATE_KICKED:
414 /* CPU did not come up in previous attempt */
416 case SYNC_STATE_ALIVE:
417 /* CPU is stuck cpuhp_ap_sync_alive(). */
420 /* CPU failed to report online or dead and is in limbo state. */
424 /* Prepare for booting */
425 if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
431 void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }
434 * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up
435 * because the AP cannot issue complete() so early in the bringup.
437 static int cpuhp_bp_sync_alive(unsigned int cpu)
441 if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))
444 if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
445 pr_err("CPU%u failed to report alive state\n", cpu);
449 /* Let the architecture cleanup the kick alive mechanics. */
450 arch_cpuhp_cleanup_kick_cpu(cpu);
453 #else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */
454 static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
455 static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
456 #endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */
458 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
459 static DEFINE_MUTEX(cpu_add_remove_lock);
460 bool cpuhp_tasks_frozen;
461 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
464 * The following two APIs (cpu_maps_update_begin/done) must be used when
465 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
467 void cpu_maps_update_begin(void)
469 mutex_lock(&cpu_add_remove_lock);
472 void cpu_maps_update_done(void)
474 mutex_unlock(&cpu_add_remove_lock);
478 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
479 * Should always be manipulated under cpu_add_remove_lock
481 static int cpu_hotplug_disabled;
483 #ifdef CONFIG_HOTPLUG_CPU
485 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
487 void cpus_read_lock(void)
489 percpu_down_read(&cpu_hotplug_lock);
491 EXPORT_SYMBOL_GPL(cpus_read_lock);
493 int cpus_read_trylock(void)
495 return percpu_down_read_trylock(&cpu_hotplug_lock);
497 EXPORT_SYMBOL_GPL(cpus_read_trylock);
499 void cpus_read_unlock(void)
501 percpu_up_read(&cpu_hotplug_lock);
503 EXPORT_SYMBOL_GPL(cpus_read_unlock);
505 void cpus_write_lock(void)
507 percpu_down_write(&cpu_hotplug_lock);
510 void cpus_write_unlock(void)
512 percpu_up_write(&cpu_hotplug_lock);
515 void lockdep_assert_cpus_held(void)
518 * We can't have hotplug operations before userspace starts running,
519 * and some init codepaths will knowingly not take the hotplug lock.
520 * This is all valid, so mute lockdep until it makes sense to report
523 if (system_state < SYSTEM_RUNNING)
526 percpu_rwsem_assert_held(&cpu_hotplug_lock);
529 #ifdef CONFIG_LOCKDEP
530 int lockdep_is_cpus_held(void)
532 return percpu_rwsem_is_held(&cpu_hotplug_lock);
536 static void lockdep_acquire_cpus_lock(void)
538 rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
541 static void lockdep_release_cpus_lock(void)
543 rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
547 * Wait for currently running CPU hotplug operations to complete (if any) and
548 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
549 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
550 * hotplug path before performing hotplug operations. So acquiring that lock
551 * guarantees mutual exclusion from any currently running hotplug operations.
553 void cpu_hotplug_disable(void)
555 cpu_maps_update_begin();
556 cpu_hotplug_disabled++;
557 cpu_maps_update_done();
559 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
561 static void __cpu_hotplug_enable(void)
563 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
565 cpu_hotplug_disabled--;
568 void cpu_hotplug_enable(void)
570 cpu_maps_update_begin();
571 __cpu_hotplug_enable();
572 cpu_maps_update_done();
574 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
578 static void lockdep_acquire_cpus_lock(void)
582 static void lockdep_release_cpus_lock(void)
586 #endif /* CONFIG_HOTPLUG_CPU */
589 * Architectures that need SMT-specific errata handling during SMT hotplug
590 * should override this.
592 void __weak arch_smt_update(void) { }
594 #ifdef CONFIG_HOTPLUG_SMT
595 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
597 void __init cpu_smt_disable(bool force)
599 if (!cpu_smt_possible())
603 pr_info("SMT: Force disabled\n");
604 cpu_smt_control = CPU_SMT_FORCE_DISABLED;
606 pr_info("SMT: disabled\n");
607 cpu_smt_control = CPU_SMT_DISABLED;
612 * The decision whether SMT is supported can only be done after the full
613 * CPU identification. Called from architecture code.
615 void __init cpu_smt_check_topology(void)
617 if (!topology_smt_supported())
618 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
621 static int __init smt_cmdline_disable(char *str)
623 cpu_smt_disable(str && !strcmp(str, "force"));
626 early_param("nosmt", smt_cmdline_disable);
628 static inline bool cpu_smt_allowed(unsigned int cpu)
630 if (cpu_smt_control == CPU_SMT_ENABLED)
633 if (topology_is_primary_thread(cpu))
637 * On x86 it's required to boot all logical CPUs at least once so
638 * that the init code can get a chance to set CR4.MCE on each
639 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
640 * core will shutdown the machine.
642 return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
645 /* Returns true if SMT is not supported of forcefully (irreversibly) disabled */
646 bool cpu_smt_possible(void)
648 return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
649 cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
651 EXPORT_SYMBOL_GPL(cpu_smt_possible);
653 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
656 static inline enum cpuhp_state
657 cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
659 enum cpuhp_state prev_state = st->state;
660 bool bringup = st->state < target;
662 st->rollback = false;
667 st->bringup = bringup;
668 if (cpu_dying(cpu) != !bringup)
669 set_cpu_dying(cpu, !bringup);
675 cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
676 enum cpuhp_state prev_state)
678 bool bringup = !st->bringup;
680 st->target = prev_state;
683 * Already rolling back. No need invert the bringup value or to change
692 * If we have st->last we need to undo partial multi_instance of this
693 * state first. Otherwise start undo at the previous state.
702 st->bringup = bringup;
703 if (cpu_dying(cpu) != !bringup)
704 set_cpu_dying(cpu, !bringup);
707 /* Regular hotplug invocation of the AP hotplug thread */
708 static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
710 if (!st->single && st->state == st->target)
715 * Make sure the above stores are visible before should_run becomes
716 * true. Paired with the mb() above in cpuhp_thread_fun()
719 st->should_run = true;
720 wake_up_process(st->thread);
721 wait_for_ap_thread(st, st->bringup);
724 static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
725 enum cpuhp_state target)
727 enum cpuhp_state prev_state;
730 prev_state = cpuhp_set_state(cpu, st, target);
732 if ((ret = st->result)) {
733 cpuhp_reset_state(cpu, st, prev_state);
740 static int bringup_wait_for_ap_online(unsigned int cpu)
742 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
744 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
745 wait_for_ap_thread(st, true);
746 if (WARN_ON_ONCE((!cpu_online(cpu))))
749 /* Unpark the hotplug thread of the target cpu */
750 kthread_unpark(st->thread);
753 * SMT soft disabling on X86 requires to bring the CPU out of the
754 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
755 * CPU marked itself as booted_once in notify_cpu_starting() so the
756 * cpu_smt_allowed() check will now return false if this is not the
759 if (!cpu_smt_allowed(cpu))
764 static int bringup_cpu(unsigned int cpu)
766 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
767 struct task_struct *idle = idle_thread_get(cpu);
770 if (!cpuhp_can_boot_ap(cpu))
774 * Reset stale stack state from the last time this CPU was online.
776 scs_task_reset(idle);
777 kasan_unpoison_task_stack(idle);
780 * Some architectures have to walk the irq descriptors to
781 * setup the vector space for the cpu which comes online.
783 * Prevent irq alloc/free across the bringup by acquiring the
784 * sparse irq lock. Hold it until the upcoming CPU completes the
785 * startup in cpuhp_online_idle() which allows to avoid
786 * intermediate synchronization points in the architecture code.
790 /* Arch-specific enabling code. */
791 ret = __cpu_up(cpu, idle);
795 ret = cpuhp_bp_sync_alive(cpu);
799 ret = bringup_wait_for_ap_online(cpu);
805 if (st->target <= CPUHP_AP_ONLINE_IDLE)
808 return cpuhp_kick_ap(cpu, st, st->target);
815 static int finish_cpu(unsigned int cpu)
817 struct task_struct *idle = idle_thread_get(cpu);
818 struct mm_struct *mm = idle->active_mm;
821 * idle_task_exit() will have switched to &init_mm, now
822 * clean up any remaining active_mm state.
825 idle->active_mm = &init_mm;
831 * Hotplug state machine related functions
835 * Get the next state to run. Empty ones will be skipped. Returns true if a
838 * st->state will be modified ahead of time, to match state_to_run, as if it
841 static bool cpuhp_next_state(bool bringup,
842 enum cpuhp_state *state_to_run,
843 struct cpuhp_cpu_state *st,
844 enum cpuhp_state target)
848 if (st->state >= target)
851 *state_to_run = ++st->state;
853 if (st->state <= target)
856 *state_to_run = st->state--;
859 if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
866 static int __cpuhp_invoke_callback_range(bool bringup,
868 struct cpuhp_cpu_state *st,
869 enum cpuhp_state target,
872 enum cpuhp_state state;
875 while (cpuhp_next_state(bringup, &state, st, target)) {
878 err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
883 pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
884 cpu, bringup ? "UP" : "DOWN",
885 cpuhp_get_step(st->state)->name,
897 static inline int cpuhp_invoke_callback_range(bool bringup,
899 struct cpuhp_cpu_state *st,
900 enum cpuhp_state target)
902 return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
905 static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
907 struct cpuhp_cpu_state *st,
908 enum cpuhp_state target)
910 __cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
913 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
915 if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
918 * When CPU hotplug is disabled, then taking the CPU down is not
919 * possible because takedown_cpu() and the architecture and
920 * subsystem specific mechanisms are not available. So the CPU
921 * which would be completely unplugged again needs to stay around
922 * in the current state.
924 return st->state <= CPUHP_BRINGUP_CPU;
927 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
928 enum cpuhp_state target)
930 enum cpuhp_state prev_state = st->state;
933 ret = cpuhp_invoke_callback_range(true, cpu, st, target);
935 pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
936 ret, cpu, cpuhp_get_step(st->state)->name,
939 cpuhp_reset_state(cpu, st, prev_state);
940 if (can_rollback_cpu(st))
941 WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
948 * The cpu hotplug threads manage the bringup and teardown of the cpus
950 static int cpuhp_should_run(unsigned int cpu)
952 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
954 return st->should_run;
958 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
959 * callbacks when a state gets [un]installed at runtime.
961 * Each invocation of this function by the smpboot thread does a single AP
964 * It has 3 modes of operation:
965 * - single: runs st->cb_state
966 * - up: runs ++st->state, while st->state < st->target
967 * - down: runs st->state--, while st->state > st->target
969 * When complete or on error, should_run is cleared and the completion is fired.
971 static void cpuhp_thread_fun(unsigned int cpu)
973 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
974 bool bringup = st->bringup;
975 enum cpuhp_state state;
977 if (WARN_ON_ONCE(!st->should_run))
981 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
982 * that if we see ->should_run we also see the rest of the state.
987 * The BP holds the hotplug lock, but we're now running on the AP,
988 * ensure that anybody asserting the lock is held, will actually find
991 lockdep_acquire_cpus_lock();
992 cpuhp_lock_acquire(bringup);
995 state = st->cb_state;
996 st->should_run = false;
998 st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
1003 WARN_ON_ONCE(!cpuhp_is_ap_state(state));
1005 if (cpuhp_is_atomic_state(state)) {
1006 local_irq_disable();
1007 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1011 * STARTING/DYING must not fail!
1013 WARN_ON_ONCE(st->result);
1015 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1020 * If we fail on a rollback, we're up a creek without no
1021 * paddle, no way forward, no way back. We loose, thanks for
1024 WARN_ON_ONCE(st->rollback);
1025 st->should_run = false;
1029 cpuhp_lock_release(bringup);
1030 lockdep_release_cpus_lock();
1032 if (!st->should_run)
1033 complete_ap_thread(st, bringup);
1036 /* Invoke a single callback on a remote cpu */
1038 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
1039 struct hlist_node *node)
1041 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1044 if (!cpu_online(cpu))
1047 cpuhp_lock_acquire(false);
1048 cpuhp_lock_release(false);
1050 cpuhp_lock_acquire(true);
1051 cpuhp_lock_release(true);
1054 * If we are up and running, use the hotplug thread. For early calls
1055 * we invoke the thread function directly.
1058 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1060 st->rollback = false;
1064 st->bringup = bringup;
1065 st->cb_state = state;
1068 __cpuhp_kick_ap(st);
1071 * If we failed and did a partial, do a rollback.
1073 if ((ret = st->result) && st->last) {
1074 st->rollback = true;
1075 st->bringup = !bringup;
1077 __cpuhp_kick_ap(st);
1081 * Clean up the leftovers so the next hotplug operation wont use stale
1084 st->node = st->last = NULL;
1088 static int cpuhp_kick_ap_work(unsigned int cpu)
1090 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1091 enum cpuhp_state prev_state = st->state;
1094 cpuhp_lock_acquire(false);
1095 cpuhp_lock_release(false);
1097 cpuhp_lock_acquire(true);
1098 cpuhp_lock_release(true);
1100 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
1101 ret = cpuhp_kick_ap(cpu, st, st->target);
1102 trace_cpuhp_exit(cpu, st->state, prev_state, ret);
1107 static struct smp_hotplug_thread cpuhp_threads = {
1108 .store = &cpuhp_state.thread,
1109 .thread_should_run = cpuhp_should_run,
1110 .thread_fn = cpuhp_thread_fun,
1111 .thread_comm = "cpuhp/%u",
1112 .selfparking = true,
1115 static __init void cpuhp_init_state(void)
1117 struct cpuhp_cpu_state *st;
1120 for_each_possible_cpu(cpu) {
1121 st = per_cpu_ptr(&cpuhp_state, cpu);
1122 init_completion(&st->done_up);
1123 init_completion(&st->done_down);
1127 void __init cpuhp_threads_init(void)
1130 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
1131 kthread_unpark(this_cpu_read(cpuhp_state.thread));
1136 * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
1139 * The operation is still serialized against concurrent CPU hotplug via
1140 * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_
1141 * serialized against other hotplug related activity like adding or
1142 * removing of state callbacks and state instances, which invoke either the
1143 * startup or the teardown callback of the affected state.
1145 * This is required for subsystems which are unfixable vs. CPU hotplug and
1146 * evade lock inversion problems by scheduling work which has to be
1147 * completed _before_ cpu_up()/_cpu_down() returns.
1149 * Don't even think about adding anything to this for any new code or even
1150 * drivers. It's only purpose is to keep existing lock order trainwrecks
1153 * For cpu_down() there might be valid reasons to finish cleanups which are
1154 * not required to be done under cpu_hotplug_lock, but that's a different
1155 * story and would be not invoked via this.
1157 static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
1160 * cpusets delegate hotplug operations to a worker to "solve" the
1161 * lock order problems. Wait for the worker, but only if tasks are
1162 * _not_ frozen (suspend, hibernate) as that would wait forever.
1164 * The wait is required because otherwise the hotplug operation
1165 * returns with inconsistent state, which could even be observed in
1166 * user space when a new CPU is brought up. The CPU plug uevent
1167 * would be delivered and user space reacting on it would fail to
1168 * move tasks to the newly plugged CPU up to the point where the
1169 * work has finished because up to that point the newly plugged CPU
1170 * is not assignable in cpusets/cgroups. On unplug that's not
1171 * necessarily a visible issue, but it is still inconsistent state,
1172 * which is the real problem which needs to be "fixed". This can't
1173 * prevent the transient state between scheduling the work and
1174 * returning from waiting for it.
1177 cpuset_wait_for_hotplug();
1180 #ifdef CONFIG_HOTPLUG_CPU
1181 #ifndef arch_clear_mm_cpumask_cpu
1182 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
1186 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
1189 * This function walks all processes, finds a valid mm struct for each one and
1190 * then clears a corresponding bit in mm's cpumask. While this all sounds
1191 * trivial, there are various non-obvious corner cases, which this function
1192 * tries to solve in a safe manner.
1194 * Also note that the function uses a somewhat relaxed locking scheme, so it may
1195 * be called only for an already offlined CPU.
1197 void clear_tasks_mm_cpumask(int cpu)
1199 struct task_struct *p;
1202 * This function is called after the cpu is taken down and marked
1203 * offline, so its not like new tasks will ever get this cpu set in
1204 * their mm mask. -- Peter Zijlstra
1205 * Thus, we may use rcu_read_lock() here, instead of grabbing
1206 * full-fledged tasklist_lock.
1208 WARN_ON(cpu_online(cpu));
1210 for_each_process(p) {
1211 struct task_struct *t;
1214 * Main thread might exit, but other threads may still have
1215 * a valid mm. Find one.
1217 t = find_lock_task_mm(p);
1220 arch_clear_mm_cpumask_cpu(cpu, t->mm);
1226 /* Take this CPU down. */
1227 static int take_cpu_down(void *_param)
1229 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1230 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1231 int err, cpu = smp_processor_id();
1233 /* Ensure this CPU doesn't handle any more interrupts. */
1234 err = __cpu_disable();
1239 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
1240 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
1242 WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
1245 * Invoke the former CPU_DYING callbacks. DYING must not fail!
1247 cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
1249 /* Give up timekeeping duties */
1250 tick_handover_do_timer();
1251 /* Remove CPU from timer broadcasting */
1252 tick_offline_cpu(cpu);
1253 /* Park the stopper thread */
1254 stop_machine_park(cpu);
1258 static int takedown_cpu(unsigned int cpu)
1260 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1263 /* Park the smpboot threads */
1264 kthread_park(st->thread);
1267 * Prevent irq alloc/free while the dying cpu reorganizes the
1268 * interrupt affinities.
1273 * So now all preempt/rcu users must observe !cpu_active().
1275 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
1277 /* CPU refused to die */
1278 irq_unlock_sparse();
1279 /* Unpark the hotplug thread so we can rollback there */
1280 kthread_unpark(st->thread);
1283 BUG_ON(cpu_online(cpu));
1286 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
1287 * all runnable tasks from the CPU, there's only the idle task left now
1288 * that the migration thread is done doing the stop_machine thing.
1290 * Wait for the stop thread to go away.
1292 wait_for_ap_thread(st, false);
1293 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1295 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
1296 irq_unlock_sparse();
1298 hotplug_cpu__broadcast_tick_pull(cpu);
1299 /* This actually kills the CPU. */
1302 cpuhp_bp_sync_dead(cpu);
1304 tick_cleanup_dead_cpu(cpu);
1305 rcutree_migrate_callbacks(cpu);
1309 static void cpuhp_complete_idle_dead(void *arg)
1311 struct cpuhp_cpu_state *st = arg;
1313 complete_ap_thread(st, false);
1316 void cpuhp_report_idle_dead(void)
1318 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1320 BUG_ON(st->state != CPUHP_AP_OFFLINE);
1321 rcu_report_dead(smp_processor_id());
1322 st->state = CPUHP_AP_IDLE_DEAD;
1324 * We cannot call complete after rcu_report_dead() so we delegate it
1327 smp_call_function_single(cpumask_first(cpu_online_mask),
1328 cpuhp_complete_idle_dead, st, 0);
1331 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1332 enum cpuhp_state target)
1334 enum cpuhp_state prev_state = st->state;
1337 ret = cpuhp_invoke_callback_range(false, cpu, st, target);
1339 pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
1340 ret, cpu, cpuhp_get_step(st->state)->name,
1343 cpuhp_reset_state(cpu, st, prev_state);
1345 if (st->state < prev_state)
1346 WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
1353 /* Requires cpu_add_remove_lock to be held */
1354 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1355 enum cpuhp_state target)
1357 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1358 int prev_state, ret = 0;
1360 if (num_online_cpus() == 1)
1363 if (!cpu_present(cpu))
1368 cpuhp_tasks_frozen = tasks_frozen;
1370 prev_state = cpuhp_set_state(cpu, st, target);
1372 * If the current CPU state is in the range of the AP hotplug thread,
1373 * then we need to kick the thread.
1375 if (st->state > CPUHP_TEARDOWN_CPU) {
1376 st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1377 ret = cpuhp_kick_ap_work(cpu);
1379 * The AP side has done the error rollback already. Just
1380 * return the error code..
1386 * We might have stopped still in the range of the AP hotplug
1387 * thread. Nothing to do anymore.
1389 if (st->state > CPUHP_TEARDOWN_CPU)
1392 st->target = target;
1395 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1396 * to do the further cleanups.
1398 ret = cpuhp_down_callbacks(cpu, st, target);
1399 if (ret && st->state < prev_state) {
1400 if (st->state == CPUHP_TEARDOWN_CPU) {
1401 cpuhp_reset_state(cpu, st, prev_state);
1402 __cpuhp_kick_ap(st);
1404 WARN(1, "DEAD callback error for CPU%d", cpu);
1409 cpus_write_unlock();
1411 * Do post unplug cleanup. This is still protected against
1412 * concurrent CPU hotplug via cpu_add_remove_lock.
1414 lockup_detector_cleanup();
1416 cpu_up_down_serialize_trainwrecks(tasks_frozen);
1420 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1423 * If the platform does not support hotplug, report it explicitly to
1424 * differentiate it from a transient offlining failure.
1426 if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
1428 if (cpu_hotplug_disabled)
1430 return _cpu_down(cpu, 0, target);
1433 static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1437 cpu_maps_update_begin();
1438 err = cpu_down_maps_locked(cpu, target);
1439 cpu_maps_update_done();
1444 * cpu_device_down - Bring down a cpu device
1445 * @dev: Pointer to the cpu device to offline
1447 * This function is meant to be used by device core cpu subsystem only.
1449 * Other subsystems should use remove_cpu() instead.
1451 * Return: %0 on success or a negative errno code
1453 int cpu_device_down(struct device *dev)
1455 return cpu_down(dev->id, CPUHP_OFFLINE);
1458 int remove_cpu(unsigned int cpu)
1462 lock_device_hotplug();
1463 ret = device_offline(get_cpu_device(cpu));
1464 unlock_device_hotplug();
1468 EXPORT_SYMBOL_GPL(remove_cpu);
1470 void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
1475 cpu_maps_update_begin();
1478 * Make certain the cpu I'm about to reboot on is online.
1480 * This is inline to what migrate_to_reboot_cpu() already do.
1482 if (!cpu_online(primary_cpu))
1483 primary_cpu = cpumask_first(cpu_online_mask);
1485 for_each_online_cpu(cpu) {
1486 if (cpu == primary_cpu)
1489 error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1491 pr_err("Failed to offline CPU%d - error=%d",
1498 * Ensure all but the reboot CPU are offline.
1500 BUG_ON(num_online_cpus() > 1);
1503 * Make sure the CPUs won't be enabled by someone else after this
1504 * point. Kexec will reboot to a new kernel shortly resetting
1505 * everything along the way.
1507 cpu_hotplug_disabled++;
1509 cpu_maps_update_done();
1513 #define takedown_cpu NULL
1514 #endif /*CONFIG_HOTPLUG_CPU*/
1517 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1518 * @cpu: cpu that just started
1520 * It must be called by the arch code on the new cpu, before the new cpu
1521 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1523 void notify_cpu_starting(unsigned int cpu)
1525 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1526 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1528 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
1529 cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1532 * STARTING must not fail!
1534 cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
1538 * Called from the idle task. Wake up the controlling task which brings the
1539 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1540 * online bringup to the hotplug thread.
1542 void cpuhp_online_idle(enum cpuhp_state state)
1544 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1546 /* Happens for the boot cpu */
1547 if (state != CPUHP_AP_ONLINE_IDLE)
1550 cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);
1553 * Unpark the stopper thread before we start the idle loop (and start
1554 * scheduling); this ensures the stopper task is always available.
1556 stop_machine_unpark(smp_processor_id());
1558 st->state = CPUHP_AP_ONLINE_IDLE;
1559 complete_ap_thread(st, true);
1562 /* Requires cpu_add_remove_lock to be held */
1563 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1565 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1566 struct task_struct *idle;
1571 if (!cpu_present(cpu)) {
1577 * The caller of cpu_up() might have raced with another
1578 * caller. Nothing to do.
1580 if (st->state >= target)
1583 if (st->state == CPUHP_OFFLINE) {
1584 /* Let it fail before we try to bring the cpu up */
1585 idle = idle_thread_get(cpu);
1587 ret = PTR_ERR(idle);
1592 cpuhp_tasks_frozen = tasks_frozen;
1594 cpuhp_set_state(cpu, st, target);
1596 * If the current CPU state is in the range of the AP hotplug thread,
1597 * then we need to kick the thread once more.
1599 if (st->state > CPUHP_BRINGUP_CPU) {
1600 ret = cpuhp_kick_ap_work(cpu);
1602 * The AP side has done the error rollback already. Just
1603 * return the error code..
1610 * Try to reach the target state. We max out on the BP at
1611 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1612 * responsible for bringing it up to the target state.
1614 target = min((int)target, CPUHP_BRINGUP_CPU);
1615 ret = cpuhp_up_callbacks(cpu, st, target);
1617 cpus_write_unlock();
1619 cpu_up_down_serialize_trainwrecks(tasks_frozen);
1623 static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1627 if (!cpu_possible(cpu)) {
1628 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1630 #if defined(CONFIG_IA64)
1631 pr_err("please check additional_cpus= boot parameter\n");
1636 err = try_online_node(cpu_to_node(cpu));
1640 cpu_maps_update_begin();
1642 if (cpu_hotplug_disabled) {
1646 if (!cpu_smt_allowed(cpu)) {
1651 err = _cpu_up(cpu, 0, target);
1653 cpu_maps_update_done();
1658 * cpu_device_up - Bring up a cpu device
1659 * @dev: Pointer to the cpu device to online
1661 * This function is meant to be used by device core cpu subsystem only.
1663 * Other subsystems should use add_cpu() instead.
1665 * Return: %0 on success or a negative errno code
1667 int cpu_device_up(struct device *dev)
1669 return cpu_up(dev->id, CPUHP_ONLINE);
1672 int add_cpu(unsigned int cpu)
1676 lock_device_hotplug();
1677 ret = device_online(get_cpu_device(cpu));
1678 unlock_device_hotplug();
1682 EXPORT_SYMBOL_GPL(add_cpu);
1685 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1686 * @sleep_cpu: The cpu we hibernated on and should be brought up.
1688 * On some architectures like arm64, we can hibernate on any CPU, but on
1689 * wake up the CPU we hibernated on might be offline as a side effect of
1690 * using maxcpus= for example.
1692 * Return: %0 on success or a negative errno code
1694 int bringup_hibernate_cpu(unsigned int sleep_cpu)
1698 if (!cpu_online(sleep_cpu)) {
1699 pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1700 ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1702 pr_err("Failed to bring hibernate-CPU up!\n");
1709 void __init bringup_nonboot_cpus(unsigned int setup_max_cpus)
1713 for_each_present_cpu(cpu) {
1714 if (num_online_cpus() >= setup_max_cpus)
1716 if (!cpu_online(cpu))
1717 cpu_up(cpu, CPUHP_ONLINE);
1721 #ifdef CONFIG_PM_SLEEP_SMP
1722 static cpumask_var_t frozen_cpus;
1724 int freeze_secondary_cpus(int primary)
1728 cpu_maps_update_begin();
1729 if (primary == -1) {
1730 primary = cpumask_first(cpu_online_mask);
1731 if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
1732 primary = housekeeping_any_cpu(HK_TYPE_TIMER);
1734 if (!cpu_online(primary))
1735 primary = cpumask_first(cpu_online_mask);
1739 * We take down all of the non-boot CPUs in one shot to avoid races
1740 * with the userspace trying to use the CPU hotplug at the same time
1742 cpumask_clear(frozen_cpus);
1744 pr_info("Disabling non-boot CPUs ...\n");
1745 for_each_online_cpu(cpu) {
1749 if (pm_wakeup_pending()) {
1750 pr_info("Wakeup pending. Abort CPU freeze\n");
1755 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1756 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1757 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1759 cpumask_set_cpu(cpu, frozen_cpus);
1761 pr_err("Error taking CPU%d down: %d\n", cpu, error);
1767 BUG_ON(num_online_cpus() > 1);
1769 pr_err("Non-boot CPUs are not disabled\n");
1772 * Make sure the CPUs won't be enabled by someone else. We need to do
1773 * this even in case of failure as all freeze_secondary_cpus() users are
1774 * supposed to do thaw_secondary_cpus() on the failure path.
1776 cpu_hotplug_disabled++;
1778 cpu_maps_update_done();
1782 void __weak arch_thaw_secondary_cpus_begin(void)
1786 void __weak arch_thaw_secondary_cpus_end(void)
1790 void thaw_secondary_cpus(void)
1794 /* Allow everyone to use the CPU hotplug again */
1795 cpu_maps_update_begin();
1796 __cpu_hotplug_enable();
1797 if (cpumask_empty(frozen_cpus))
1800 pr_info("Enabling non-boot CPUs ...\n");
1802 arch_thaw_secondary_cpus_begin();
1804 for_each_cpu(cpu, frozen_cpus) {
1805 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1806 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1807 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1809 pr_info("CPU%d is up\n", cpu);
1812 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1815 arch_thaw_secondary_cpus_end();
1817 cpumask_clear(frozen_cpus);
1819 cpu_maps_update_done();
1822 static int __init alloc_frozen_cpus(void)
1824 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1828 core_initcall(alloc_frozen_cpus);
1831 * When callbacks for CPU hotplug notifications are being executed, we must
1832 * ensure that the state of the system with respect to the tasks being frozen
1833 * or not, as reported by the notification, remains unchanged *throughout the
1834 * duration* of the execution of the callbacks.
1835 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1837 * This synchronization is implemented by mutually excluding regular CPU
1838 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1839 * Hibernate notifications.
1842 cpu_hotplug_pm_callback(struct notifier_block *nb,
1843 unsigned long action, void *ptr)
1847 case PM_SUSPEND_PREPARE:
1848 case PM_HIBERNATION_PREPARE:
1849 cpu_hotplug_disable();
1852 case PM_POST_SUSPEND:
1853 case PM_POST_HIBERNATION:
1854 cpu_hotplug_enable();
1865 static int __init cpu_hotplug_pm_sync_init(void)
1868 * cpu_hotplug_pm_callback has higher priority than x86
1869 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1870 * to disable cpu hotplug to avoid cpu hotplug race.
1872 pm_notifier(cpu_hotplug_pm_callback, 0);
1875 core_initcall(cpu_hotplug_pm_sync_init);
1877 #endif /* CONFIG_PM_SLEEP_SMP */
1881 #endif /* CONFIG_SMP */
1883 /* Boot processor state steps */
1884 static struct cpuhp_step cpuhp_hp_states[] = {
1887 .startup.single = NULL,
1888 .teardown.single = NULL,
1891 [CPUHP_CREATE_THREADS]= {
1892 .name = "threads:prepare",
1893 .startup.single = smpboot_create_threads,
1894 .teardown.single = NULL,
1897 [CPUHP_PERF_PREPARE] = {
1898 .name = "perf:prepare",
1899 .startup.single = perf_event_init_cpu,
1900 .teardown.single = perf_event_exit_cpu,
1902 [CPUHP_RANDOM_PREPARE] = {
1903 .name = "random:prepare",
1904 .startup.single = random_prepare_cpu,
1905 .teardown.single = NULL,
1907 [CPUHP_WORKQUEUE_PREP] = {
1908 .name = "workqueue:prepare",
1909 .startup.single = workqueue_prepare_cpu,
1910 .teardown.single = NULL,
1912 [CPUHP_HRTIMERS_PREPARE] = {
1913 .name = "hrtimers:prepare",
1914 .startup.single = hrtimers_prepare_cpu,
1915 .teardown.single = hrtimers_dead_cpu,
1917 [CPUHP_SMPCFD_PREPARE] = {
1918 .name = "smpcfd:prepare",
1919 .startup.single = smpcfd_prepare_cpu,
1920 .teardown.single = smpcfd_dead_cpu,
1922 [CPUHP_RELAY_PREPARE] = {
1923 .name = "relay:prepare",
1924 .startup.single = relay_prepare_cpu,
1925 .teardown.single = NULL,
1927 [CPUHP_SLAB_PREPARE] = {
1928 .name = "slab:prepare",
1929 .startup.single = slab_prepare_cpu,
1930 .teardown.single = slab_dead_cpu,
1932 [CPUHP_RCUTREE_PREP] = {
1933 .name = "RCU/tree:prepare",
1934 .startup.single = rcutree_prepare_cpu,
1935 .teardown.single = rcutree_dead_cpu,
1938 * On the tear-down path, timers_dead_cpu() must be invoked
1939 * before blk_mq_queue_reinit_notify() from notify_dead(),
1940 * otherwise a RCU stall occurs.
1942 [CPUHP_TIMERS_PREPARE] = {
1943 .name = "timers:prepare",
1944 .startup.single = timers_prepare_cpu,
1945 .teardown.single = timers_dead_cpu,
1947 /* Kicks the plugged cpu into life */
1948 [CPUHP_BRINGUP_CPU] = {
1949 .name = "cpu:bringup",
1950 .startup.single = bringup_cpu,
1951 .teardown.single = finish_cpu,
1954 /* Final state before CPU kills itself */
1955 [CPUHP_AP_IDLE_DEAD] = {
1956 .name = "idle:dead",
1959 * Last state before CPU enters the idle loop to die. Transient state
1960 * for synchronization.
1962 [CPUHP_AP_OFFLINE] = {
1963 .name = "ap:offline",
1966 /* First state is scheduler control. Interrupts are disabled */
1967 [CPUHP_AP_SCHED_STARTING] = {
1968 .name = "sched:starting",
1969 .startup.single = sched_cpu_starting,
1970 .teardown.single = sched_cpu_dying,
1972 [CPUHP_AP_RCUTREE_DYING] = {
1973 .name = "RCU/tree:dying",
1974 .startup.single = NULL,
1975 .teardown.single = rcutree_dying_cpu,
1977 [CPUHP_AP_SMPCFD_DYING] = {
1978 .name = "smpcfd:dying",
1979 .startup.single = NULL,
1980 .teardown.single = smpcfd_dying_cpu,
1982 /* Entry state on starting. Interrupts enabled from here on. Transient
1983 * state for synchronsization */
1984 [CPUHP_AP_ONLINE] = {
1985 .name = "ap:online",
1988 * Handled on control processor until the plugged processor manages
1991 [CPUHP_TEARDOWN_CPU] = {
1992 .name = "cpu:teardown",
1993 .startup.single = NULL,
1994 .teardown.single = takedown_cpu,
1998 [CPUHP_AP_SCHED_WAIT_EMPTY] = {
1999 .name = "sched:waitempty",
2000 .startup.single = NULL,
2001 .teardown.single = sched_cpu_wait_empty,
2004 /* Handle smpboot threads park/unpark */
2005 [CPUHP_AP_SMPBOOT_THREADS] = {
2006 .name = "smpboot/threads:online",
2007 .startup.single = smpboot_unpark_threads,
2008 .teardown.single = smpboot_park_threads,
2010 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
2011 .name = "irq/affinity:online",
2012 .startup.single = irq_affinity_online_cpu,
2013 .teardown.single = NULL,
2015 [CPUHP_AP_PERF_ONLINE] = {
2016 .name = "perf:online",
2017 .startup.single = perf_event_init_cpu,
2018 .teardown.single = perf_event_exit_cpu,
2020 [CPUHP_AP_WATCHDOG_ONLINE] = {
2021 .name = "lockup_detector:online",
2022 .startup.single = lockup_detector_online_cpu,
2023 .teardown.single = lockup_detector_offline_cpu,
2025 [CPUHP_AP_WORKQUEUE_ONLINE] = {
2026 .name = "workqueue:online",
2027 .startup.single = workqueue_online_cpu,
2028 .teardown.single = workqueue_offline_cpu,
2030 [CPUHP_AP_RANDOM_ONLINE] = {
2031 .name = "random:online",
2032 .startup.single = random_online_cpu,
2033 .teardown.single = NULL,
2035 [CPUHP_AP_RCUTREE_ONLINE] = {
2036 .name = "RCU/tree:online",
2037 .startup.single = rcutree_online_cpu,
2038 .teardown.single = rcutree_offline_cpu,
2042 * The dynamically registered state space is here
2046 /* Last state is scheduler control setting the cpu active */
2047 [CPUHP_AP_ACTIVE] = {
2048 .name = "sched:active",
2049 .startup.single = sched_cpu_activate,
2050 .teardown.single = sched_cpu_deactivate,
2054 /* CPU is fully up and running. */
2057 .startup.single = NULL,
2058 .teardown.single = NULL,
2062 /* Sanity check for callbacks */
2063 static int cpuhp_cb_check(enum cpuhp_state state)
2065 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
2071 * Returns a free for dynamic slot assignment of the Online state. The states
2072 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
2073 * by having no name assigned.
2075 static int cpuhp_reserve_state(enum cpuhp_state state)
2077 enum cpuhp_state i, end;
2078 struct cpuhp_step *step;
2081 case CPUHP_AP_ONLINE_DYN:
2082 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
2083 end = CPUHP_AP_ONLINE_DYN_END;
2085 case CPUHP_BP_PREPARE_DYN:
2086 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
2087 end = CPUHP_BP_PREPARE_DYN_END;
2093 for (i = state; i <= end; i++, step++) {
2097 WARN(1, "No more dynamic states available for CPU hotplug\n");
2101 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
2102 int (*startup)(unsigned int cpu),
2103 int (*teardown)(unsigned int cpu),
2104 bool multi_instance)
2106 /* (Un)Install the callbacks for further cpu hotplug operations */
2107 struct cpuhp_step *sp;
2111 * If name is NULL, then the state gets removed.
2113 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
2114 * the first allocation from these dynamic ranges, so the removal
2115 * would trigger a new allocation and clear the wrong (already
2116 * empty) state, leaving the callbacks of the to be cleared state
2117 * dangling, which causes wreckage on the next hotplug operation.
2119 if (name && (state == CPUHP_AP_ONLINE_DYN ||
2120 state == CPUHP_BP_PREPARE_DYN)) {
2121 ret = cpuhp_reserve_state(state);
2126 sp = cpuhp_get_step(state);
2127 if (name && sp->name)
2130 sp->startup.single = startup;
2131 sp->teardown.single = teardown;
2133 sp->multi_instance = multi_instance;
2134 INIT_HLIST_HEAD(&sp->list);
2138 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
2140 return cpuhp_get_step(state)->teardown.single;
2144 * Call the startup/teardown function for a step either on the AP or
2145 * on the current CPU.
2147 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
2148 struct hlist_node *node)
2150 struct cpuhp_step *sp = cpuhp_get_step(state);
2154 * If there's nothing to do, we done.
2155 * Relies on the union for multi_instance.
2157 if (cpuhp_step_empty(bringup, sp))
2160 * The non AP bound callbacks can fail on bringup. On teardown
2161 * e.g. module removal we crash for now.
2164 if (cpuhp_is_ap_state(state))
2165 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
2167 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2169 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2171 BUG_ON(ret && !bringup);
2176 * Called from __cpuhp_setup_state on a recoverable failure.
2178 * Note: The teardown callbacks for rollback are not allowed to fail!
2180 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
2181 struct hlist_node *node)
2185 /* Roll back the already executed steps on the other cpus */
2186 for_each_present_cpu(cpu) {
2187 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2188 int cpustate = st->state;
2190 if (cpu >= failedcpu)
2193 /* Did we invoke the startup call on that cpu ? */
2194 if (cpustate >= state)
2195 cpuhp_issue_call(cpu, state, false, node);
2199 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
2200 struct hlist_node *node,
2203 struct cpuhp_step *sp;
2207 lockdep_assert_cpus_held();
2209 sp = cpuhp_get_step(state);
2210 if (sp->multi_instance == false)
2213 mutex_lock(&cpuhp_state_mutex);
2215 if (!invoke || !sp->startup.multi)
2219 * Try to call the startup callback for each present cpu
2220 * depending on the hotplug state of the cpu.
2222 for_each_present_cpu(cpu) {
2223 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2224 int cpustate = st->state;
2226 if (cpustate < state)
2229 ret = cpuhp_issue_call(cpu, state, true, node);
2231 if (sp->teardown.multi)
2232 cpuhp_rollback_install(cpu, state, node);
2238 hlist_add_head(node, &sp->list);
2240 mutex_unlock(&cpuhp_state_mutex);
2244 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
2250 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
2254 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
2257 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
2258 * @state: The state to setup
2259 * @name: Name of the step
2260 * @invoke: If true, the startup function is invoked for cpus where
2261 * cpu state >= @state
2262 * @startup: startup callback function
2263 * @teardown: teardown callback function
2264 * @multi_instance: State is set up for multiple instances which get
2267 * The caller needs to hold cpus read locked while calling this function.
2270 * Positive state number if @state is CPUHP_AP_ONLINE_DYN;
2271 * 0 for all other states
2272 * On failure: proper (negative) error code
2274 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
2275 const char *name, bool invoke,
2276 int (*startup)(unsigned int cpu),
2277 int (*teardown)(unsigned int cpu),
2278 bool multi_instance)
2283 lockdep_assert_cpus_held();
2285 if (cpuhp_cb_check(state) || !name)
2288 mutex_lock(&cpuhp_state_mutex);
2290 ret = cpuhp_store_callbacks(state, name, startup, teardown,
2293 dynstate = state == CPUHP_AP_ONLINE_DYN;
2294 if (ret > 0 && dynstate) {
2299 if (ret || !invoke || !startup)
2303 * Try to call the startup callback for each present cpu
2304 * depending on the hotplug state of the cpu.
2306 for_each_present_cpu(cpu) {
2307 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2308 int cpustate = st->state;
2310 if (cpustate < state)
2313 ret = cpuhp_issue_call(cpu, state, true, NULL);
2316 cpuhp_rollback_install(cpu, state, NULL);
2317 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2322 mutex_unlock(&cpuhp_state_mutex);
2324 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
2325 * dynamically allocated state in case of success.
2327 if (!ret && dynstate)
2331 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2333 int __cpuhp_setup_state(enum cpuhp_state state,
2334 const char *name, bool invoke,
2335 int (*startup)(unsigned int cpu),
2336 int (*teardown)(unsigned int cpu),
2337 bool multi_instance)
2342 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2343 teardown, multi_instance);
2347 EXPORT_SYMBOL(__cpuhp_setup_state);
2349 int __cpuhp_state_remove_instance(enum cpuhp_state state,
2350 struct hlist_node *node, bool invoke)
2352 struct cpuhp_step *sp = cpuhp_get_step(state);
2355 BUG_ON(cpuhp_cb_check(state));
2357 if (!sp->multi_instance)
2361 mutex_lock(&cpuhp_state_mutex);
2363 if (!invoke || !cpuhp_get_teardown_cb(state))
2366 * Call the teardown callback for each present cpu depending
2367 * on the hotplug state of the cpu. This function is not
2368 * allowed to fail currently!
2370 for_each_present_cpu(cpu) {
2371 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2372 int cpustate = st->state;
2374 if (cpustate >= state)
2375 cpuhp_issue_call(cpu, state, false, node);
2380 mutex_unlock(&cpuhp_state_mutex);
2385 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2388 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
2389 * @state: The state to remove
2390 * @invoke: If true, the teardown function is invoked for cpus where
2391 * cpu state >= @state
2393 * The caller needs to hold cpus read locked while calling this function.
2394 * The teardown callback is currently not allowed to fail. Think
2395 * about module removal!
2397 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2399 struct cpuhp_step *sp = cpuhp_get_step(state);
2402 BUG_ON(cpuhp_cb_check(state));
2404 lockdep_assert_cpus_held();
2406 mutex_lock(&cpuhp_state_mutex);
2407 if (sp->multi_instance) {
2408 WARN(!hlist_empty(&sp->list),
2409 "Error: Removing state %d which has instances left.\n",
2414 if (!invoke || !cpuhp_get_teardown_cb(state))
2418 * Call the teardown callback for each present cpu depending
2419 * on the hotplug state of the cpu. This function is not
2420 * allowed to fail currently!
2422 for_each_present_cpu(cpu) {
2423 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2424 int cpustate = st->state;
2426 if (cpustate >= state)
2427 cpuhp_issue_call(cpu, state, false, NULL);
2430 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2431 mutex_unlock(&cpuhp_state_mutex);
2433 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2435 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2438 __cpuhp_remove_state_cpuslocked(state, invoke);
2441 EXPORT_SYMBOL(__cpuhp_remove_state);
2443 #ifdef CONFIG_HOTPLUG_SMT
2444 static void cpuhp_offline_cpu_device(unsigned int cpu)
2446 struct device *dev = get_cpu_device(cpu);
2448 dev->offline = true;
2449 /* Tell user space about the state change */
2450 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2453 static void cpuhp_online_cpu_device(unsigned int cpu)
2455 struct device *dev = get_cpu_device(cpu);
2457 dev->offline = false;
2458 /* Tell user space about the state change */
2459 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2462 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2466 cpu_maps_update_begin();
2467 for_each_online_cpu(cpu) {
2468 if (topology_is_primary_thread(cpu))
2470 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2474 * As this needs to hold the cpu maps lock it's impossible
2475 * to call device_offline() because that ends up calling
2476 * cpu_down() which takes cpu maps lock. cpu maps lock
2477 * needs to be held as this might race against in kernel
2478 * abusers of the hotplug machinery (thermal management).
2480 * So nothing would update device:offline state. That would
2481 * leave the sysfs entry stale and prevent onlining after
2482 * smt control has been changed to 'off' again. This is
2483 * called under the sysfs hotplug lock, so it is properly
2484 * serialized against the regular offline usage.
2486 cpuhp_offline_cpu_device(cpu);
2489 cpu_smt_control = ctrlval;
2490 cpu_maps_update_done();
2494 int cpuhp_smt_enable(void)
2498 cpu_maps_update_begin();
2499 cpu_smt_control = CPU_SMT_ENABLED;
2500 for_each_present_cpu(cpu) {
2501 /* Skip online CPUs and CPUs on offline nodes */
2502 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2504 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2507 /* See comment in cpuhp_smt_disable() */
2508 cpuhp_online_cpu_device(cpu);
2510 cpu_maps_update_done();
2515 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2516 static ssize_t state_show(struct device *dev,
2517 struct device_attribute *attr, char *buf)
2519 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2521 return sprintf(buf, "%d\n", st->state);
2523 static DEVICE_ATTR_RO(state);
2525 static ssize_t target_store(struct device *dev, struct device_attribute *attr,
2526 const char *buf, size_t count)
2528 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2529 struct cpuhp_step *sp;
2532 ret = kstrtoint(buf, 10, &target);
2536 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2537 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2540 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2544 ret = lock_device_hotplug_sysfs();
2548 mutex_lock(&cpuhp_state_mutex);
2549 sp = cpuhp_get_step(target);
2550 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2551 mutex_unlock(&cpuhp_state_mutex);
2555 if (st->state < target)
2556 ret = cpu_up(dev->id, target);
2557 else if (st->state > target)
2558 ret = cpu_down(dev->id, target);
2559 else if (WARN_ON(st->target != target))
2560 st->target = target;
2562 unlock_device_hotplug();
2563 return ret ? ret : count;
2566 static ssize_t target_show(struct device *dev,
2567 struct device_attribute *attr, char *buf)
2569 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2571 return sprintf(buf, "%d\n", st->target);
2573 static DEVICE_ATTR_RW(target);
2575 static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
2576 const char *buf, size_t count)
2578 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2579 struct cpuhp_step *sp;
2582 ret = kstrtoint(buf, 10, &fail);
2586 if (fail == CPUHP_INVALID) {
2591 if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2595 * Cannot fail STARTING/DYING callbacks.
2597 if (cpuhp_is_atomic_state(fail))
2601 * DEAD callbacks cannot fail...
2602 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
2603 * triggering STARTING callbacks, a failure in this state would
2606 if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
2610 * Cannot fail anything that doesn't have callbacks.
2612 mutex_lock(&cpuhp_state_mutex);
2613 sp = cpuhp_get_step(fail);
2614 if (!sp->startup.single && !sp->teardown.single)
2616 mutex_unlock(&cpuhp_state_mutex);
2625 static ssize_t fail_show(struct device *dev,
2626 struct device_attribute *attr, char *buf)
2628 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2630 return sprintf(buf, "%d\n", st->fail);
2633 static DEVICE_ATTR_RW(fail);
2635 static struct attribute *cpuhp_cpu_attrs[] = {
2636 &dev_attr_state.attr,
2637 &dev_attr_target.attr,
2638 &dev_attr_fail.attr,
2642 static const struct attribute_group cpuhp_cpu_attr_group = {
2643 .attrs = cpuhp_cpu_attrs,
2648 static ssize_t states_show(struct device *dev,
2649 struct device_attribute *attr, char *buf)
2651 ssize_t cur, res = 0;
2654 mutex_lock(&cpuhp_state_mutex);
2655 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2656 struct cpuhp_step *sp = cpuhp_get_step(i);
2659 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2664 mutex_unlock(&cpuhp_state_mutex);
2667 static DEVICE_ATTR_RO(states);
2669 static struct attribute *cpuhp_cpu_root_attrs[] = {
2670 &dev_attr_states.attr,
2674 static const struct attribute_group cpuhp_cpu_root_attr_group = {
2675 .attrs = cpuhp_cpu_root_attrs,
2680 #ifdef CONFIG_HOTPLUG_SMT
2683 __store_smt_control(struct device *dev, struct device_attribute *attr,
2684 const char *buf, size_t count)
2688 if (sysfs_streq(buf, "on"))
2689 ctrlval = CPU_SMT_ENABLED;
2690 else if (sysfs_streq(buf, "off"))
2691 ctrlval = CPU_SMT_DISABLED;
2692 else if (sysfs_streq(buf, "forceoff"))
2693 ctrlval = CPU_SMT_FORCE_DISABLED;
2697 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2700 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2703 ret = lock_device_hotplug_sysfs();
2707 if (ctrlval != cpu_smt_control) {
2709 case CPU_SMT_ENABLED:
2710 ret = cpuhp_smt_enable();
2712 case CPU_SMT_DISABLED:
2713 case CPU_SMT_FORCE_DISABLED:
2714 ret = cpuhp_smt_disable(ctrlval);
2719 unlock_device_hotplug();
2720 return ret ? ret : count;
2723 #else /* !CONFIG_HOTPLUG_SMT */
2725 __store_smt_control(struct device *dev, struct device_attribute *attr,
2726 const char *buf, size_t count)
2730 #endif /* CONFIG_HOTPLUG_SMT */
2732 static const char *smt_states[] = {
2733 [CPU_SMT_ENABLED] = "on",
2734 [CPU_SMT_DISABLED] = "off",
2735 [CPU_SMT_FORCE_DISABLED] = "forceoff",
2736 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
2737 [CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
2740 static ssize_t control_show(struct device *dev,
2741 struct device_attribute *attr, char *buf)
2743 const char *state = smt_states[cpu_smt_control];
2745 return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
2748 static ssize_t control_store(struct device *dev, struct device_attribute *attr,
2749 const char *buf, size_t count)
2751 return __store_smt_control(dev, attr, buf, count);
2753 static DEVICE_ATTR_RW(control);
2755 static ssize_t active_show(struct device *dev,
2756 struct device_attribute *attr, char *buf)
2758 return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
2760 static DEVICE_ATTR_RO(active);
2762 static struct attribute *cpuhp_smt_attrs[] = {
2763 &dev_attr_control.attr,
2764 &dev_attr_active.attr,
2768 static const struct attribute_group cpuhp_smt_attr_group = {
2769 .attrs = cpuhp_smt_attrs,
2774 static int __init cpu_smt_sysfs_init(void)
2776 struct device *dev_root;
2779 dev_root = bus_get_dev_root(&cpu_subsys);
2781 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
2782 put_device(dev_root);
2787 static int __init cpuhp_sysfs_init(void)
2789 struct device *dev_root;
2792 ret = cpu_smt_sysfs_init();
2796 dev_root = bus_get_dev_root(&cpu_subsys);
2798 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group);
2799 put_device(dev_root);
2804 for_each_possible_cpu(cpu) {
2805 struct device *dev = get_cpu_device(cpu);
2809 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2815 device_initcall(cpuhp_sysfs_init);
2816 #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
2819 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2820 * represents all NR_CPUS bits binary values of 1<<nr.
2822 * It is used by cpumask_of() to get a constant address to a CPU
2823 * mask value that has a single bit set only.
2826 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2827 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
2828 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2829 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2830 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2832 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2834 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2835 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2836 #if BITS_PER_LONG > 32
2837 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2838 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
2841 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
2843 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2844 EXPORT_SYMBOL(cpu_all_bits);
2846 #ifdef CONFIG_INIT_ALL_POSSIBLE
2847 struct cpumask __cpu_possible_mask __read_mostly
2850 struct cpumask __cpu_possible_mask __read_mostly;
2852 EXPORT_SYMBOL(__cpu_possible_mask);
2854 struct cpumask __cpu_online_mask __read_mostly;
2855 EXPORT_SYMBOL(__cpu_online_mask);
2857 struct cpumask __cpu_present_mask __read_mostly;
2858 EXPORT_SYMBOL(__cpu_present_mask);
2860 struct cpumask __cpu_active_mask __read_mostly;
2861 EXPORT_SYMBOL(__cpu_active_mask);
2863 struct cpumask __cpu_dying_mask __read_mostly;
2864 EXPORT_SYMBOL(__cpu_dying_mask);
2866 atomic_t __num_online_cpus __read_mostly;
2867 EXPORT_SYMBOL(__num_online_cpus);
2869 void init_cpu_present(const struct cpumask *src)
2871 cpumask_copy(&__cpu_present_mask, src);
2874 void init_cpu_possible(const struct cpumask *src)
2876 cpumask_copy(&__cpu_possible_mask, src);
2879 void init_cpu_online(const struct cpumask *src)
2881 cpumask_copy(&__cpu_online_mask, src);
2884 void set_cpu_online(unsigned int cpu, bool online)
2887 * atomic_inc/dec() is required to handle the horrid abuse of this
2888 * function by the reboot and kexec code which invoke it from
2889 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
2890 * regular CPU hotplug is properly serialized.
2892 * Note, that the fact that __num_online_cpus is of type atomic_t
2893 * does not protect readers which are not serialized against
2894 * concurrent hotplug operations.
2897 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
2898 atomic_inc(&__num_online_cpus);
2900 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
2901 atomic_dec(&__num_online_cpus);
2906 * Activate the first processor.
2908 void __init boot_cpu_init(void)
2910 int cpu = smp_processor_id();
2912 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2913 set_cpu_online(cpu, true);
2914 set_cpu_active(cpu, true);
2915 set_cpu_present(cpu, true);
2916 set_cpu_possible(cpu, true);
2919 __boot_cpu_id = cpu;
2924 * Must be called _AFTER_ setting up the per_cpu areas
2926 void __init boot_cpu_hotplug_init(void)
2929 cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
2930 atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE);
2932 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2933 this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
2937 * These are used for a global "mitigations=" cmdline option for toggling
2938 * optional CPU mitigations.
2940 enum cpu_mitigations {
2941 CPU_MITIGATIONS_OFF,
2942 CPU_MITIGATIONS_AUTO,
2943 CPU_MITIGATIONS_AUTO_NOSMT,
2946 static enum cpu_mitigations cpu_mitigations __ro_after_init =
2947 CPU_MITIGATIONS_AUTO;
2949 static int __init mitigations_parse_cmdline(char *arg)
2951 if (!strcmp(arg, "off"))
2952 cpu_mitigations = CPU_MITIGATIONS_OFF;
2953 else if (!strcmp(arg, "auto"))
2954 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2955 else if (!strcmp(arg, "auto,nosmt"))
2956 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2958 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2963 early_param("mitigations", mitigations_parse_cmdline);
2965 /* mitigations=off */
2966 bool cpu_mitigations_off(void)
2968 return cpu_mitigations == CPU_MITIGATIONS_OFF;
2970 EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2972 /* mitigations=auto,nosmt */
2973 bool cpu_mitigations_auto_nosmt(void)
2975 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2977 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);