// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>
/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};
/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
	unsigned long		caller;
	cpu_stop_fn_t		fn;
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;
void print_stop_info(const char *log_lvl, struct task_struct *task)
{
	/*
	 * If @task is a stopper task, it cannot migrate and task_cpu() is
	 * stable.
	 */
	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));

	if (task != stopper->thread)
		return;

	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
}
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;
static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}
static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work,
				  struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}
/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
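
/*
 * Usage sketch (illustrative only, not taken from this file): run a short,
 * non-sleeping callback on one CPU and wait for its return value.  The
 * callback and variable names below are hypothetical.
 *
 *	static int read_remote_counter(void *arg)
 *	{
 *		unsigned long *val = arg;
 *
 *		*val = this_cpu_read(my_counter);
 *		return 0;
 *	}
 *
 *	err = stop_one_cpu(cpu, read_remote_counter, &val);
 *
 * The callback runs on @cpu in stopper context, so it must not sleep;
 * -ENOENT is returned if @cpu was offline.
 */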
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};
static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}
/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
		rcu_momentary_dyntick_idle();
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}
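
/*
 * Note on the handshake above: every participating stopper thread runs
 * multi_cpu_stop() and advances in lock step through
 *
 *	MULTI_STOP_NONE -> MULTI_STOP_PREPARE -> MULTI_STOP_DISABLE_IRQ ->
 *	MULTI_STOP_RUN -> MULTI_STOP_EXIT
 *
 * A state is only left once the last CPU has called ack_state(), so by the
 * time any CPU reaches MULTI_STOP_RUN all of them have interrupts disabled,
 * and only the CPUs marked active (->active_cpus, or the first online CPU
 * when that is NULL) invoke ->fn().
 */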
static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us. This will cause us to not wake up the other
	 * stopper forever.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();
		while (stop_cpus_in_progress)
			cpu_relax();
		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done,
		.caller = _RET_IP_,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
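
/*
 * Usage sketch (illustrative only): while @fn runs, both CPUs are spinning
 * in multi_cpu_stop() with interrupts disabled, so per-cpu state of the two
 * CPUs can be exchanged safely.  The structure and callback below are
 * hypothetical.
 *
 *	struct swap_args {
 *		int __percpu *ctr;
 *		int cpu1, cpu2;
 *	};
 *
 *	static int swap_counters(void *arg)
 *	{
 *		struct swap_args *sa = arg;
 *
 *		swap(*per_cpu_ptr(sa->ctr, sa->cpu1),
 *		     *per_cpu_ptr(sa->ctr, sa->cpu2));
 *		return 0;
 *	}
 *
 *	ret = stop_two_cpus(sa.cpu1, sa.cpu2, swap_counters, &sa);
 */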
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
	return cpu_stop_queue_work(cpu, work_buf);
}
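
/*
 * Usage sketch (illustrative only): the fire-and-forget variant.  @work_buf
 * must stay valid until the stopper has started executing @fn, so it is
 * typically embedded in a longer-lived per-cpu or per-object structure
 * rather than placed on the caller's stack.  The names below are
 * hypothetical.
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, poke_work);
 *
 *	static int poke_cpu(void *arg)
 *	{
 *		pr_info("poked on CPU%d\n", smp_processor_id());
 *		return 0;
 *	}
 *
 *	queued = stop_one_cpu_nowait(cpu, poke_cpu, NULL,
 *				     &per_cpu(poke_work, cpu));
 */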
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}
static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}
/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}
static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		stopper->caller = work->caller;
		stopper->fn = fn;
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		stopper->fn = NULL;
		stopper->caller = 0;
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}
void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}
extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}
static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};
static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
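
/*
 * Usage sketch (illustrative only): stop_machine() is the heavyweight
 * hammer for rare, global transitions.  With @cpus == NULL, @fn runs on one
 * CPU while every other online CPU spins in multi_cpu_stop() with
 * interrupts disabled, so the callback must be short and must not sleep or
 * take sleeping locks.  The callback and structure below are hypothetical.
 *
 *	static int apply_global_change(void *arg)
 *	{
 *		struct my_state *s = arg;
 *
 *		s->applied = true;
 *		return 0;
 *	}
 *
 *	ret = stop_machine(apply_global_change, &state, NULL);
 *
 * Passing a cpumask for @cpus instead makes @fn run on every CPU in that
 * mask while the remaining CPUs spin.
 */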
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
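
/*
 * Note on the above: because the calling CPU is not yet active it cannot
 * sleep, so this variant busy-waits for stop_cpus_mutex and for the
 * completion, and runs multi_cpu_stop() directly on the local CPU; that is
 * why msdata.num_threads is num_active_cpus() + 1.
 */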