/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */

#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
static bool stop_machine_initialized = false;

/*
 * Avoids a race between stop_two_cpus() and the global stop_cpus(),
 * where the stoppers could get queued up in reverse order, leading to
 * system deadlock.  Using an lglock means stop_two_cpus() remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(p);
	} else
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(cpu, &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

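/*
 * Usage sketch (illustrative only; reset_counter and my_counter are
 * made-up names).  The callback runs at stopper priority with the cpu
 * to itself and must not sleep:
 *
 *	static int reset_counter(void *arg)
 *	{
 *		atomic_set(arg, 0);	// runs exclusively on the cpu
 *		return 0;
 *	}
 *
 *	int err = stop_one_cpu(3, reset_counter, &my_counter);
 *	// -ENOENT if cpu 3 was offline, else reset_counter's return
 */
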
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	int			(*fn)(void *);
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

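/*
 * Lockstep sketch: set_state() resets thread_ack to num_threads and
 * publishes the next state; each stopper thread notices the change,
 * performs the per-state action and calls ack_state(); the last ack
 * advances the state.  All threads therefore march through
 *
 *	PREPARE -> DISABLE_IRQ -> RUN -> EXIT
 *
 * in unison.  Only threads on ->active_cpus (or the first online cpu
 * when it is NULL) call ->fn() during RUN; the others spin with irqs
 * disabled until EXIT.
 */
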
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on one of them (@cpu1).
 *
 * Returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	preempt_disable();
	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	/*
	 * If we observe both CPUs active we know _cpu_down() cannot yet have
	 * queued its stop_machine works and therefore ours will get executed
	 * first.  Or it's not either one of our CPUs that's getting unplugged,
	 * in which case we don't care.
	 *
	 * This relies on the stopper workqueues to be FIFO.
	 */
	if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
		preempt_enable();
		return -ENOENT;
	}

	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
	cpu_stop_queue_work(cpu1, &work1);
	cpu_stop_queue_work(cpu2, &work2);
	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);

	preempt_enable();

	wait_for_completion(&done.completion);

	return done.executed ? done.ret : -ENOENT;
}

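/*
 * Usage sketch (illustrative; swap_fn and swap_arg are made-up
 * names): callers that need two cpus quiescent at once, e.g. for
 * cross-cpu task swaps, get @fn run on @cpu1 while @cpu2 spins:
 *
 *	ret = stop_two_cpus(src_cpu, dst_cpu, swap_fn, &swap_arg);
 */
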
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(cpu, work_buf);
}

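/*
 * Usage sketch (illustrative; my_fn and my_work are made-up names):
 * because the call returns immediately, @work_buf must stay valid
 * until the stopper has started @fn, e.g. by using per-cpu storage:
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, my_work);
 *
 *	stop_one_cpu_nowait(cpu, my_fn, NULL, &per_cpu(my_work, cpu));
 */
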
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/* initialize works and done */
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(stop_cpus_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
	}

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	lg_global_lock(&stop_cpus_lock);
	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
	lg_global_unlock(&stop_cpus_lock);
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on those cpus may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

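/*
 * Usage sketch (illustrative; collect_fn and results are made-up
 * names): run a non-sleeping callback on all online cpus at once:
 *
 *	ret = stop_cpus(cpu_online_mask, collect_fn, &results);
 *	// -ENOENT only if every cpu in the mask was offline
 */
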
/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

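/*
 * Usage sketch (illustrative): a caller that prefers not to block on
 * stop_cpus_mutex can retry on -EAGAIN instead of sleeping in
 * stop_cpus():
 *
 *	do {
 *		ret = try_stop_cpus(cpu_online_mask, fn, arg);
 *	} while (ret == -EAGAIN);
 */
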
static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	int ret;

repeat:
	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
		goto repeat;
	}
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	unsigned long flags;

	/* drain remaining works */
	spin_lock_irqsave(&stopper->lock, flags);
	list_for_each_entry(work, &stopper->works, list)
		cpu_stop_signal_done(work->done, false);
	stopper->enabled = false;
	spin_unlock_irqrestore(&stopper->lock, flags);
}

static void cpu_stop_unpark(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	spin_lock_irq(&stopper->lock);
	stopper->enabled = true;
	spin_unlock_irq(&stopper->lock);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper_task,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.setup			= cpu_stop_unpark,
	.park			= cpu_stop_park,
	.pre_unpark		= cpu_stop_unpark,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before the stopper threads have been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

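/*
 * Usage sketch (illustrative; patch_fn and patch are made-up names):
 * stop_machine() suits rare global transitions such as code patching,
 * where no other cpu may execute while the change is made:
 *
 *	static int patch_fn(void *data)
 *	{
 *		apply_patch(data);	// all other cpus spin, irqs off
 *		return 0;
 *	}
 *
 *	ret = stop_machine(patch_fn, &patch, NULL);
 */
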
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}

#endif	/* CONFIG_STOP_MACHINE */