/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/list.h>

/*
 * stop_cpu[s]() is a simplistic per-cpu maximum priority cpu
 * monopolization mechanism.  The caller can specify a non-sleeping
 * function to be executed on a single or multiple cpus preempting all
 * other processes and monopolizing those cpus until it finishes.
 *
 * Resources for this mechanism are preallocated when a cpu is brought
 * up and requests are guaranteed to be served as long as the target
 * cpus are online.
 */
typedef int (*cpu_stop_fn_t)(void *arg);

#ifdef CONFIG_SMP

struct cpu_stop_work {
	struct list_head	list;		/* cpu_stopper->works */
	cpu_stop_fn_t		fn;
	void			*arg;
	struct cpu_stop_done	*done;
};

int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf);
void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);
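
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * callback and variable names below are hypothetical.  The callback runs
 * with maximum priority on the target cpu, preempting everything else,
 * and must not sleep; stop_one_cpu() blocks until it has completed.
 *
 *	static int reset_counter(void *arg)
 *	{
 *		*(unsigned long *)arg = 0;
 *		return 0;
 *	}
 *
 *	err = stop_one_cpu(target_cpu, reset_counter, &counter);
 */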

#else	/* CONFIG_SMP */

#include <linux/workqueue.h>

struct cpu_stop_work {
	struct work_struct	work;
	cpu_stop_fn_t		fn;
	void			*arg;
};

static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	int ret = -ENOENT;

	preempt_disable();
	if (cpu == smp_processor_id())
		ret = fn(arg);
	preempt_enable();

	return ret;
}

static void stop_one_cpu_nowait_workfn(struct work_struct *work)
{
	struct cpu_stop_work *stwork =
		container_of(work, struct cpu_stop_work, work);

	preempt_disable();
	stwork->fn(stwork->arg);
	preempt_enable();
}

static inline bool stop_one_cpu_nowait(unsigned int cpu,
				       cpu_stop_fn_t fn, void *arg,
				       struct cpu_stop_work *work_buf)
{
	if (cpu == smp_processor_id()) {
		INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
		work_buf->fn = fn;
		work_buf->arg = arg;
		schedule_work(&work_buf->work);
		return true;
	}

	return false;
}

#endif	/* CONFIG_SMP */

/*
 * stop_machine "Bogolock": stop the entire machine, disable
 * interrupts.  This is a very heavy lock, which is equivalent to
 * grabbing every spinlock (and more).  So the "read" side to such a
 * lock is anything which disables preemption.
 */
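
/*
 * Illustrative note (not from the original header): the "read" side is
 * simply any preempt-disabled region.  Code such as
 *
 *	preempt_disable();
 *	this_cpu_inc(some_percpu_counter);
 *	preempt_enable();
 *
 * cannot overlap a stop_machine() callback, because the callback only
 * runs once every cpu has entered its stopper thread with preemption
 * (and interrupts) disabled.  "some_percpu_counter" is a hypothetical
 * per-cpu variable used only for this sketch.
 */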
#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)

/**
 * stop_machine: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Description: This causes a thread to be scheduled on every cpu,
 * each of which disables interrupts.  The result is that no one is
 * holding a spinlock or inside any other preempt-disabled region when
 * @fn() runs.
 *
 * This can be thought of as a very heavy write lock, equivalent to
 * grabbing every spinlock in the kernel.
 *
 * Protects against CPU hotplug.
 */
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
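
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * callback and data names below are hypothetical.  Passing NULL for @cpus
 * lets the callback run on any online cpu; while it runs, every other
 * online cpu spins in its stopper thread with interrupts disabled.
 *
 *	static int apply_update(void *data)
 *	{
 *		struct my_update *u = data;
 *
 *		u->applied = true;
 *		return 0;
 *	}
 *
 *	err = stop_machine(apply_update, &update, NULL);
 */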

/**
 * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Same as above.  Must be called from within a cpus_read_lock() protected
 * region.  Avoids nested calls to cpus_read_lock().
 */
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
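
/*
 * Usage sketch (illustrative only): when the caller already holds
 * cpus_read_lock(), the _cpuslocked variant avoids taking it again.
 * apply_update and update are the hypothetical names from the sketch
 * above.
 *
 *	cpus_read_lock();
 *	err = stop_machine_cpuslocked(apply_update, &update, NULL);
 *	cpus_read_unlock();
 */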

int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus);
#else	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */

static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
					  const struct cpumask *cpus)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = fn(data);
	local_irq_restore(flags);

	return ret;
}

static inline int stop_machine(cpu_stop_fn_t fn, void *data,
			       const struct cpumask *cpus)
{
	return stop_machine_cpuslocked(fn, data, cpus);
}

static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
						 const struct cpumask *cpus)
{
	return stop_machine(fn, data, cpus);
}

#endif	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif	/* _LINUX_STOP_MACHINE */