/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp_types.h>

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
	struct __call_single_node node;
	smp_call_func_t func;
	void *info;
};

#define CSD_INIT(_func, _info) \
	(struct __call_single_data){ .func = (_func), .info = (_info), }

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

#define INIT_CSD(_csd, _func, _info)		\
do {						\
	*(_csd) = CSD_INIT((_func), (_info));	\
} while (0)

/*
 * Enqueue a llist_node on the call_single_queue; be very careful, read
 * flush_smp_call_function_queue() in detail.
 */
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

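/*
 * Example (illustrative sketch, not part of this API; ipi_handler and
 * target_cpu are made-up names): run a fast, non-blocking handler on one
 * CPU and, with a non-zero @wait, return only once it has completed:
 *
 *	static void ipi_handler(void *info)
 *	{
 *		pr_info("running on CPU %d\n", smp_processor_id());
 *	}
 *
 *	smp_call_function_single(target_cpu, ipi_handler, NULL, 1);
 */
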
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);

int smp_call_function_single_async(int cpu, call_single_data_t *csd);

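/*
 * Example (illustrative sketch; my_csd and my_func are made-up names):
 * fire an asynchronous cross-CPU call with a caller-owned csd. The caller
 * is responsible for serializing reuse of the csd; do not reissue it
 * before the previous @func invocation has finished:
 *
 *	static call_single_data_t my_csd;
 *
 *	INIT_CSD(&my_csd, my_func, NULL);
 *	ret = smp_call_function_single_async(cpu, &my_csd);
 */
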
/*
 * Call a function on all processors
 */
static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
}

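/*
 * Example (illustrative sketch; count_up is a made-up handler): run on
 * every online CPU, including the local one, and wait for all of them:
 *
 *	static void count_up(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	atomic_t hits = ATOMIC_INIT(0);
 *	on_each_cpu(count_up, &hits, 1);
 */
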
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
static inline void on_each_cpu_mask(const struct cpumask *mask,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
}

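/*
 * Example (illustrative sketch; do_flush and my_mask are made-up names):
 * restrict the call to an explicit cpumask; only the online subset of the
 * mask actually runs the function:
 *
 *	cpumask_var_t my_mask;
 *
 *	if (zalloc_cpumask_var(&my_mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(2, my_mask);
 *		cpumask_set_cpu(3, my_mask);
 *		on_each_cpu_mask(my_mask, do_flush, NULL, true);
 *		free_cpumask_var(my_mask);
 *	}
 */
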
/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true. This may include the local
 * processor.  May be used during early boot while early_boot_irqs_disabled is
 * set. Use local_irq_save/restore() instead of local_irq_disable/enable().
 */
static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}

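/*
 * Example (illustrative sketch; should_flush, do_flush and the per-CPU
 * flag are made-up names): only CPUs for which the condition function
 * returns true receive the call:
 *
 *	static DEFINE_PER_CPU(bool, needs_flush);
 *
 *	static bool should_flush(int cpu, void *info)
 *	{
 *		return per_cpu(needs_flush, cpu);
 *	}
 *
 *	on_each_cpu_cond(should_flush, do_flush, NULL, true);
 */
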
#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

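/*
 * Example (illustrative sketch; do_sync, do_probe and data are made-up
 * names): smp_call_function() targets every other online CPU, while
 * smp_call_function_any() picks a single CPU from the mask, preferring
 * the current one if it is included:
 *
 *	smp_call_function(do_sync, NULL, 1);
 *	err = smp_call_function_any(cpu_online_mask, do_probe, &data, 1);
 */
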
void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions
 * and WARN when smp_processor_id() is used while the CPU id is not
 * stable.
 */

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable case,
 * but a regular asm read for the stable case.
 */
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()

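/*
 * Example (illustrative sketch): get_cpu() returns a CPU id that stays
 * valid until the matching put_cpu(), because preemption is disabled in
 * between:
 *
 *	int cpu = get_cpu();
 *	... short, non-sleeping per-CPU work using cpu ...
 *	put_cpu();
 */
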
/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */