/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>
#include <linux/cpu.h>

static struct {
	struct list_head	queue;
	spinlock_t		lock;
} call_function __cacheline_aligned_in_smp = {
	.queue	= LIST_HEAD_INIT(call_function.queue),
	.lock	= __SPIN_LOCK_UNLOCKED(call_function.lock),
};

enum {
	CSD_FLAG_WAIT	= 0x01,		/* caller is spinning in csd_wait() */
	CSD_FLAG_LOCK	= 0x02,		/* csd is in flight; see csd_lock() */
};

struct call_function_data {
	struct call_single_data	csd;
	spinlock_t		lock;
	unsigned int		refs;
	cpumask_var_t		cpumask;
};

struct call_single_queue {
	struct list_head	list;
	spinlock_t		lock;
};

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);

static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
	.lock	= __SPIN_LOCK_UNLOCKED(cfd_data.lock),
};

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return NOTIFY_BAD;
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call	= hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);

	return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_wait/csd_complete are used for synchronous ipi calls
 */
static void csd_wait_prepare(struct call_single_data *data)
{
	data->flags |= CSD_FLAG_WAIT;
}

static void csd_complete(struct call_single_data *data)
{
	if (data->flags & CSD_FLAG_WAIT) {
		/*
		 * ensure we're all done before saying we are
		 */
		smp_mb();
		data->flags &= ~CSD_FLAG_WAIT;
	}
}

static void csd_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_WAIT)
		cpu_relax();
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the previous
 * function call. For multi-cpu calls it's even more interesting, as we'll
 * have to ensure no other cpu is observing our csd.
 */
static void csd_lock(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment to ->flags
	 * with any subsequent assignments to other fields of the
	 * specified call_single_data structure.
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data
	 */
	smp_mb();
	data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before sending the IPI:
	 * the handler locks the list to pull the entry off it, so the
	 * normal cache coherency rules implied by spinlocks make it so.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * Ensure entry is visible on call_function.queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next.
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;

		spin_lock(&data->lock);
		if (!cpumask_test_cpu(cpu, data->cpumask)) {
			spin_unlock(&data->lock);
			continue;
		}
		cpumask_clear_cpu(cpu, data->cpumask);
		spin_unlock(&data->lock);

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		WARN_ON(data->refs == 0);
		refs = --data->refs;
		if (!refs) {
			spin_lock(&call_function.lock);
			list_del_rcu(&data->csd.list);
			spin_unlock(&call_function.lock);
		}
		spin_unlock(&data->lock);

		if (refs)
			continue;

		csd_complete(&data->csd);
		csd_unlock(&data->csd);
	}

	put_cpu();
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()), so save
		 * the flags away before making the call.
		 */
		data_flags = data->flags;

		data->func(data->info);

		if (data_flags & CSD_FLAG_WAIT)
			csd_complete(data);

		/*
		 * Unlocked CSDs are valid through generic_exec_single().
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

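/*
 * Illustrative sketch, not part of the original file: arch IPI entry code
 * is expected to hand off to the two generic handlers above with
 * interrupts disabled. The entry-point names below are hypothetical;
 * real arch code also does its own irq accounting around these calls.
 */
static __maybe_unused void example_arch_call_function_ipi(void)
{
	generic_smp_call_function_interrupt();
}

static __maybe_unused void example_arch_call_function_single_ipi(void)
{
	generic_smp_call_function_single_interrupt();
}
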
static DEFINE_PER_CPU(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	int me = get_cpu();
	int err = 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
		struct call_single_data *data;

		if (!wait) {
			/*
			 * We are calling a function on a single CPU
			 * and we are not going to wait for it to finish.
			 * We use a per-cpu csd to pass the information to
			 * that CPU. Since all callers of this code will
			 * use the same data, we must synchronize the
			 * callers to prevent a new caller from corrupting
			 * the data before the callee can access it.
			 *
			 * The CSD_FLAG_LOCK is used to let us know when
			 * the IPI handler is done with the data.
			 * The first caller will set it, and the callee
			 * will clear it. The next caller must wait for
			 * it to clear before we set it again. This
			 * will make sure the callee is done with the
			 * data before a new caller will use it.
			 */
			data = &__get_cpu_var(csd_data);
			csd_lock(data);
		} else {
			data = &d;
			csd_wait_prepare(data);
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	} else {
		err = -ENXIO;	/* CPU not online */
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

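/*
 * Illustrative sketch, not part of the original file: a typical caller
 * that runs a fast, non-blocking function on one specific CPU and waits
 * for it to finish. All example_* names are hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_bump(void *info)
{
	/* runs on the target cpu, in IPI context, with interrupts off */
	__get_cpu_var(example_hits)++;
}

static __maybe_unused int example_bump_on(int cpu)
{
	/* wait == 1: returns only after example_bump() has run on @cpu */
	return smp_call_function_single(cpu, example_bump, NULL, 1);
}
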
/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}

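/*
 * Illustrative sketch, not part of the original file: embedding the csd
 * in a caller-owned structure, as suggested above. All example_* names
 * are hypothetical. With ->flags == 0 the call is fully asynchronous,
 * so the structure must stay valid until the callback has run.
 */
struct example_work {
	struct call_single_data	csd;
	int			payload;
};

static void example_work_fn(void *info)
{
	struct example_work *w = info;

	/* consume w->payload on the target cpu */
	(void)w;
}

static __maybe_unused void example_work_kick(int cpu, struct example_work *w)
{
	w->csd.flags = 0;	/* async and unlocked: no wait, no csd_lock */
	w->csd.func = example_work_fn;
	w->csd.info = w;
	__smp_call_function_single(cpu, &w->csd);
}
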
/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
#ifndef arch_send_call_function_ipi_mask
#define arch_send_call_function_ipi_mask(maskp) \
	arch_send_call_function_ipi(*(maskp))
#endif

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu, me = smp_processor_id();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == me)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == me)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	spin_lock_irqsave(&data->lock, flags);
	if (wait)
		csd_wait_prepare(&data->csd);

	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(me, data->cpumask);
	data->refs = cpumask_weight(data->cpumask);

	spin_lock(&call_function.lock);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries.
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	spin_unlock(&call_function.lock);
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache coherency
	 * rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);

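/*
 * Illustrative sketch, not part of the original file: invoking @func on
 * the online CPUs of an arbitrary mask without waiting, honoring the
 * "preemption must be disabled" rule documented above. example_kick_mask()
 * is a hypothetical wrapper.
 */
static __maybe_unused void example_kick_mask(const struct cpumask *mask,
					     void (*func)(void *), void *info)
{
	preempt_disable();
	smp_call_function_many(mask, func, info, false);
	preempt_enable();
}
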
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

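/*
 * Illustrative sketch, not part of the original file: running a
 * hypothetical per-cpu drain function on every CPU, waiting for the
 * remote ones and then covering the calling cpu directly, since @func
 * only runs on *other* CPUs.
 */
static void example_drain(void *unused)
{
	/* per-cpu cleanup would go here; runs with interrupts disabled */
}

static __maybe_unused void example_drain_all(void)
{
	smp_call_function(example_drain, NULL, 1);
	example_drain(NULL);
}
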
void ipi_call_lock(void)
{
	spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function.lock);
}