Merge tag 'for-linus-5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw...

diff --git a/kernel/smp.c b/kernel/smp.c
index aeb0adf..e210749 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
 #include <linux/sched/clock.h>
 #include <linux/nmi.h>
 #include <linux/sched/debug.h>
+#include <linux/jump_label.h>
 
 #include "smpboot.h"
 #include "sched/smp.h"
 
 #define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
 
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+union cfd_seq_cnt {
+       u64             val;
+       struct {
+               u64     src:16;
+               u64     dst:16;
+#define CFD_SEQ_NOCPU  0xffff
+               u64     type:4;
+#define CFD_SEQ_QUEUE  0
+#define CFD_SEQ_IPI    1
+#define CFD_SEQ_NOIPI  2
+#define CFD_SEQ_PING   3
+#define CFD_SEQ_PINGED 4
+#define CFD_SEQ_HANDLE 5
+#define CFD_SEQ_DEQUEUE        6
+#define CFD_SEQ_IDLE   7
+#define CFD_SEQ_GOTIPI 8
+#define CFD_SEQ_HDLEND 9
+               u64     cnt:28;
+       }               u;
+};
+
+static char *seq_type[] = {
+       [CFD_SEQ_QUEUE]         = "queue",
+       [CFD_SEQ_IPI]           = "ipi",
+       [CFD_SEQ_NOIPI]         = "noipi",
+       [CFD_SEQ_PING]          = "ping",
+       [CFD_SEQ_PINGED]        = "pinged",
+       [CFD_SEQ_HANDLE]        = "handle",
+       [CFD_SEQ_DEQUEUE]       = "dequeue (src CPU 0 == empty)",
+       [CFD_SEQ_IDLE]          = "idle",
+       [CFD_SEQ_GOTIPI]        = "gotipi",
+       [CFD_SEQ_HDLEND]        = "hdlend (src CPU 0 == early)",
+};
+
+struct cfd_seq_local {
+       u64     ping;
+       u64     pinged;
+       u64     handle;
+       u64     dequeue;
+       u64     idle;
+       u64     gotipi;
+       u64     hdlend;
+};
+#endif
+
+struct cfd_percpu {
+       call_single_data_t      csd;
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+       u64     seq_queue;
+       u64     seq_ipi;
+       u64     seq_noipi;
+#endif
+};
+
 struct call_function_data {
-       call_single_data_t      __percpu *csd;
+       struct cfd_percpu       __percpu *pcpu;
        cpumask_var_t           cpumask;
        cpumask_var_t           cpumask_ipi;
 };
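
The union above packs a source CPU, a destination CPU, an event type and a
28-bit running count into one 64-bit value, so a single READ_ONCE()/cmpxchg()
can snapshot or advance the whole record atomically. As a minimal,
illustration-only sketch (cfd_seq_decode_example() is not part of the patch),
decoding such a snapshot relies only on the union layout added above:

static void cfd_seq_decode_example(u64 snapshot)
{
	union cfd_seq_cnt seq;

	seq.val = snapshot;
	pr_info("src=%u dst=%u type=%u cnt=%u\n",
		(unsigned int)seq.u.src,	/* sending CPU, or CFD_SEQ_NOCPU */
		(unsigned int)seq.u.dst,	/* target CPU, or CFD_SEQ_NOCPU */
		(unsigned int)seq.u.type,	/* CFD_SEQ_QUEUE .. CFD_SEQ_HDLEND */
		(unsigned int)seq.u.cnt);	/* 28-bit global event count */
}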
@@ -54,8 +110,8 @@ int smpcfd_prepare_cpu(unsigned int cpu)
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }
-       cfd->csd = alloc_percpu(call_single_data_t);
-       if (!cfd->csd) {
+       cfd->pcpu = alloc_percpu(struct cfd_percpu);
+       if (!cfd->pcpu) {
                free_cpumask_var(cfd->cpumask);
                free_cpumask_var(cfd->cpumask_ipi);
                return -ENOMEM;
@@ -70,7 +126,7 @@ int smpcfd_dead_cpu(unsigned int cpu)
 
        free_cpumask_var(cfd->cpumask);
        free_cpumask_var(cfd->cpumask_ipi);
-       free_percpu(cfd->csd);
+       free_percpu(cfd->pcpu);
        return 0;
 }
 
@@ -102,15 +158,60 @@ void __init call_function_init(void)
 
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 
+static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
+static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);
+
+static int __init csdlock_debug(char *str)
+{
+       unsigned int val = 0;
+
+       if (str && !strcmp(str, "ext")) {
+               val = 1;
+               static_branch_enable(&csdlock_debug_extended);
+       } else
+               get_option(&str, &val);
+
+       if (val)
+               static_branch_enable(&csdlock_debug_enabled);
+
+       return 0;
+}
+early_param("csdlock_debug", csdlock_debug);
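
Given the parser above, the machinery is switched on from the kernel command
line; reading the code, the accepted forms are:

	csdlock_debug=1      enable the basic CSD-lock timeout diagnostics
	                     (csdlock_debug_enabled)
	csdlock_debug=ext    additionally enable the extended per-event
	                     sequence tracking (csdlock_debug_extended,
	                     which also enables the basic diagnostics)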
+
 static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
 static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
 static DEFINE_PER_CPU(void *, cur_csd_info);
+static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);
 
 #define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)
 static atomic_t csd_bug_count = ATOMIC_INIT(0);
+static u64 cfd_seq;
+
+#define CFD_SEQ(s, d, t, c)    \
+       (union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }
+
+static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
+{
+       union cfd_seq_cnt new, old;
+
+       new = CFD_SEQ(src, dst, type, 0);
+
+       do {
+               old.val = READ_ONCE(cfd_seq);
+               new.u.cnt = old.u.cnt + 1;
+       } while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);
+
+       return old.val;
+}
+
+#define cfd_seq_store(var, src, dst, type)                             \
+       do {                                                            \
+               if (static_branch_unlikely(&csdlock_debug_extended))    \
+                       var = cfd_seq_inc(src, dst, type);              \
+       } while (0)
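
Note that cfd_seq_inc() returns old.val, i.e. the global counter as it was
before the increment, still carrying the src/dst/type of the previous event;
the event being recorded is reconstructed later as that count plus one,
tagged with the slot's own src/dst/type (see cfd_seq_data_add() below).
A minimal usage sketch, with example_seq_queue standing in for one of the
per-CPU u64 slots the real code passes in:

static u64 example_seq_queue;	/* stands in for pcpu->seq_queue */

static void record_queue_event_example(unsigned int dst_cpu)
{
	/* No-op unless csdlock_debug=ext was given on the command line. */
	cfd_seq_store(example_seq_queue, smp_processor_id(), dst_cpu,
		      CFD_SEQ_QUEUE);
}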
 
 /* Record current CSD work for current CPU, NULL to erase. */
-static void csd_lock_record(call_single_data_t *csd)
+static void __csd_lock_record(call_single_data_t *csd)
 {
        if (!csd) {
                smp_mb(); /* NULL cur_csd after unlock. */
@@ -125,7 +226,13 @@ static void csd_lock_record(call_single_data_t *csd)
                  /* Or before unlock, as the case may be. */
 }
 
-static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
+static __always_inline void csd_lock_record(call_single_data_t *csd)
+{
+       if (static_branch_unlikely(&csdlock_debug_enabled))
+               __csd_lock_record(csd);
+}
+
+static int csd_lock_wait_getcpu(call_single_data_t *csd)
 {
        unsigned int csd_type;
 
@@ -135,12 +242,86 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
        return -1;
 }
 
+static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
+                            unsigned int type, union cfd_seq_cnt *data,
+                            unsigned int *n_data, unsigned int now)
+{
+       union cfd_seq_cnt new[2];
+       unsigned int i, j, k;
+
+       new[0].val = val;
+       new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);
+
+       for (i = 0; i < 2; i++) {
+               if (new[i].u.cnt <= now)
+                       new[i].u.cnt |= 0x80000000U;
+               for (j = 0; j < *n_data; j++) {
+                       if (new[i].u.cnt == data[j].u.cnt) {
+                               /* Direct read value trumps generated one. */
+                               if (i == 0)
+                                       data[j].val = new[i].val;
+                               break;
+                       }
+                       if (new[i].u.cnt < data[j].u.cnt) {
+                               for (k = *n_data; k > j; k--)
+                                       data[k].val = data[k - 1].val;
+                               data[j].val = new[i].val;
+                               (*n_data)++;
+                               break;
+                       }
+               }
+               if (j == *n_data) {
+                       data[j].val = new[i].val;
+                       (*n_data)++;
+               }
+       }
+}
+
+static const char *csd_lock_get_type(unsigned int type)
+{
+       return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
+}
+
+static void csd_lock_print_extended(call_single_data_t *csd, int cpu)
+{
+       struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
+       unsigned int srccpu = csd->node.src;
+       struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
+       struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
+       unsigned int now;
+       union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
+       unsigned int n_data = 0, i;
+
+       data[0].val = READ_ONCE(cfd_seq);
+       now = data[0].u.cnt;
+
+       cfd_seq_data_add(pcpu->seq_queue,                       srccpu, cpu,           CFD_SEQ_QUEUE,  data, &n_data, now);
+       cfd_seq_data_add(pcpu->seq_ipi,                         srccpu, cpu,           CFD_SEQ_IPI,    data, &n_data, now);
+       cfd_seq_data_add(pcpu->seq_noipi,                       srccpu, cpu,           CFD_SEQ_NOIPI,  data, &n_data, now);
+
+       cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu),   srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING,   data, &n_data, now);
+       cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);
+
+       cfd_seq_data_add(seq->idle,    CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE,    data, &n_data, now);
+       cfd_seq_data_add(seq->gotipi,  CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI,  data, &n_data, now);
+       cfd_seq_data_add(seq->handle,  CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE,  data, &n_data, now);
+       cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
+       cfd_seq_data_add(seq->hdlend,  CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND,  data, &n_data, now);
+
+       for (i = 0; i < n_data; i++) {
+               pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
+                        data[i].u.cnt & ~0x80000000U, data[i].u.src,
+                        data[i].u.dst, csd_lock_get_type(data[i].u.type));
+       }
+       pr_alert("\tcsd: cnt now: %07x\n", now);
+}
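
For orientation, a purely illustrative rendering of the report emitted above
(the CPU numbers and counts are made up; each line comes from the pr_alert()
format string shown):

	csd: cnt(0000034): 0002->0003 queue
	csd: cnt(0000035): 0002->0003 ipi
	csd: cnt(0000036): 0002->ffff ping
	csd: cnt(0000037): 0002->ffff pinged
	csd: cnt(0000038): ffff->0003 gotipi
	csd: cnt now: 0000039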
+
 /*
  * Complain if too much time spent waiting.  Note that only
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
  * so waiting on other types gets much less information.
  */
-static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 {
        int cpu = -1;
        int cpux;
@@ -184,6 +365,8 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
                         *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
        }
        if (cpu >= 0) {
+               if (static_branch_unlikely(&csdlock_debug_extended))
+                       csd_lock_print_extended(csd, cpu);
                if (!trigger_single_cpu_backtrace(cpu))
                        dump_cpu_task(cpu);
                if (!cpu_cur_csd) {
@@ -204,7 +387,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
  * previous function call. For multi-cpu calls it's even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static __always_inline void csd_lock_wait(call_single_data_t *csd)
+static void __csd_lock_wait(call_single_data_t *csd)
 {
        int bug_id = 0;
        u64 ts0, ts1;
@@ -218,7 +401,36 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
        smp_acquire__after_ctrl_dep();
 }
 
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
+{
+       if (static_branch_unlikely(&csdlock_debug_enabled)) {
+               __csd_lock_wait(csd);
+               return;
+       }
+
+       smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
+}
+
+static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
+{
+       unsigned int this_cpu = smp_processor_id();
+       struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
+       struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
+       struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
+
+       cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
+       if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
+               cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
+               cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
+               send_call_function_single_ipi(cpu);
+               cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
+       } else {
+               cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
+       }
+}
 #else
+#define cfd_seq_store(var, src, dst, type)
+
 static void csd_lock_record(call_single_data_t *csd)
 {
 }
@@ -256,6 +468,19 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
 
 void __smp_call_single_queue(int cpu, struct llist_node *node)
 {
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+       if (static_branch_unlikely(&csdlock_debug_extended)) {
+               unsigned int type;
+
+               type = CSD_TYPE(container_of(node, call_single_data_t,
+                                            node.llist));
+               if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
+                       __smp_call_single_queue_debug(cpu, node);
+                       return;
+               }
+       }
+#endif
+
        /*
         * The list addition should be visible before sending the IPI
         * handler locks the list to pull the entry off it because of
@@ -314,6 +539,8 @@ static int generic_exec_single(int cpu, call_single_data_t *csd)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
+       cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
+                     smp_processor_id(), CFD_SEQ_GOTIPI);
        flush_smp_call_function_queue(true);
 }
 
@@ -341,7 +568,13 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
        lockdep_assert_irqs_disabled();
 
        head = this_cpu_ptr(&call_single_queue);
+       cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
+                     smp_processor_id(), CFD_SEQ_HANDLE);
        entry = llist_del_all(head);
+       cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
+                     /* Special meaning of source cpu: 0 == queue empty */
+                     entry ? CFD_SEQ_NOCPU : 0,
+                     smp_processor_id(), CFD_SEQ_DEQUEUE);
        entry = llist_reverse_order(entry);
 
        /* There shouldn't be any pending callbacks on an offline CPU. */
@@ -400,8 +633,12 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
                }
        }
 
-       if (!entry)
+       if (!entry) {
+               cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
+                             0, smp_processor_id(),
+                             CFD_SEQ_HDLEND);
                return;
+       }
 
        /*
         * Second; run all !SYNC callbacks.
@@ -439,6 +676,9 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
         */
        if (entry)
                sched_ttwu_pending(entry);
+
+       cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
+                     smp_processor_id(), CFD_SEQ_HDLEND);
 }
 
 void flush_smp_call_function_from_idle(void)
@@ -448,6 +688,8 @@ void flush_smp_call_function_from_idle(void)
        if (llist_empty(this_cpu_ptr(&call_single_queue)))
                return;
 
+       cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
+                     smp_processor_id(), CFD_SEQ_IDLE);
        local_irq_save(flags);
        flush_smp_call_function_queue(true);
        if (local_softirq_pending())
@@ -608,12 +850,28 @@ call:
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
+/*
+ * Flags to be used as scf_flags argument of smp_call_function_many_cond().
+ *
+ * %SCF_WAIT:          Wait until function execution is completed
+ * %SCF_RUN_LOCAL:     Run also locally if local cpu is set in cpumask
+ */
+#define SCF_WAIT       (1U << 0)
+#define SCF_RUN_LOCAL  (1U << 1)
+
 static void smp_call_function_many_cond(const struct cpumask *mask,
                                        smp_call_func_t func, void *info,
-                                       bool wait, smp_cond_func_t cond_func)
+                                       unsigned int scf_flags,
+                                       smp_cond_func_t cond_func)
 {
+       int cpu, last_cpu, this_cpu = smp_processor_id();
        struct call_function_data *cfd;
-       int cpu, next_cpu, this_cpu = smp_processor_id();
+       bool wait = scf_flags & SCF_WAIT;
+       bool run_remote = false;
+       bool run_local = false;
+       int nr_cpus = 0;
+
+       lockdep_assert_preemption_disabled();
 
        /*
         * Can deadlock when called with interrupts disabled.
@@ -621,8 +879,9 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
-       WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
-                    && !oops_in_progress && !early_boot_irqs_disabled);
+       if (cpu_online(this_cpu) && !oops_in_progress &&
+           !early_boot_irqs_disabled)
+               lockdep_assert_irqs_enabled();
 
        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
@@ -632,76 +891,93 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
         */
        WARN_ON_ONCE(!in_task());
 
-       /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
+       /* Check if we need local execution. */
+       if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
+               run_local = true;
+
+       /* Check if we need remote execution, i.e., any CPU excluding this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
+       if (cpu < nr_cpu_ids)
+               run_remote = true;
 
-       /* No online cpus?  We're done. */
-       if (cpu >= nr_cpu_ids)
-               return;
-
-       /* Do we have another CPU which isn't us? */
-       next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
-       if (next_cpu == this_cpu)
-               next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
+       if (run_remote) {
+               cfd = this_cpu_ptr(&cfd_data);
+               cpumask_and(cfd->cpumask, mask, cpu_online_mask);
+               __cpumask_clear_cpu(this_cpu, cfd->cpumask);
 
-       /* Fastpath: do that cpu by itself. */
-       if (next_cpu >= nr_cpu_ids) {
-               if (!cond_func || cond_func(cpu, info))
-                       smp_call_function_single(cpu, func, info, wait);
-               return;
-       }
+               cpumask_clear(cfd->cpumask_ipi);
+               for_each_cpu(cpu, cfd->cpumask) {
+                       struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
+                       call_single_data_t *csd = &pcpu->csd;
 
-       cfd = this_cpu_ptr(&cfd_data);
+                       if (cond_func && !cond_func(cpu, info))
+                               continue;
 
-       cpumask_and(cfd->cpumask, mask, cpu_online_mask);
-       __cpumask_clear_cpu(this_cpu, cfd->cpumask);
+                       csd_lock(csd);
+                       if (wait)
+                               csd->node.u_flags |= CSD_TYPE_SYNC;
+                       csd->func = func;
+                       csd->info = info;
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+                       csd->node.src = smp_processor_id();
+                       csd->node.dst = cpu;
+#endif
+                       cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
+                       if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
+                               __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
+                               nr_cpus++;
+                               last_cpu = cpu;
 
-       /* Some callers race with other cpus changing the passed mask */
-       if (unlikely(!cpumask_weight(cfd->cpumask)))
-               return;
+                               cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
+                       } else {
+                               cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
+                       }
+               }
 
-       cpumask_clear(cfd->cpumask_ipi);
-       for_each_cpu(cpu, cfd->cpumask) {
-               call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
+               cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);
 
-               if (cond_func && !cond_func(cpu, info))
-                       continue;
+               /*
+                * Choose the most efficient way to send an IPI. Note that the
+                * number of CPUs might be zero due to concurrent changes to the
+                * provided mask.
+                */
+               if (nr_cpus == 1)
+                       send_call_function_single_ipi(last_cpu);
+               else if (likely(nr_cpus > 1))
+                       arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 
-               csd_lock(csd);
-               if (wait)
-                       csd->node.u_flags |= CSD_TYPE_SYNC;
-               csd->func = func;
-               csd->info = info;
-#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-               csd->node.src = smp_processor_id();
-               csd->node.dst = cpu;
-#endif
-               if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu)))
-                       __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
+               cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
        }
 
-       /* Send a message to all CPUs in the map */
-       arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+       if (run_local && (!cond_func || cond_func(this_cpu, info))) {
+               unsigned long flags;
 
-       if (wait) {
+               local_irq_save(flags);
+               func(info);
+               local_irq_restore(flags);
+       }
+
+       if (run_remote && wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd;
 
-                       csd = per_cpu_ptr(cfd->csd, cpu);
+                       csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
                        csd_lock_wait(csd);
                }
        }
 }
 
 /**
- * smp_call_function_many(): Run a function on a set of other CPUs.
+ * smp_call_function_many(): Run a function on a set of CPUs.
  * @mask: The set of cpus to run on (only runs on online subset).
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
+ * @flags: Bitmask that controls the operation. If %SCF_WAIT is set, wait
+ *        (atomically) until function has completed on other CPUs. If
+ *        %SCF_RUN_LOCAL is set, the function will also be run locally
+ *        if the local CPU is set in the @cpumask.
  *
  * If @wait is true, then returns once @func has returned.
  *
@@ -712,7 +988,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
 {
-       smp_call_function_many_cond(mask, func, info, wait, NULL);
+       smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
 }
 EXPORT_SYMBOL(smp_call_function_many);
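
Despite the kernel-doc above now being phrased in terms of %SCF_WAIT and
%SCF_RUN_LOCAL, smp_call_function_many() itself still takes a plain bool:
the wrapper merely translates it into SCF_WAIT (wait * SCF_WAIT) and never
sets SCF_RUN_LOCAL, so it keeps its historical other-CPUs-only behaviour.
A minimal, hypothetical caller (do_flush() and flush_remote_caches() are
invented names):

/* Run do_flush() on every online CPU in @mask except the calling one
 * and wait for all of them to finish. */
static void do_flush(void *info)
{
	/* Runs on each targeted CPU in IPI context with IRQs disabled;
	 * must be fast and must not block. */
}

static void flush_remote_caches(const struct cpumask *mask)
{
	preempt_disable();	/* smp_call_function_many() requires this */
	smp_call_function_many(mask, do_flush, NULL, true);
	preempt_enable();
}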
 
@@ -823,56 +1099,6 @@ void __init smp_init(void)
        smp_cpus_done(setup_max_cpus);
 }
 
-/*
- * Call a function on all processors.  May be used during early boot while
- * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
- * of local_irq_disable/enable().
- */
-void on_each_cpu(smp_call_func_t func, void *info, int wait)
-{
-       unsigned long flags;
-
-       preempt_disable();
-       smp_call_function(func, info, wait);
-       local_irq_save(flags);
-       func(info);
-       local_irq_restore(flags);
-       preempt_enable();
-}
-EXPORT_SYMBOL(on_each_cpu);
-
-/**
- * on_each_cpu_mask(): Run a function on processors specified by
- * cpumask, which may include the local processor.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.  The
- * exception is that it may be used during early boot while
- * early_boot_irqs_disabled is set.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-                       void *info, bool wait)
-{
-       int cpu = get_cpu();
-
-       smp_call_function_many(mask, func, info, wait);
-       if (cpumask_test_cpu(cpu, mask)) {
-               unsigned long flags;
-               local_irq_save(flags);
-               func(info);
-               local_irq_restore(flags);
-       }
-       put_cpu();
-}
-EXPORT_SYMBOL(on_each_cpu_mask);
-
 /*
  * on_each_cpu_cond(): Call a function on each processor for which
  * the supplied function cond_func returns true, optionally waiting
@@ -898,27 +1124,17 @@ EXPORT_SYMBOL(on_each_cpu_mask);
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
                           void *info, bool wait, const struct cpumask *mask)
 {
-       int cpu = get_cpu();
+       unsigned int scf_flags = SCF_RUN_LOCAL;
 
-       smp_call_function_many_cond(mask, func, info, wait, cond_func);
-       if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
-               unsigned long flags;
+       if (wait)
+               scf_flags |= SCF_WAIT;
 
-               local_irq_save(flags);
-               func(info);
-               local_irq_restore(flags);
-       }
-       put_cpu();
+       preempt_disable();
+       smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
+       preempt_enable();
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
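
on_each_cpu_cond_mask() is now the single entry point that the remaining
helpers funnel into. A hypothetical usage sketch (the needs_work per-CPU
flag and the helper names are invented for illustration):

static DEFINE_PER_CPU(bool, needs_work);

static bool cpu_needs_work(int cpu, void *info)
{
	return per_cpu(needs_work, cpu);
}

static void do_work(void *info)
{
	this_cpu_write(needs_work, false);
}

/* Run do_work() on every online CPU (including this one) whose
 * needs_work flag is set, waiting for completion. */
static void run_pending_work(void)
{
	on_each_cpu_cond_mask(cpu_needs_work, do_work, NULL, true,
			      cpu_online_mask);
}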
 
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-                     void *info, bool wait)
-{
-       on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
-}
-EXPORT_SYMBOL(on_each_cpu_cond);
-
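
The on_each_cpu(), on_each_cpu_mask() and on_each_cpu_cond() bodies deleted
above are not lost functionality: with on_each_cpu_cond_mask() able to run
the function locally via %SCF_RUN_LOCAL, each of them reduces to a thin
wrapper around it (the actual replacements live outside this file and are
not part of this diff). A sketch of such wrappers, using invented *_example
names:

static inline void on_each_cpu_example(smp_call_func_t func, void *info,
				       int wait)
{
	/* A NULL cond_func means "run on every CPU in the mask". */
	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
}

static inline void on_each_cpu_mask_example(const struct cpumask *mask,
					    smp_call_func_t func, void *info,
					    bool wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
}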
 static void do_nothing(void *unused)
 {
 }