diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 84f1d91..d64f0b1 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -123,7 +123,7 @@ static struct rcu_tasks rt_name =                                                   \
        .call_func = call,                                                              \
        .rtpcpu = &rt_name ## __percpu,                                                 \
        .name = n,                                                                      \
-       .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS),                                  \
+       .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),                          \
        .percpu_enqueue_lim = 1,                                                        \
        .percpu_dequeue_lim = 1,                                                        \
        .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),                \
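
The static initializer above matters because, with percpu_enqueue_lim
fixed at 1, the enqueue path computes its queue index as
cpu >> percpu_enqueue_shift, and that must be 0 even for the
highest-numbered CPU.  ilog2() rounds down, so any non-power-of-two
CONFIG_NR_CPUS under-shifts; order_base_2() rounds up.  A minimal
userspace sketch of the arithmetic (ilog2_floor(), log2_ceil(), and the
six-CPU count are made up for illustration; this is not kernel code):

#include <stdio.h>

/* Floor log2, standing in for the kernel's ilog2() in this sketch. */
static int ilog2_floor(unsigned int x)
{
        int r = -1;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

/* Round-up log2, standing in for order_base_2(). */
static int log2_ceil(unsigned int x)
{
        int r = 0;

        while ((1u << r) < x)
                r++;
        return r;
}

int main(void)
{
        unsigned int nr = 6;    /* a non-power-of-two CPU count */

        /* Old: 5 >> ilog2(6) == 5 >> 2 == 1, stranding CPU 5's callbacks
         * on queue 1 while only queue 0 is being drained.
         * New: 5 >> order_base_2(6) == 5 >> 3 == 0, as required. */
        printf("cpu 5 -> queue %u (ilog2), queue %u (order_base_2)\n",
               5u >> ilog2_floor(nr), 5u >> log2_ceil(nr));
        return 0;
}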
@@ -216,6 +216,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
        int cpu;
        unsigned long flags;
        int lim;
+       int shift;
 
        raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
        if (rcu_task_enqueue_lim < 0) {
@@ -229,7 +230,10 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
 
        if (lim > nr_cpu_ids)
                lim = nr_cpu_ids;
-       WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids / lim));
+       shift = ilog2(nr_cpu_ids / lim);
+       if (((nr_cpu_ids - 1) >> shift) >= lim)
+               shift++;
+       WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
        WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
        smp_store_release(&rtp->percpu_enqueue_lim, lim);
        for_each_possible_cpu(cpu) {
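
The runtime computation above generalizes the same fix: nr_cpu_ids CPUs
must be packed onto lim queues without ever producing an index of lim
or more.  ilog2(nr_cpu_ids / lim) alone again under-shifts for
non-power-of-two counts, and the new check bumps the shift exactly when
the highest CPU would land out of range.  A userspace rehearsal of the
arithmetic (the six-CPU, two-queue values are made up for illustration):

#include <stdio.h>

/* Floor log2, standing in for the kernel's ilog2(). */
static int ilog2_floor(unsigned int x)
{
        int r = -1;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned int nr_cpu_ids = 6, lim = 2, cpu;
        int shift = ilog2_floor(nr_cpu_ids / lim);      /* ilog2(3) == 1 */

        /* Unpatched: CPU 5 -> queue 5 >> 1 == 2, one past the last
         * valid queue.  The bump below repairs exactly that case. */
        if (((nr_cpu_ids - 1) >> shift) >= lim)
                shift++;                                /* now 2 */

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                printf("cpu %u -> queue %u\n", cpu, cpu >> shift);
        return 0;
}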
@@ -298,7 +302,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
        if (unlikely(needadjust)) {
                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
                if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
-                       WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
+                       WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
                        WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
                        smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
                        pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
@@ -413,7 +417,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
        if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
                if (rtp->percpu_enqueue_lim > 1) {
-                       WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
+                       WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
                        smp_store_release(&rtp->percpu_enqueue_lim, 1);
                        rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
                        pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);