rcu/nocb: Code-style nits in callback-offloading toggling
author		Paul E. McKenney <paulmck@kernel.org>	Mon, 21 Dec 2020 19:17:16 +0000 (11:17 -0800)
committer	Paul E. McKenney <paulmck@kernel.org>	Thu, 7 Jan 2021 00:47:55 +0000 (16:47 -0800)
This commit addresses a few code-style nits in callback-offloading
toggling, including one that predates this toggling.

Cc: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/rcu_segcblist.h
kernel/rcu/rcutorture.c
kernel/rcu/tree_plugin.h
kernel/time/timer.c

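For reference, the style conventions applied in the hunks below are the usual kernel ones: fold a nested if under IS_ENABLED() into a single condition, fall through to a final return rather than using an else branch, drop braces around single statements, leave a blank line after local variable declarations, and use pr_info() rather than bare printk(). The minimal sketch below (the example_* helpers are hypothetical and are not code from this patch) shows the preferred shapes:

/*
 * Illustrative sketch only: the example_* helpers are hypothetical and
 * not part of this patch; they restate the style conventions that the
 * hunks below apply to the real rcu/nocb code.
 */
#include <linux/bits.h>
#include <linux/kconfig.h>
#include <linux/printk.h>
#include <linux/types.h>

/*
 * One combined condition instead of a nested if under IS_ENABLED(),
 * and a fall-through "return false" instead of an else branch.
 */
static bool example_is_offloaded(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (flags & BIT(0)))
		return true;

	return false;
}

/*
 * Blank line after the declarations, no braces around single
 * statements, and pr_info() rather than printk().
 */
static int example_toggle(int cpu, bool online)
{
	int ret;

	pr_info("Toggling %d\n", cpu);
	if (online)
		ret = 1;
	else
		ret = 0;

	return ret;
}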
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 3110602..9a19328 100644
@@ -80,17 +80,12 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
        return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED);
 }
 
-/* Is the specified rcu_segcblist offloaded */
+/* Is the specified rcu_segcblist offloaded, or is SEGCBLIST_SOFTIRQ_ONLY set? */
 static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
 {
-       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) {
-               /*
-                * Complete de-offloading happens only when SEGCBLIST_SOFTIRQ_ONLY
-                * is set.
-                */
-               if (!rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY))
-                       return true;
-       }
+       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
+           !rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY))
+               return true;
 
        return false;
 }
@@ -99,10 +94,8 @@ static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
 {
        int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED;
 
-       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) {
-               if ((rsclp->flags & flags) == flags)
-                       return true;
-       }
+       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (rsclp->flags & flags) == flags)
+               return true;
 
        return false;
 }
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 22735bc..b9dd63c 100644
@@ -1606,7 +1606,7 @@ rcu_torture_stats_print(void)
                data_race(n_barrier_successes),
                data_race(n_barrier_attempts),
                data_race(n_rcu_torture_barrier_error));
-       pr_cont("read-exits: %ld ", data_race(n_read_exits));
+       pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
        pr_cont("nocb-toggles: %ld:%ld\n",
                atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index bc63a6b..6f56f9e 100644
@@ -1962,17 +1962,17 @@ static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_sta
                                *needwake_state = true;
                }
                return true;
-       } else {
-               /*
-                * De-offloading. Clear our flag and notify the de-offload worker.
-                * We will ignore this rdp until it ever gets re-offloaded.
-                */
-               WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
-               rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
-               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-                       *needwake_state = true;
-               return false;
        }
+
+       /*
+        * De-offloading. Clear our flag and notify the de-offload worker.
+        * We will ignore this rdp until it ever gets re-offloaded.
+        */
+       WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+       rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
+       if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+               *needwake_state = true;
+       return false;
 }
 
 
@@ -2005,6 +2005,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
        WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
        for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
                bool needwake_state = false;
+
                if (!nocb_gp_enabled_cb(rdp))
                        continue;
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
@@ -2160,11 +2161,11 @@ static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
 static void nocb_cb_wait(struct rcu_data *rdp)
 {
        struct rcu_segcblist *cblist = &rdp->cblist;
-       struct rcu_node *rnp = rdp->mynode;
-       bool needwake_state = false;
-       bool needwake_gp = false;
        unsigned long cur_gp_seq;
        unsigned long flags;
+       bool needwake_state = false;
+       bool needwake_gp = false;
+       struct rcu_node *rnp = rdp->mynode;
 
        local_irq_save(flags);
        rcu_momentary_dyntick_idle();
@@ -2217,8 +2218,8 @@ static void nocb_cb_wait(struct rcu_data *rdp)
                swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
                                                    nocb_cb_wait_cond(rdp));
 
-               /* ^^^ Ensure CB invocation follows _sleep test. */
-               if (smp_load_acquire(&rdp->nocb_cb_sleep)) {
+               // VVV Ensure CB invocation follows _sleep test.
+               if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
                        WARN_ON(signal_pending(current));
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
                }
@@ -2323,7 +2324,7 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
        unsigned long flags;
        int ret;
 
-       printk("De-offloading %d\n", rdp->cpu);
+       pr_info("De-offloading %d\n", rdp->cpu);
 
        rcu_nocb_lock_irqsave(rdp, flags);
        /*
@@ -2384,11 +2385,10 @@ int rcu_nocb_cpu_deoffload(int cpu)
        mutex_lock(&rcu_state.barrier_mutex);
        cpus_read_lock();
        if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
-               if (cpu_online(cpu)) {
+               if (cpu_online(cpu))
                        ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
-               } else {
+               else
                        ret = __rcu_nocb_rdp_deoffload(rdp);
-               }
                if (!ret)
                        cpumask_clear_cpu(cpu, rcu_nocb_mask);
        }
@@ -2412,7 +2412,7 @@ static int __rcu_nocb_rdp_offload(struct rcu_data *rdp)
        if (!rdp->nocb_gp_rdp)
                return -EINVAL;
 
-       printk("Offloading %d\n", rdp->cpu);
+       pr_info("Offloading %d\n", rdp->cpu);
        /*
         * Can't use rcu_nocb_lock_irqsave() while we are in
         * SEGCBLIST_SOFTIRQ_ONLY mode.
@@ -2460,11 +2460,10 @@ int rcu_nocb_cpu_offload(int cpu)
        mutex_lock(&rcu_state.barrier_mutex);
        cpus_read_lock();
        if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
-               if (cpu_online(cpu)) {
+               if (cpu_online(cpu))
                        ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
-               } else {
+               else
                        ret = __rcu_nocb_rdp_offload(rdp);
-               }
                if (!ret)
                        cpumask_set_cpu(cpu, rcu_nocb_mask);
        }
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index f9b2096..f475f1a 100644
@@ -1243,6 +1243,7 @@ bool timer_curr_running(struct timer_list *timer)
 
        for (i = 0; i < NR_BASES; i++) {
                struct timer_base *base = this_cpu_ptr(&timer_bases[i]);
+
                if (base->running_timer == timer)
                        return true;
        }