Merge branches 'bitmaprange.2021.05.10c', 'doc.2021.05.10c', 'fixes.2021.05.13a'...
[linux-2.6-microblaze.git] / kernel/rcu/tree_plugin.h
index ad0156b..334eaf4 100644
@@ -33,10 +33,6 @@ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
        return false;
 }
 
-static inline bool rcu_running_nocb_timer(struct rcu_data *rdp)
-{
-       return (timer_curr_running(&rdp->nocb_timer) && !in_irq());
-}
 #else
 static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
 {
@@ -48,11 +44,6 @@ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
        return false;
 }
 
-static inline bool rcu_running_nocb_timer(struct rcu_data *rdp)
-{
-       return false;
-}
-
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
@@ -72,8 +63,7 @@ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
                  rcu_lockdep_is_held_nocb(rdp) ||
                  (rdp == this_cpu_ptr(&rcu_data) &&
                   !(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible())) ||
-                 rcu_current_is_nocb_kthread(rdp) ||
-                 rcu_running_nocb_timer(rdp)),
+                 rcu_current_is_nocb_kthread(rdp)),
                "Unsafe read of RCU_NOCB offloaded state"
        );
 
@@ -1098,6 +1088,7 @@ static int rcu_boost(struct rcu_node *rnp)
        /* Lock only for side effect: boosts task t's priority. */
        rt_mutex_lock(&rnp->boost_mtx);
        rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
+       rnp->n_boosts++;
 
        return READ_ONCE(rnp->exp_tasks) != NULL ||
               READ_ONCE(rnp->boost_tasks) != NULL;
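The lock/unlock pair above works purely through priority inheritance: earlier in rcu_boost() the rt_mutex is proxy-locked on behalf of the preempted reader, so blocking on it lends the booster's priority to that reader, and the new ->n_boosts counter tallies each such event. A minimal userspace analogue of this boost-by-locking trick, using a POSIX priority-inheritance mutex (an illustrative sketch, not kernel code; it elides making sure the worker locks first):

/* Build with: cc -o boost boost.c -lpthread */
#include <pthread.h>

static pthread_mutex_t boost_mtx;

static void *low_prio_worker(void *arg)
{
	pthread_mutex_lock(&boost_mtx);
	/* Long critical section: runs boosted once main() blocks below. */
	pthread_mutex_unlock(&boost_mtx);
	return NULL;
}

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_t worker;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&boost_mtx, &attr);

	pthread_create(&worker, NULL, low_prio_worker, NULL);

	/* Lock only for the side effect: while blocked here, the
	 * PTHREAD_PRIO_INHERIT protocol lends our priority to the
	 * worker, just as rt_mutex_lock() lends the booster's
	 * priority to the RCU reader.  Then keep things tidy. */
	pthread_mutex_lock(&boost_mtx);
	pthread_mutex_unlock(&boost_mtx);

	pthread_join(worker, NULL);
	return 0;
}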
@@ -1197,22 +1188,16 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  */
 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 {
-       int rnp_index = rnp - rcu_get_root();
        unsigned long flags;
+       int rnp_index = rnp - rcu_get_root();
        struct sched_param sp;
        struct task_struct *t;
 
-       if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
-               return;
-
-       if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
+       if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
                return;
 
        rcu_state.boost = 1;
 
-       if (rnp->boost_kthread_task != NULL)
-               return;
-
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
                           "rcub/%d", rnp_index);
        if (WARN_ON_ONCE(IS_ERR(t)))
@@ -1264,17 +1249,8 @@ static void __init rcu_spawn_boost_kthreads(void)
        struct rcu_node *rnp;
 
        rcu_for_each_leaf_node(rnp)
-               rcu_spawn_one_boost_kthread(rnp);
-}
-
-static void rcu_prepare_kthreads(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       struct rcu_node *rnp = rdp->mynode;
-
-       /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-       if (rcu_scheduler_fully_active)
-               rcu_spawn_one_boost_kthread(rnp);
+               if (rcu_rnp_online_cpus(rnp))
+                       rcu_spawn_one_boost_kthread(rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1294,15 +1270,15 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 {
 }
 
-static void __init rcu_spawn_boost_kthreads(void)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
-static void rcu_prepare_kthreads(int cpu)
+static void __init rcu_spawn_boost_kthreads(void)
 {
 }
 
@@ -1535,13 +1511,10 @@ static void rcu_cleanup_after_idle(void)
 static int __init rcu_nocb_setup(char *str)
 {
        alloc_bootmem_cpumask_var(&rcu_nocb_mask);
        alloc_bootmem_cpumask_var(&rcu_nocb_mask);
-       if (!strcasecmp(str, "all"))            /* legacy: use "0-N" instead */
+       if (cpulist_parse(str, rcu_nocb_mask)) {
+               pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
                cpumask_setall(rcu_nocb_mask);
-       else
-               if (cpulist_parse(str, rcu_nocb_mask)) {
-                       pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
-                       cpumask_setall(rcu_nocb_mask);
-               }
+       }
        return 1;
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
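With the legacy strcasecmp() special case gone, every value goes through cpulist_parse(). Its syntax (see Documentation/admin-guide/kernel-parameters.rst; the "N" suffix comes from the bitmaprange series merged alongside this change) covers the old use cases, so for example (illustrative values):

    rcu_nocbs=1-7        offload callbacks from CPUs 1 through 7
    rcu_nocbs=0,4-5      offload CPUs 0, 4, and 5
    rcu_nocbs=0-N        offload every CPU, "N" being the largest CPU number
    rcu_nocbs=<garbage>  parse failure: warn, then offload all CPUs anyway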
@@ -1692,43 +1665,50 @@ bool rcu_is_nocb_cpu(int cpu)
        return false;
 }
 
-/*
- * Kick the GP kthread for this NOCB group.  Caller holds ->nocb_lock
- * and this function releases it.
- */
-static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
-                        unsigned long flags)
-       __releases(rdp->nocb_lock)
+static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
+                          struct rcu_data *rdp,
+                          bool force, unsigned long flags)
+       __releases(rdp_gp->nocb_gp_lock)
 {
        bool needwake = false;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
 
-       lockdep_assert_held(&rdp->nocb_lock);
        if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
-               rcu_nocb_unlock_irqrestore(rdp, flags);
+               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                    TPS("AlreadyAwake"));
                return false;
        }
 
-       if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
-               WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
-               del_timer(&rdp->nocb_timer);
+       if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
+               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+               del_timer(&rdp_gp->nocb_timer);
        }
-       rcu_nocb_unlock_irqrestore(rdp, flags);
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+
        if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
                WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
                needwake = true;
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
        }
        raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-       if (needwake)
+       if (needwake) {
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
                wake_up_process(rdp_gp->nocb_gp_kthread);
+       }
 
        return needwake;
 }
 
+/*
+ * Kick the GP kthread for this NOCB group.
+ */
+static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
+{
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       return __wake_nocb_gp(rdp_gp, rdp, force, flags);
+}
+
 /*
  * Arrange to wake the GP kthread for this NOCB group at some future
  * time when it is safe to do so.
@@ -1736,12 +1716,27 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
 static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
                               const char *reason)
 {
-       if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_OFF)
-               return;
-       if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
-               mod_timer(&rdp->nocb_timer, jiffies + 1);
-       if (rdp->nocb_defer_wakeup < waketype)
-               WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+
+       /*
+        * Bypass wakeup overrides previous deferments. In case
+        * of callback storm, no need to wake up too early.
+        */
+       if (waketype == RCU_NOCB_WAKE_BYPASS) {
+               mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
+               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+       } else {
+               if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
+                       mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
+               if (rdp_gp->nocb_defer_wakeup < waketype)
+                       WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+       }
+
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+
        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
 }
 
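The comparisons above depend on the deferral levels forming an ascending scale. After this series the values in kernel/rcu/tree.h look like the following (RCU_NOCB_WAKE_OFF is gone, RCU_NOCB_WAKE_BYPASS is new; exact values shown for illustration):

#define RCU_NOCB_WAKE_NOT	0	/* No deferred wakeup pending. */
#define RCU_NOCB_WAKE_BYPASS	1	/* Lazy wakeup for bypass callbacks. */
#define RCU_NOCB_WAKE		2	/* Ordinary deferred wakeup. */
#define RCU_NOCB_WAKE_FORCE	3	/* Urgent deferred wakeup. */

So a BYPASS request deliberately overrides whatever was pending and pushes the timer out to two jiffies (no point waking early during a callback storm), while WAKE and FORCE requests only raise the pending level, rearming the timer to one jiffy only when nothing stronger than a bypass deferment was queued.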
@@ -1940,7 +1935,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 }
 
 /*
- * Awaken the no-CBs grace-period kthead if needed, either due to it
+ * Awaken the no-CBs grace-period kthread if needed, either due to it
  * legitimately being asleep or due to overload conditions.
  *
  * If warranted, also wake up the kthread servicing this CPU's queues.
@@ -1968,13 +1963,14 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
                rdp->qlen_last_fqs_check = len;
                if (!irqs_disabled_flags(flags)) {
                        /* ... if queue was empty ... */
-                       wake_nocb_gp(rdp, false, flags);
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       wake_nocb_gp(rdp, false);
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("WakeEmpty"));
                } else {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
                        wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
                                           TPS("WakeEmptyIsDeferred"));
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
                }
        } else if (len > rdp->qlen_last_fqs_check + qhimark) {
                /* ... or if many callbacks queued. */
@@ -1989,10 +1985,14 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
                smp_mb(); /* Enqueue before timer_pending(). */
                if ((rdp->nocb_cb_sleep ||
                     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
-                   !timer_pending(&rdp->nocb_bypass_timer))
+                   !timer_pending(&rdp->nocb_timer)) {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
                        wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
                                           TPS("WakeOvfIsDeferred"));
-               rcu_nocb_unlock_irqrestore(rdp, flags);
+               } else {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
+               }
        } else {
                rcu_nocb_unlock_irqrestore(rdp, flags);
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
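Note the reordering in both branches: ->nocb_lock is dropped before the wakeup helper runs, because wake_nocb_gp() and wake_nocb_gp_defer() now acquire the GP leader's ->nocb_gp_lock themselves rather than expecting the caller's lock. Condensed from the hunks above (a pseudo-C sketch of the resulting discipline, not literal code):

	rcu_nocb_lock_irqsave(rdp, flags);	/* per-CPU callback lock */
	/* ...enqueue callbacks, sample queue lengths... */
	rcu_nocb_unlock_irqrestore(rdp, flags);	/* drop it first... */
	wake_nocb_gp(rdp, false);		/* ...then take rdp_gp->nocb_gp_lock */

The two locks are therefore never held together on this path, which avoids nesting them now that the deferral state lives in the GP leader.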
@@ -2000,18 +2000,6 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
        return;
 }
 
-/* Wake up the no-CBs GP kthread to flush ->nocb_bypass. */
-static void do_nocb_bypass_wakeup_timer(struct timer_list *t)
-{
-       unsigned long flags;
-       struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer);
-
-       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
-       rcu_nocb_lock_irqsave(rdp, flags);
-       smp_mb__after_spinlock(); /* Timer expire before wakeup. */
-       __call_rcu_nocb_wake(rdp, true, flags);
-}
-
 /*
  * Check if we ignore this rdp.
  *
@@ -2118,11 +2106,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
                        bypass = true;
                }
                rnp = rdp->mynode;
-               if (bypass) {  // Avoid race with first bypass CB.
-                       WRITE_ONCE(my_rdp->nocb_defer_wakeup,
-                                  RCU_NOCB_WAKE_NOT);
-                       del_timer(&my_rdp->nocb_timer);
-               }
+
                // Advance callbacks if helpful and low contention.
                needwake_gp = false;
                if (!rcu_segcblist_restempty(&rdp->cblist,
@@ -2168,12 +2152,12 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
        my_rdp->nocb_gp_bypass = bypass;
        my_rdp->nocb_gp_gp = needwait_gp;
        my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
+
        if (bypass && !rcu_nocb_poll) {
                // At least one child with non-empty ->nocb_bypass, so set
                // timer in order to avoid stranding its callbacks.
-               raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
-               mod_timer(&my_rdp->nocb_bypass_timer, j + 2);
-               raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
+               wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
+                                  TPS("WakeBypassIsDeferred"));
        }
        if (rcu_nocb_poll) {
                /* Polling, so trace if first poll in the series. */
@@ -2197,8 +2181,10 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
        }
        if (!rcu_nocb_poll) {
                raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
-               if (bypass)
-                       del_timer(&my_rdp->nocb_bypass_timer);
+               if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
+                       WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+                       del_timer(&my_rdp->nocb_timer);
+               }
                WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
                raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
        }
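With ->nocb_bypass_timer gone, the bypass case rides the same per-leader ->nocb_timer and deferral state as every other deferred wakeup. The resulting flow, sketched from the hunks above:

/*
 * nocb_gp_wait()
 *   bypass CBs pending && !rcu_nocb_poll
 *     -> wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS)
 *          mod_timer(&rdp_gp->nocb_timer, jiffies + 2)
 *
 * ...two jiffies later, the timer fires...
 *
 * do_nocb_deferred_wakeup_timer()
 *   -> do_nocb_deferred_wakeup_common(..., RCU_NOCB_WAKE_BYPASS, ...)
 *        -> __wake_nocb_gp()   // clears the deferment, wakes the GP kthread
 */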
@@ -2334,25 +2320,27 @@ static int rcu_nocb_cb_kthread(void *arg)
 }
 
 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
 {
-       return READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT;
+       return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
 }
 
 /* Do a deferred wakeup of rcu_nocb_kthread(). */
-static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
+static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
+                                          struct rcu_data *rdp, int level,
+                                          unsigned long flags)
+       __releases(rdp_gp->nocb_gp_lock)
 {
-       unsigned long flags;
        int ndw;
        int ret;
 
-       rcu_nocb_lock_irqsave(rdp, flags);
-       if (!rcu_nocb_need_deferred_wakeup(rdp)) {
-               rcu_nocb_unlock_irqrestore(rdp, flags);
+       if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
+               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
                return false;
        }
-       ndw = READ_ONCE(rdp->nocb_defer_wakeup);
-       ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+
+       ndw = rdp_gp->nocb_defer_wakeup;
+       ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
 
        return ret;
@@ -2361,9 +2349,15 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
 static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
 {
+       unsigned long flags;
        struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
 
-       do_nocb_deferred_wakeup_common(rdp);
+       WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
+
+       raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
+       smp_mb__after_spinlock(); /* Timer expire before wakeup. */
+       do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
 }
 
 /*
@@ -2373,9 +2367,14 @@ static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
  */
 static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
 {
-       if (rcu_nocb_need_deferred_wakeup(rdp))
-               return do_nocb_deferred_wakeup_common(rdp);
-       return false;
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
+               return false;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
 }
 
 void rcu_nocb_flush_deferred_wakeup(void)
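The new level argument is what lets the two remaining callers filter deferments differently: the timer handler accepts anything pending (level RCU_NOCB_WAKE_BYPASS and up), while the flush path acts only on ordinary-or-stronger requests. Since rcu_nocb_need_deferred_wakeup() is a simple >= comparison, the behavior tabulates as:

/*
 * pending ->nocb_defer_wakeup:  NOT(0)  BYPASS(1)  WAKE(2)  FORCE(3)
 * timer path, level = BYPASS:    no      yes        yes      yes
 * flush path, level = WAKE:      no      no         yes      yes
 */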
@@ -2443,17 +2442,15 @@ static long rcu_nocb_rdp_deoffload(void *arg)
        swait_event_exclusive(rdp->nocb_state_wq,
                              !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
                                                        SEGCBLIST_KTHREAD_GP));
-       rcu_nocb_lock_irqsave(rdp, flags);
-       /* Make sure nocb timer won't stay around */
-       WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_OFF);
-       rcu_nocb_unlock_irqrestore(rdp, flags);
-       del_timer_sync(&rdp->nocb_timer);
-
        /*
-        * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY with CB unlocked
-        * and IRQs disabled but let's be paranoid.
+        * Lock one last time to acquire latest callback updates from kthreads
+        * so we can later handle callbacks locally without locking.
         */
        rcu_nocb_lock_irqsave(rdp, flags);
+       /*
+        * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
+        * lock is released but how about being paranoid for once?
+        */
        rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
        /*
         * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
@@ -2473,10 +2470,6 @@ int rcu_nocb_cpu_deoffload(int cpu)
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        int ret = 0;
 
-       if (rdp == rdp->nocb_gp_rdp) {
-               pr_info("Can't deoffload an rdp GP leader (yet)\n");
-               return -EINVAL;
-       }
        mutex_lock(&rcu_state.barrier_mutex);
        cpus_read_lock();
        if (rcu_rdp_is_offloaded(rdp)) {
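For context, rcu_nocb_cpu_deoffload() and its counterpart rcu_nocb_cpu_offload() are the kernel-internal toggles exercised by rcutorture. A hypothetical in-kernel caller would look roughly like this (a sketch assuming the declarations exported for CONFIG_RCU_NOCB_CPU builds; the helper name is made up):

#include <linux/rcupdate.h>

/* Hypothetical helper: bounce a CPU out of and back into NOCB mode. */
static int rcu_nocb_bounce(int cpu)
{
	int ret;

	ret = rcu_nocb_cpu_deoffload(cpu);	/* callbacks back to softirq */
	if (ret)
		return ret;
	return rcu_nocb_cpu_offload(cpu);	/* hand them back to nocb kthreads */
}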
@@ -2517,8 +2510,7 @@ static long rcu_nocb_rdp_offload(void *arg)
         * SEGCBLIST_SOFTIRQ_ONLY mode.
         */
        raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
-       /* Re-enable nocb timer */
-       WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+
        /*
         * We didn't take the nocb lock while working on the
         * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
@@ -2626,7 +2618,6 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
        raw_spin_lock_init(&rdp->nocb_bypass_lock);
        raw_spin_lock_init(&rdp->nocb_gp_lock);
        timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
-       timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0);
        rcu_cblist_init(&rdp->nocb_bypass);
 }
 
@@ -2785,13 +2776,12 @@ static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
 {
        struct rcu_node *rnp = rdp->mynode;
 
-       pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
+       pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
                rdp->cpu,
                "kK"[!!rdp->nocb_gp_kthread],
                "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
                "dD"[!!rdp->nocb_defer_wakeup],
                "tT"[timer_pending(&rdp->nocb_timer)],
-               "bB"[timer_pending(&rdp->nocb_bypass_timer)],
                "sS"[!!rdp->nocb_gp_sleep],
                ".W"[swait_active(&rdp->nocb_gp_wq)],
                ".W"[swait_active(&rnp->nocb_gp_wq[0])],
@@ -2812,7 +2802,6 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
        char bufr[20];
        struct rcu_segcblist *rsclp = &rdp->cblist;
        bool waslocked;
-       bool wastimer;
        bool wassleep;
 
        if (rdp->nocb_gp_rdp == rdp)
@@ -2849,15 +2838,13 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
                return;
 
        waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
-       wastimer = timer_pending(&rdp->nocb_bypass_timer);
        wassleep = swait_active(&rdp->nocb_gp_wq);
-       if (!rdp->nocb_gp_sleep && !waslocked && !wastimer && !wassleep)
-               return;  /* Nothing untowards. */
+       if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
+               return;  /* Nothing untoward. */
 
-       pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c%c %c\n",
+       pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
                "lL"[waslocked],
                "dD"[!!rdp->nocb_defer_wakeup],
-               "tT"[wastimer],
                "sS"[!!rdp->nocb_gp_sleep],
                ".W"[wassleep]);
 }
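The "lL"/"dD"/"sS" strings in these dumps use a compact C idiom: a two-character string literal indexed by a normalized boolean, so lowercase means inactive and uppercase means active. A standalone illustration:

#include <stdio.h>

int main(void)
{
	int kthread_running = 1;
	int timer_pending = 0;

	/* "kK"[1] == 'K', "tT"[0] == 't' */
	printf("%c%c\n", "kK"[!!kthread_running], "tT"[!!timer_pending]);
	return 0;	/* prints "Kt" */
}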
@@ -2922,7 +2909,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
 }
 
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
 {
        return false;
 }