Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-microblaze.git] / kernel / rcu / tree.c
index 24a79e8..0b760c1 100644
@@ -27,6 +27,9 @@
  * For detailed explanation of Read-Copy Update mechanism see -
  *     Documentation/RCU
  */
+
+#define pr_fmt(fmt) "rcu: " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -101,6 +104,7 @@ struct rcu_state sname##_state = { \
        .abbr = sabbr, \
        .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
        .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
+       .ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \
 }
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -154,6 +158,9 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
+static void
+rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
+                 struct rcu_node *rnp, unsigned long gps, unsigned long flags);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
@@ -176,6 +183,13 @@ module_param(gp_init_delay, int, 0444);
 static int gp_cleanup_delay;
 module_param(gp_cleanup_delay, int, 0444);
 
+/* Retrieve RCU kthreads priority for rcutorture */
+int rcu_get_gp_kthreads_prio(void)
+{
+       return kthread_prio;
+}
+EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
+
 /*
  * Number of grace periods between delays, normalized by the duration of
  * the delay.  The longer the delay, the more the grace periods between
@@ -187,18 +201,6 @@ module_param(gp_cleanup_delay, int, 0444);
  */
 #define PER_RCU_NODE_PERIOD 3  /* Number of grace periods between delays. */
 
-/*
- * Track the rcutorture test sequence number and the update version
- * number within a given test.  The rcutorture_testseq is incremented
- * on every rcutorture module load and unload, so has an odd value
- * when a test is running.  The rcutorture_vernum is set to zero
- * when rcutorture starts and is incremented on each rcutorture update.
- * These variables enable correlating rcutorture output with the
- * RCU tracing information.
- */
-unsigned long rcutorture_testseq;
-unsigned long rcutorture_vernum;
-
 /*
  * Compute the mask of online CPUs for the specified rcu_node structure.
  * This will not be stable unless the rcu_node structure's ->lock is
@@ -378,20 +380,6 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
        return snap != rcu_dynticks_snap(rdtp);
 }
 
-/*
- * Do a double-increment of the ->dynticks counter to emulate a
- * momentary idle-CPU quiescent state.
- */
-static void rcu_dynticks_momentary_idle(void)
-{
-       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-       int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
-                                       &rdtp->dynticks);
-
-       /* It is illegal to call this from idle state. */
-       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
-}
-
 /*
  * Set the special (bottom) bit of the specified CPU so that it
  * will take special action (such as flushing its TLB) on the
@@ -423,12 +411,17 @@ bool rcu_eqs_special_set(int cpu)
  *
  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
  *
- * The caller must have disabled interrupts.
+ * The caller must have disabled interrupts and must not be idle.
  */
 static void rcu_momentary_dyntick_idle(void)
 {
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       int special;
+
        raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
-       rcu_dynticks_momentary_idle();
+       special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+       /* It is illegal to call this from idle state. */
+       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
 }
 
 /*
@@ -450,7 +443,7 @@ void rcu_note_context_switch(bool preempt)
                rcu_momentary_dyntick_idle();
        this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
        if (!preempt)
-               rcu_note_voluntary_context_switch_lite(current);
+               rcu_tasks_qs(current);
 out:
        trace_rcu_utilization(TPS("End context switch"));
        barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -512,8 +505,38 @@ static ulong jiffies_till_first_fqs = ULONG_MAX;
 static ulong jiffies_till_next_fqs = ULONG_MAX;
 static bool rcu_kick_kthreads;
 
-module_param(jiffies_till_first_fqs, ulong, 0644);
-module_param(jiffies_till_next_fqs, ulong, 0644);
+static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+       ulong j;
+       int ret = kstrtoul(val, 0, &j);
+
+       if (!ret)
+               WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
+       return ret;
+}
+
+static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+       ulong j;
+       int ret = kstrtoul(val, 0, &j);
+
+       if (!ret)
+               WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
+       return ret;
+}
+
+static struct kernel_param_ops first_fqs_jiffies_ops = {
+       .set = param_set_first_fqs_jiffies,
+       .get = param_get_ulong,
+};
+
+static struct kernel_param_ops next_fqs_jiffies_ops = {
+       .set = param_set_next_fqs_jiffies,
+       .get = param_get_ulong,
+};
+
+module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
+module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
 module_param(rcu_kick_kthreads, bool, 0644);
 
 /*
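The two setters above move clamping of the fqs wait times to module-parameter write time (the per-loop fixups in rcu_gp_kthread() are deleted in a later hunk), capping both values at HZ jiffies, i.e. one second, and keeping jiffies_till_next_fqs at least 1. A stand-alone sketch of just those clamping rules, with made-up names and an assumed HZ value rather than kernel API:

#include <stdio.h>

#define HZ 250  /* assumed tick rate, for illustration only */

/* Mirrors param_set_first_fqs_jiffies(): cap at HZ, zero allowed. */
static unsigned long clamp_first_fqs(unsigned long j)
{
        return (j > HZ) ? HZ : j;
}

/* Mirrors param_set_next_fqs_jiffies(): cap at HZ, floor of 1. */
static unsigned long clamp_next_fqs(unsigned long j)
{
        return (j > HZ) ? HZ : (j ? j : 1);
}

int main(void)
{
        unsigned long samples[] = { 0, 3, 250, 100000 };
        int i;

        for (i = 0; i < 4; i++)
                printf("j=%lu -> first=%lu next=%lu\n", samples[i],
                       clamp_first_fqs(samples[i]),
                       clamp_next_fqs(samples[i]));
        return 0;  /* 0 -> 0/1, 3 -> 3/3, 250 -> 250/250, 100000 -> 250/250 */
}

Writes arriving through the module-parameter interface therefore can no longer push either timeout past a second's worth of jiffies, and the GP kthread no longer has to repair out-of-range values on every pass.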
@@ -532,7 +555,7 @@ static int rcu_pending(void);
  */
 unsigned long rcu_get_gp_seq(void)
 {
-       return rcu_seq_ctr(READ_ONCE(rcu_state_p->gp_seq));
+       return READ_ONCE(rcu_state_p->gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
 
@@ -541,7 +564,7 @@ EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
  */
 unsigned long rcu_sched_get_gp_seq(void)
 {
-       return rcu_seq_ctr(READ_ONCE(rcu_sched_state.gp_seq));
+       return READ_ONCE(rcu_sched_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
 
@@ -550,7 +573,7 @@ EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
  */
 unsigned long rcu_bh_get_gp_seq(void)
 {
-       return rcu_seq_ctr(READ_ONCE(rcu_bh_state.gp_seq));
+       return READ_ONCE(rcu_bh_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
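These three accessors now hand back the raw ->gp_seq value instead of rcu_seq_ctr() of it. For reference, rcu_seq numbers keep the grace-period phase in their low-order bits, along the lines of the sketch below (modeled on kernel/rcu/rcu.h; the exact constants are my assumption, so treat this as a sketch rather than a quote):

/* Sketch of the rcu_seq encoding: low bits = phase, upper bits = count. */
#define RCU_SEQ_CTR_SHIFT       2
#define RCU_SEQ_STATE_MASK      ((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Grace-period count embedded in sequence value s. */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;
}

/* Nonzero while a grace period is in progress. */
static inline int rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;
}

So callers of rcu_get_gp_seq() and friends, which appear to be the rcutorture hooks, now see the full sequence value, state bits included, rather than a bare grace-period count.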
 
@@ -608,30 +631,37 @@ EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
  */
 void show_rcu_gp_kthreads(void)
 {
+       int cpu;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
        struct rcu_state *rsp;
 
        for_each_rcu_flavor(rsp) {
                pr_info("%s: wait state: %d ->state: %#lx\n",
                        rsp->name, rsp->gp_state, rsp->gp_kthread->state);
+               rcu_for_each_node_breadth_first(rsp, rnp) {
+                       if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
+                               continue;
+                       pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
+                               rnp->grplo, rnp->grphi, rnp->gp_seq,
+                               rnp->gp_seq_needed);
+                       if (!rcu_is_leaf_node(rnp))
+                               continue;
+                       for_each_leaf_node_possible_cpu(rnp, cpu) {
+                               rdp = per_cpu_ptr(rsp->rda, cpu);
+                               if (rdp->gpwrap ||
+                                   ULONG_CMP_GE(rsp->gp_seq,
+                                                rdp->gp_seq_needed))
+                                       continue;
+                               pr_info("\tcpu %d ->gp_seq_needed %lu\n",
+                                       cpu, rdp->gp_seq_needed);
+                       }
+               }
                /* sched_show_task(rsp->gp_kthread); */
        }
 }
 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 
-/*
- * Record the number of times rcutorture tests have been initiated and
- * terminated.  This information allows the debugfs tracing stats to be
- * correlated to the rcutorture messages, even when the rcutorture module
- * is being repeatedly loaded and unloaded.  In other words, we cannot
- * store this state in rcutorture itself.
- */
-void rcutorture_record_test_transition(void)
-{
-       rcutorture_testseq++;
-       rcutorture_vernum = 0;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
-
 /*
  * Send along grace-period-related data for rcutorture diagnostics.
  */
@@ -660,17 +690,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 }
 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 
-/*
- * Record the number of writer passes through the current rcutorture test.
- * This is also used to correlate debugfs tracing stats with the rcutorture
- * messages.
- */
-void rcutorture_record_progress(unsigned long vernum)
-{
-       rcutorture_vernum++;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_progress);
-
 /*
  * Return the root node of the specified rcu_state structure.
  */
@@ -1030,41 +1049,41 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
- * Is the current CPU online?  Disable preemption to avoid false positives
- * that could otherwise happen due to the current CPU number being sampled,
- * this task being preempted, its old CPU being taken offline, resuming
- * on some other CPU, then determining that its old CPU is now offline.
- * It is OK to use RCU on an offline processor during initial boot, hence
- * the check for rcu_scheduler_fully_active.  Note also that it is OK
- * for a CPU coming online to use RCU for one jiffy prior to marking itself
- * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
- * offline to continue to use RCU for one jiffy after marking itself
- * offline in the cpu_online_mask.  This leniency is necessary given the
- * non-atomic nature of the online and offline processing, for example,
- * the fact that a CPU enters the scheduler after completing the teardown
- * of the CPU.
+ * Is the current CPU online as far as RCU is concerned?
  *
- * This is also why RCU internally marks CPUs online during in the
- * preparation phase and offline after the CPU has been taken down.
+ * Disable preemption to avoid false positives that could otherwise
+ * happen due to the current CPU number being sampled, this task being
+ * preempted, its old CPU being taken offline, resuming on some other CPU,
+ * then determining that its old CPU is now offline.  Because there are
+ * multiple flavors of RCU, and because this function can be called in the
+ * midst of updating the flavors while a given CPU is coming online or
+ * going offline, it is necessary to check all flavors.  If any flavor
+ * believes that the given CPU is online, it is considered to be online.
  *
- * Disable checking if in an NMI handler because we cannot safely report
- * errors from NMI handlers anyway.
+ * Disable checking if in an NMI handler because we cannot safely
+ * report errors from NMI handlers anyway.  In addition, it is OK to use
+ * RCU on an offline processor during initial boot, hence the check for
+ * rcu_scheduler_fully_active.
  */
 bool rcu_lockdep_current_cpu_online(void)
 {
        struct rcu_data *rdp;
        struct rcu_node *rnp;
-       bool ret;
+       struct rcu_state *rsp;
 
-       if (in_nmi())
+       if (in_nmi() || !rcu_scheduler_fully_active)
                return true;
        preempt_disable();
-       rdp = this_cpu_ptr(&rcu_sched_data);
-       rnp = rdp->mynode;
-       ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
-             !rcu_scheduler_fully_active;
+       for_each_rcu_flavor(rsp) {
+               rdp = this_cpu_ptr(rsp->rda);
+               rnp = rdp->mynode;
+               if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
+                       preempt_enable();
+                       return true;
+               }
+       }
        preempt_enable();
-       return ret;
+       return false;
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
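For context on how this predicate is consumed: I believe the lockdep-based rcu_read_lock*_held() helpers return "not held" when the current CPU is offline from RCU's point of view, which is what turns RCU usage on such a CPU into an rcu_dereference_check() splat. A rough sketch of that pattern (the wrapper name is made up; the functions it calls are real kernel symbols):

#include <linux/lockdep.h>
#include <linux/rcupdate.h>

/* Illustrative only: refuse to vouch for an RCU read-side critical
 * section on a CPU that no RCU flavor currently considers online.
 */
static int my_read_lock_held(void)
{
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return lock_is_held(&rcu_lock_map);
}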
 
@@ -1184,12 +1203,25 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
                smp_store_release(ruqp, true);
        }
 
-       /* Check for the CPU being offline. */
-       if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("ofl"));
-               rdp->offline_fqs++;
-               rcu_gpnum_ovf(rnp, rdp);
-               return 1;
+       /* If waiting too long on an offline CPU, complain. */
+       if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
+           time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+               bool onl;
+               struct rcu_node *rnp1;
+
+               WARN_ON(1);  /* Offline CPUs are supposed to report QS! */
+               pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+                       __func__, rnp->grplo, rnp->grphi, rnp->level,
+                       (long)rnp->gp_seq, (long)rnp->completedqs);
+               for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+                       pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
+                               __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
+               onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+               pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
+                       __func__, rdp->cpu, ".o"[onl],
+                       (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+                       (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+               return 1; /* Break things loose after complaining. */
        }
 
        /*
@@ -1246,9 +1278,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
        unsigned long j1;
 
        rsp->gp_start = j;
-       smp_wmb(); /* Record start time before stall time. */
        j1 = rcu_jiffies_till_stall_check();
-       WRITE_ONCE(rsp->jiffies_stall, j + j1);
+       /* Record ->gp_start before ->jiffies_stall. */
+       smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
        rsp->jiffies_resched = j + j1 / 2;
        rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
 }
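The smp_wmb() above becomes a release store of ->jiffies_stall, which orders the preceding ->gp_start update for any reader that performs an acquire load of the deadline. A stand-alone C11 rendering of that pairing (field names reused only for readability; whether the in-kernel stall-check reader really uses an acquire load is an assumption here, and the kernel spelling of the pair is smp_store_release()/smp_load_acquire()):

#include <stdatomic.h>

/* Stand-ins for rsp->gp_start and rsp->jiffies_stall. */
static unsigned long gp_start;
static _Atomic unsigned long jiffies_stall;

/*
 * Writer, mirroring record_gp_stall_check_time(): the release store of
 * the stall deadline publishes the earlier store to gp_start.
 */
static void publish_stall_deadline(unsigned long j, unsigned long j1)
{
        gp_start = j;                                   /* ordered before ... */
        atomic_store_explicit(&jiffies_stall, j + j1,
                              memory_order_release);    /* ... this store */
}

/*
 * Reader: the acquire load pairs with the release store, so a reader
 * that observes the new deadline also observes the matching gp_start.
 * Real kernel code would still wrap the plain field in READ_ONCE().
 */
static unsigned long read_start_after_deadline(unsigned long *deadline)
{
        *deadline = atomic_load_explicit(&jiffies_stall,
                                         memory_order_acquire);
        return gp_start;
}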
@@ -1331,7 +1363,7 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
        }
 }
 
-static inline void panic_on_rcu_stall(void)
+static void panic_on_rcu_stall(void)
 {
        if (sysctl_panic_on_rcu_stall)
                panic("RCU Stall\n");
@@ -1357,8 +1389,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
-       pr_err("INFO: %s detected stalls on CPUs/tasks:",
-              rsp->name);
+       pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
        print_cpu_stall_info_begin();
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1546,13 +1577,18 @@ void rcu_cpu_stall_reset(void)
 
 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-                             unsigned long c, const char *s)
+                             unsigned long gp_seq_req, const char *s)
 {
-       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, c,
+       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
                                      rnp->level, rnp->grplo, rnp->grphi, s);
 }
 
 /*
+ * rcu_start_this_gp - Request the start of a particular grace period
+ * @rnp_start: The leaf node of the CPU from which to start.
+ * @rdp: The rcu_data corresponding to the CPU from which to start.
+ * @gp_seq_req: The gp_seq of the grace period to start.
+ *
  * Start the specified grace period, as needed to handle newly arrived
  * callbacks.  The required future grace periods are recorded in each
  * rcu_node structure's ->gp_seq_needed field.  Returns true if there
@@ -1560,73 +1596,78 @@ static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
  *
  * The caller must hold the specified rcu_node structure's ->lock, which
  * is why the caller is responsible for waking the grace-period kthread.
+ *
+ * Returns true if the GP thread needs to be awakened else false.
  */
-static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-                             unsigned long c)
+static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
+                             unsigned long gp_seq_req)
 {
        bool ret = false;
        struct rcu_state *rsp = rdp->rsp;
-       struct rcu_node *rnp_root;
+       struct rcu_node *rnp;
 
        /*
         * Use funnel locking to either acquire the root rcu_node
         * structure's lock or bail out if the need for this grace period
-        * has already been recorded -- or has already started.  If there
-        * is already a grace period in progress in a non-leaf node, no
-        * recording is needed because the end of the grace period will
-        * scan the leaf rcu_node structures.  Note that rnp->lock must
-        * not be released.
+        * has already been recorded -- or if that grace period has in
+        * fact already started.  If there is already a grace period in
+        * progress in a non-leaf node, no recording is needed because the
+        * end of the grace period will scan the leaf rcu_node structures.
+        * Note that rnp_start->lock must not be released.
         */
-       raw_lockdep_assert_held_rcu_node(rnp);
-       trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
-       for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
-               if (rnp_root != rnp)
-                       raw_spin_lock_rcu_node(rnp_root);
-               if (ULONG_CMP_GE(rnp_root->gp_seq_needed, c) ||
-                   rcu_seq_done(&rnp_root->gp_seq, c) ||
-                   (rnp != rnp_root &&
-                    rcu_seq_state(rcu_seq_current(&rnp_root->gp_seq)))) {
-                       trace_rcu_this_gp(rnp_root, rdp, c, TPS("Prestarted"));
+       raw_lockdep_assert_held_rcu_node(rnp_start);
+       trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
+       for (rnp = rnp_start; 1; rnp = rnp->parent) {
+               if (rnp != rnp_start)
+                       raw_spin_lock_rcu_node(rnp);
+               if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
+                   rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
+                   (rnp != rnp_start &&
+                    rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
+                       trace_rcu_this_gp(rnp, rdp, gp_seq_req,
+                                         TPS("Prestarted"));
                        goto unlock_out;
                }
-               rnp_root->gp_seq_needed = c;
+               rnp->gp_seq_needed = gp_seq_req;
                if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
                        /*
-                        * We just marked the leaf, and a grace period
-                        * is in progress, which means that rcu_gp_cleanup()
-                        * will see the marking.  Bail to reduce contention.
+                        * We just marked the leaf or internal node, and a
+                        * grace period is in progress, which means that
+                        * rcu_gp_cleanup() will see the marking.  Bail to
+                        * reduce contention.
                         */
-                       trace_rcu_this_gp(rnp, rdp, c, TPS("Startedleaf"));
+                       trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
+                                         TPS("Startedleaf"));
                        goto unlock_out;
                }
-               if (rnp_root != rnp && rnp_root->parent != NULL)
-                       raw_spin_unlock_rcu_node(rnp_root);
-               if (!rnp_root->parent)
+               if (rnp != rnp_start && rnp->parent != NULL)
+                       raw_spin_unlock_rcu_node(rnp);
+               if (!rnp->parent)
                        break;  /* At root, and perhaps also leaf. */
        }
 
        /* If GP already in progress, just leave, otherwise start one. */
        if (rcu_gp_in_progress(rsp)) {
-               trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedleafroot"));
+               trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
                goto unlock_out;
        }
-       trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
+       trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
        WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
        rsp->gp_req_activity = jiffies;
        if (!rsp->gp_kthread) {
-               trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
+               trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
                goto unlock_out;
        }
        trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
        ret = true;  /* Caller must wake GP kthread. */
 unlock_out:
        /* Push furthest requested GP to leaf node and rcu_data structure. */
-       if (ULONG_CMP_LT(c, rnp_root->gp_seq_needed)) {
-               rnp->gp_seq_needed = rnp_root->gp_seq_needed;
-               rdp->gp_seq_needed = rnp_root->gp_seq_needed;
+       if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
+               rnp_start->gp_seq_needed = rnp->gp_seq_needed;
+               rdp->gp_seq_needed = rnp->gp_seq_needed;
        }
-       if (rnp != rnp_root)
-               raw_spin_unlock_rcu_node(rnp_root);
+       if (rnp != rnp_start)
+               raw_spin_unlock_rcu_node(rnp);
        return ret;
 }
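With the rnp_start/rnp renaming, the funnel-locking shape of rcu_start_this_gp() is easier to see: the caller keeps the starting leaf's lock the whole time, each ancestor's lock is held only while that ancestor is examined, and the walk stops as soon as some node already records the requested sequence number. Reduced to that shape, with generic names and pthread locks rather than the kernel code:

#include <stdbool.h>
#include <pthread.h>

struct fnode {
        pthread_mutex_t lock;
        struct fnode *parent;
        unsigned long seq_needed;       /* highest sequence requested here */
};

/*
 * Funnel-lock walk: the caller already holds start->lock and keeps it.
 * Ancestor locks are taken one at a time, and the walk bails as soon as
 * an ancestor already covers the request, so only the first request for
 * a given sequence number ever contends on the root.
 */
static bool request_seq(struct fnode *start, unsigned long seq_req)
{
        struct fnode *n;
        bool reached_root = false;

        for (n = start; n; n = n->parent) {
                if (n != start)
                        pthread_mutex_lock(&n->lock);
                if (n->seq_needed >= seq_req) {         /* already requested */
                        if (n != start)
                                pthread_mutex_unlock(&n->lock);
                        return false;
                }
                n->seq_needed = seq_req;
                if (!n->parent) {                       /* reached the root */
                        reached_root = true;
                        if (n != start)
                                pthread_mutex_unlock(&n->lock);
                        break;
                }
                if (n != start)
                        pthread_mutex_unlock(&n->lock);
        }
        return reached_root;    /* caller kicks off the work if true */
}

The payoff is that only the first request for a given grace period ever reaches the root's lock; later requests drop out at the lowest level that has already recorded it.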
 
@@ -1636,14 +1677,13 @@ unlock_out:
  */
 static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
-       unsigned long c = rnp->gp_seq;
        bool needmore;
        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
        needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
        if (!needmore)
                rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
-       trace_rcu_this_gp(rnp, rdp, c,
+       trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
                          needmore ? TPS("CleanupMore") : TPS("Cleanup"));
        return needmore;
 }
@@ -1661,7 +1701,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
            !READ_ONCE(rsp->gp_flags) ||
            !rsp->gp_kthread)
                return;
-       swake_up(&rsp->gp_wq);
+       swake_up_one(&rsp->gp_wq);
 }
 
 /*
@@ -1679,7 +1719,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
                               struct rcu_data *rdp)
 {
-       unsigned long c;
+       unsigned long gp_seq_req;
        bool ret = false;
 
        raw_lockdep_assert_held_rcu_node(rnp);
@@ -1698,9 +1738,9 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
         * accelerating callback invocation to an earlier grace-period
         * number.
         */
-       c = rcu_seq_snap(&rsp->gp_seq);
-       if (rcu_segcblist_accelerate(&rdp->cblist, c))
-               ret = rcu_start_this_gp(rnp, rdp, c);
+       gp_seq_req = rcu_seq_snap(&rsp->gp_seq);
+       if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
+               ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
 
        /* Trace depending on how much we were able to accelerate. */
        if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
@@ -1801,16 +1841,18 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                 * set up to detect a quiescent state, otherwise don't
                 * go looking for one.
                 */
-               trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpustart"));
+               trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart"));
                need_gp = !!(rnp->qsmask & rdp->grpmask);
                rdp->cpu_no_qs.b.norm = need_gp;
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
                rdp->core_needs_qs = need_gp;
                zero_cpu_stall_ticks(rdp);
-               WRITE_ONCE(rdp->gpwrap, false);
-               rcu_gpnum_ovf(rnp, rdp);
        }
        rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
+       if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap)
+               rdp->gp_seq_needed = rnp->gp_seq_needed;
+       WRITE_ONCE(rdp->gpwrap, false);
+       rcu_gpnum_ovf(rnp, rdp);
        return ret;
 }
 
@@ -1847,7 +1889,9 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
  */
 static bool rcu_gp_init(struct rcu_state *rsp)
 {
+       unsigned long flags;
        unsigned long oldmask;
+       unsigned long mask;
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
@@ -1882,13 +1926,15 @@ static bool rcu_gp_init(struct rcu_state *rsp)
         * for subsequent online CPUs, and that quiescent-state forcing
         * will handle subsequent offline CPUs.
         */
+       rsp->gp_state = RCU_GP_ONOFF;
        rcu_for_each_leaf_node(rsp, rnp) {
-               rcu_gp_slow(rsp, gp_preinit_delay);
+               spin_lock(&rsp->ofl_lock);
                raw_spin_lock_irq_rcu_node(rnp);
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                    !rnp->wait_blkd_tasks) {
                        /* Nothing to do on this leaf rcu_node structure. */
                        raw_spin_unlock_irq_rcu_node(rnp);
+                       spin_unlock(&rsp->ofl_lock);
                        continue;
                }
 
@@ -1898,12 +1944,14 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 
                /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
                if (!oldmask != !rnp->qsmaskinit) {
-                       if (!oldmask) /* First online CPU for this rcu_node. */
-                               rcu_init_new_rnp(rnp);
-                       else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
-                               rnp->wait_blkd_tasks = true;
-                       else /* Last offline CPU and can propagate. */
+                       if (!oldmask) { /* First online CPU for rcu_node. */
+                               if (!rnp->wait_blkd_tasks) /* Ever offline? */
+                                       rcu_init_new_rnp(rnp);
+                       } else if (rcu_preempt_has_tasks(rnp)) {
+                               rnp->wait_blkd_tasks = true; /* blocked tasks */
+                       } else { /* Last offline CPU and can propagate. */
                                rcu_cleanup_dead_rnp(rnp);
+                       }
                }
 
                /*
@@ -1912,18 +1960,19 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                 * still offline, propagate up the rcu_node tree and
                 * clear ->wait_blkd_tasks.  Otherwise, if one of this
                 * rcu_node structure's CPUs has since come back online,
-                * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
-                * checks for this, so just call it unconditionally).
+                * simply clear ->wait_blkd_tasks.
                 */
                if (rnp->wait_blkd_tasks &&
-                   (!rcu_preempt_has_tasks(rnp) ||
-                    rnp->qsmaskinit)) {
+                   (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
                        rnp->wait_blkd_tasks = false;
-                       rcu_cleanup_dead_rnp(rnp);
+                       if (!rnp->qsmaskinit)
+                               rcu_cleanup_dead_rnp(rnp);
                }
 
                raw_spin_unlock_irq_rcu_node(rnp);
+               spin_unlock(&rsp->ofl_lock);
        }
+       rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
 
        /*
         * Set the quiescent-state-needed bits in all the rcu_node
@@ -1937,11 +1986,12 @@ static bool rcu_gp_init(struct rcu_state *rsp)
         * The grace period cannot complete until the initialization
         * process finishes, because this kthread handles both.
         */
+       rsp->gp_state = RCU_GP_INIT;
        rcu_for_each_node_breadth_first(rsp, rnp) {
                rcu_gp_slow(rsp, gp_init_delay);
-               raw_spin_lock_irq_rcu_node(rnp);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                rdp = this_cpu_ptr(rsp->rda);
-               rcu_preempt_check_blocked_tasks(rnp);
+               rcu_preempt_check_blocked_tasks(rsp, rnp);
                rnp->qsmask = rnp->qsmaskinit;
                WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
                if (rnp == rdp->mynode)
@@ -1950,7 +2000,13 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
-               raw_spin_unlock_irq_rcu_node(rnp);
+               /* Quiescent states for tasks on any now-offline CPUs. */
+               mask = rnp->qsmask & ~rnp->qsmaskinitnext;
+               rnp->rcu_gp_init_mask = mask;
+               if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               else
+                       raw_spin_unlock_irq_rcu_node(rnp);
                cond_resched_tasks_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
        }
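The new tail of this loop computes which CPUs the freshly initialized grace period would wait on (bits set in ->qsmask) even though they have already disappeared from ->qsmaskinitnext, and reports quiescent states on their behalf immediately instead of leaving them for the FQS scan. Just the bit arithmetic, with the field names reused for readability:

#include <stdio.h>

int main(void)
{
        /* Leaf covering CPUs 0-3: the GP waits on all four, but CPUs 2
         * and 3 have gone offline, so their ->qsmaskinitnext bits are
         * clear.
         */
        unsigned long qsmask = 0xfUL;           /* 0b1111 */
        unsigned long qsmaskinitnext = 0x3UL;   /* 0b0011 */
        unsigned long mask = qsmask & ~qsmaskinitnext;

        /* These are the bits rcu_report_qs_rnp() is asked to clear. */
        printf("report QS for mask %#lx\n", mask);      /* prints 0xc */
        return 0;
}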
@@ -1959,7 +2015,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 }
 
 /*
- * Helper function for swait_event_idle() wakeup at force-quiescent-state
+ * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
  * time.
  */
 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
@@ -2045,7 +2101,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irq_rcu_node(rnp);
                if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
-                       dump_blkd_tasks(rnp, 10);
+                       dump_blkd_tasks(rsp, rnp, 10);
                WARN_ON_ONCE(rnp->qsmask);
                WRITE_ONCE(rnp->gp_seq, new_gp_seq);
                rdp = this_cpu_ptr(rsp->rda);
@@ -2069,7 +2125,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        rsp->gp_state = RCU_GP_IDLE;
        /* Check for GP requests since above loop. */
        rdp = this_cpu_ptr(rsp->rda);
-       if (ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
+       if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
                trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
                                  TPS("CleanupMore"));
                needgp = true;
@@ -2107,7 +2163,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                               READ_ONCE(rsp->gp_seq),
                                               TPS("reqwait"));
                        rsp->gp_state = RCU_GP_WAIT_GPS;
-                       swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
+                       swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
                                                     RCU_GP_FLAG_INIT);
                        rsp->gp_state = RCU_GP_DONE_GPS;
                        /* Locking provides needed memory barrier. */
@@ -2124,10 +2180,6 @@ static int __noreturn rcu_gp_kthread(void *arg)
                /* Handle quiescent-state forcing. */
                first_gp_fqs = true;
                j = jiffies_till_first_fqs;
-               if (j > HZ) {
-                       j = HZ;
-                       jiffies_till_first_fqs = HZ;
-               }
                ret = 0;
                for (;;) {
                        if (!ret) {
@@ -2139,7 +2191,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                               READ_ONCE(rsp->gp_seq),
                                               TPS("fqswait"));
                        rsp->gp_state = RCU_GP_WAIT_FQS;
-                       ret = swait_event_idle_timeout(rsp->gp_wq,
+                       ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
                                        rcu_gp_fqs_check_wake(rsp, &gf), j);
                        rsp->gp_state = RCU_GP_DOING_FQS;
                        /* Locking provides needed memory barriers. */
@@ -2162,13 +2214,6 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                ret = 0; /* Force full wait till next FQS. */
                                j = jiffies_till_next_fqs;
-                               if (j > HZ) {
-                                       j = HZ;
-                                       jiffies_till_next_fqs = HZ;
-                               } else if (j < 1) {
-                                       j = 1;
-                                       jiffies_till_next_fqs = 1;
-                               }
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_tasks_rcu_qs();
@@ -2221,6 +2266,10 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  * is the grace-period snapshot, which means that the quiescent states
  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
  * must be held upon entry, and it is released before return.
+ *
+ * As a special case, if mask is zero, the bit-already-cleared check is
+ * disabled.  This allows propagating quiescent state due to resumed tasks
+ * during grace-period initialization.
  */
 static void
 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
@@ -2234,7 +2283,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
        /* Walk up the rcu_node hierarchy. */
        for (;;) {
-               if (!(rnp->qsmask & mask) || rnp->gp_seq != gps) {
+               if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
 
                        /*
                         * Our bit has already been cleared, or the
@@ -2287,8 +2336,9 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  * irqs disabled, and this lock is released upon return, but irqs remain
  * disabled.
  */
-static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
-                                     struct rcu_node *rnp, unsigned long flags)
+static void __maybe_unused
+rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+                         struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
 {
        unsigned long gps;
@@ -2296,12 +2346,15 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
        struct rcu_node *rnp_p;
 
        raw_lockdep_assert_held_rcu_node(rnp);
-       if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
-           rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+       if (WARN_ON_ONCE(rcu_state_p == &rcu_sched_state) ||
+           WARN_ON_ONCE(rsp != rcu_state_p) ||
+           WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
+           rnp->qsmask != 0) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;  /* Still need more quiescent states! */
        }
 
+       rnp->completedqs = rnp->gp_seq;
        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
@@ -2426,7 +2479,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  * This function therefore goes up the tree of rcu_node structures,
  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
  * the leaf rcu_node structure's ->qsmaskinit field has already been
- * updated
+ * updated.
  *
  * This function does check that the specified rcu_node structure has
  * all CPUs offline and no blocked tasks, so it is OK to invoke it
@@ -2439,9 +2492,10 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
        long mask;
        struct rcu_node *rnp = rnp_leaf;
 
-       raw_lockdep_assert_held_rcu_node(rnp);
+       raw_lockdep_assert_held_rcu_node(rnp_leaf);
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
-           rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+           WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
+           WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
                return;
        for (;;) {
                mask = rnp->grpmask;
@@ -2450,7 +2504,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
                        break;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                rnp->qsmaskinit &= ~mask;
-               rnp->qsmask &= ~mask;
+               /* Between grace periods, so better already be zero! */
+               WARN_ON_ONCE(rnp->qsmask);
                if (rnp->qsmaskinit) {
                        raw_spin_unlock_rcu_node(rnp);
                        /* irqs remain disabled. */
@@ -2593,6 +2648,7 @@ void rcu_check_callbacks(int user)
 
                rcu_sched_qs();
                rcu_bh_qs();
+               rcu_note_voluntary_context_switch(current);
 
        } else if (!in_softirq()) {
 
@@ -2608,8 +2664,7 @@ void rcu_check_callbacks(int user)
        rcu_preempt_check_callbacks();
        if (rcu_pending())
                invoke_rcu_core();
-       if (user)
-               rcu_note_voluntary_context_switch(current);
+
        trace_rcu_utilization(TPS("End scheduler-tick"));
 }
 
@@ -2644,17 +2699,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
                                /* rcu_initiate_boost() releases rnp->lock */
                                continue;
                        }
-                       if (rnp->parent &&
-                           (rnp->parent->qsmask & rnp->grpmask)) {
-                               /*
-                                * Race between grace-period
-                                * initialization and task exiting RCU
-                                * read-side critical section: Report.
-                                */
-                               rcu_report_unblock_qs_rnp(rsp, rnp, flags);
-                               /* rcu_report_unblock_qs_rnp() rlses ->lock */
-                               continue;
-                       }
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+                       continue;
                }
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
@@ -2717,6 +2763,7 @@ static void
 rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
                         struct rcu_data *rdp)
 {
+       const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
        unsigned long flags;
        unsigned long j;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
@@ -2726,8 +2773,8 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
                return;
        j = jiffies; /* Expensive access, and in common case don't get here. */
-       if (time_before(j, READ_ONCE(rsp->gp_req_activity) + HZ) ||
-           time_before(j, READ_ONCE(rsp->gp_activity) + HZ) ||
+       if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+           time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
            atomic_read(&warned))
                return;
 
@@ -2735,8 +2782,8 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
        j = jiffies;
        if (rcu_gp_in_progress(rsp) ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-           time_before(j, READ_ONCE(rsp->gp_req_activity) + HZ) ||
-           time_before(j, READ_ONCE(rsp->gp_activity) + HZ) ||
+           time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+           time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
            atomic_read(&warned)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
@@ -2748,18 +2795,18 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
        j = jiffies;
        if (rcu_gp_in_progress(rsp) ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-           time_before(j, rsp->gp_req_activity + HZ) ||
-           time_before(j, rsp->gp_activity + HZ) ||
+           time_before(j, rsp->gp_req_activity + gpssdelay) ||
+           time_before(j, rsp->gp_activity + gpssdelay) ||
            atomic_xchg(&warned, 1)) {
                raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
-       pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x %s->state:%#lx\n",
+       pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
                 __func__, (long)READ_ONCE(rsp->gp_seq),
                 (long)READ_ONCE(rnp_root->gp_seq_needed),
                 j - rsp->gp_req_activity, j - rsp->gp_activity,
-                rsp->gp_flags, rsp->name,
+                rsp->gp_flags, rsp->gp_state, rsp->name,
                 rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
        WARN_ON(1);
        if (rnp_root != rnp)
@@ -3044,7 +3091,7 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
  * when there was in fact only one the whole time, as this just adds
  * some overhead: RCU still operates correctly.
  */
-static inline int rcu_blocking_is_gp(void)
+static int rcu_blocking_is_gp(void)
 {
        int ret;
 
@@ -3279,7 +3326,7 @@ static int rcu_pending(void)
  * non-NULL, store an indication of whether all callbacks are lazy.
  * (If there are no callbacks, all of them are deemed to be lazy.)
  */
-static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
+static bool rcu_cpu_has_callbacks(bool *all_lazy)
 {
        bool al = true;
        bool hc = false;
@@ -3465,17 +3512,22 @@ EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 {
        long mask;
+       long oldmask;
        struct rcu_node *rnp = rnp_leaf;
 
-       raw_lockdep_assert_held_rcu_node(rnp);
+       raw_lockdep_assert_held_rcu_node(rnp_leaf);
+       WARN_ON_ONCE(rnp->wait_blkd_tasks);
        for (;;) {
                mask = rnp->grpmask;
                rnp = rnp->parent;
                if (rnp == NULL)
                        return;
                raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
+               oldmask = rnp->qsmaskinit;
                rnp->qsmaskinit |= mask;
                raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
+               if (oldmask)
+                       return;
        }
 }
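The oldmask test added above lets the climb stop at the first ancestor that already had an online CPU somewhere below it, since that ancestor and everything above it are necessarily initialized already. The walk, stripped of locking and RCU specifics (illustrative types, not the kernel structures):

struct tnode {
        struct tnode *parent;
        unsigned long grpmask;          /* this node's bit in its parent */
        unsigned long qsmaskinit;       /* children with online CPUs */
};

/* Propagate "this subtree now has an online CPU" toward the root,
 * stopping at the first ancestor that already knew about one.  The
 * kernel version additionally takes each node's lock for the update.
 */
static void mark_subtree_online(struct tnode *leaf)
{
        unsigned long mask;
        unsigned long oldmask;
        struct tnode *n = leaf;

        for (;;) {
                mask = n->grpmask;
                n = n->parent;
                if (!n)
                        return;         /* walked past the root */
                oldmask = n->qsmaskinit;
                n->qsmaskinit |= mask;
                if (oldmask)
                        return;         /* ancestors already set up */
        }
}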
 
@@ -3492,6 +3544,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
        WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
+       rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+       rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
+       rdp->rcu_onl_gp_seq = rsp->gp_seq;
+       rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
        rdp->cpu = cpu;
        rdp->rsp = rsp;
        rcu_boot_init_nocb_percpu_data(rdp);
@@ -3686,7 +3742,15 @@ void rcu_cpu_starting(unsigned int cpu)
                nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
                /* Allow lockless access for expedited grace periods. */
                smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+               rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
+               rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
+               if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+                       /* Report QS -after- changing ->qsmaskinitnext! */
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               } else {
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               }
        }
        smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
 }
@@ -3694,7 +3758,7 @@ void rcu_cpu_starting(unsigned int cpu)
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function.  We now remove it from the rcu_node tree's ->qsmaskinit
+ * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
  * bit masks.
  */
 static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
@@ -3706,9 +3770,18 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 
        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
        mask = rdp->grpmask;
+       spin_lock(&rsp->ofl_lock);
        raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+       rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
+       rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
+       if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
+               /* Report quiescent state -before- changing ->qsmaskinitnext! */
+               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       }
        rnp->qsmaskinitnext &= ~mask;
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       spin_unlock(&rsp->ofl_lock);
 }
 
 /*
@@ -3820,12 +3893,16 @@ static int __init rcu_spawn_gp_kthread(void)
        struct task_struct *t;
 
        /* Force priority into range. */
-       if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+       if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
+           && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
+               kthread_prio = 2;
+       else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
                kthread_prio = 1;
        else if (kthread_prio < 0)
                kthread_prio = 0;
        else if (kthread_prio > 99)
                kthread_prio = 99;
+
        if (kthread_prio != kthread_prio_in)
                pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
                         kthread_prio, kthread_prio_in);
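Net effect of the reworked clamping: with RCU_BOOST enabled and rcutorture built in, the GP kthreads get a floor of SCHED_FIFO priority 2 rather than 1, presumably so that torture-test kthreads running at priority 1 cannot get in their way; the 0..99 bounds are unchanged. A stand-alone rendering of the resulting mapping (the two bools stand in for the IS_ENABLED()/IS_BUILTIN() tests):

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the clamping cascade in rcu_spawn_gp_kthread(). */
static int clamp_prio(int prio, bool boost, bool torture_builtin)
{
        if (boost && prio < 2 && torture_builtin)
                return 2;
        if (boost && prio < 1)
                return 1;
        if (prio < 0)
                return 0;
        if (prio > 99)
                return 99;
        return prio;
}

int main(void)
{
        printf("%d %d %d %d\n",
               clamp_prio(0, true, true),       /* 2  */
               clamp_prio(0, true, false),      /* 1  */
               clamp_prio(-5, false, false),    /* 0  */
               clamp_prio(150, false, false));  /* 99 */
        return 0;
}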
@@ -3979,7 +4056,7 @@ static void __init rcu_init_geometry(void)
        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
            nr_cpu_ids == NR_CPUS)
                return;
-       pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
+       pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
                rcu_fanout_leaf, nr_cpu_ids);
 
        /*