From: Paul E. McKenney
Date: Fri, 29 Jul 2022 02:58:13 +0000 (-0700)
Subject: rcu: Add full-sized polling for get_state()
X-Git-Tag: microblaze-v6.2~221^2^5~23
X-Git-Url: http://git.monstr.eu/?a=commitdiff_plain;h=3fdefca9b42c8bebe3beea5c1a067c9718ca0fc7;p=linux-2.6-microblaze.git

rcu: Add full-sized polling for get_state()

The get_state_synchronize_rcu() API compresses the combined expedited
and normal grace-period states into a single unsigned long, which
conserves storage, but can miss grace periods in certain cases involving
overlapping normal and expedited grace periods.  Missing the occasional
grace period is usually not a problem, but there are use cases that
care about each and every grace period.

This commit therefore adds the next member of the full-state RCU
grace-period polling API, namely the get_state_synchronize_rcu_full()
function.  This uses up to three times the storage (rcu_gp_oldstate
structure instead of unsigned long), but is guaranteed not to miss
grace periods.

Signed-off-by: Paul E. McKenney
---

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 1fbff8600d92..6e299955c4e9 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -19,6 +19,12 @@ struct rcu_gp_oldstate {
 };
 
 unsigned long get_state_synchronize_rcu(void);
+
+static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+	rgosp->rgos_norm = get_state_synchronize_rcu();
+}
+
 unsigned long start_poll_synchronize_rcu(void);
 bool poll_state_synchronize_rcu(unsigned long oldstate);
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 4ccbc3aa9dc2..7b769f1b417a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -50,6 +50,7 @@ struct rcu_gp_oldstate {
 unsigned long start_poll_synchronize_rcu_expedited(void);
 void cond_synchronize_rcu_expedited(unsigned long oldstate);
 unsigned long get_state_synchronize_rcu(void);
+void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
 unsigned long start_poll_synchronize_rcu(void);
 bool poll_state_synchronize_rcu(unsigned long oldstate);
 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index b31e6ed64d1b..4f196ebce7f2 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -335,6 +335,7 @@ struct rcu_torture_ops {
 	bool (*poll_gp_state_exp)(unsigned long oldstate);
 	void (*cond_sync_exp)(unsigned long oldstate);
 	unsigned long (*get_gp_state)(void);
+	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
 	unsigned long (*get_gp_completed)(void);
 	void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
 	unsigned long (*start_gp_poll)(void);
@@ -504,6 +505,7 @@ static struct rcu_torture_ops rcu_ops = {
 	.sync			= synchronize_rcu,
 	.exp_sync		= synchronize_rcu_expedited,
 	.get_gp_state		= get_state_synchronize_rcu,
+	.get_gp_state_full	= get_state_synchronize_rcu_full,
 	.get_gp_completed	= get_completed_synchronize_rcu,
 	.get_gp_completed_full	= get_completed_synchronize_rcu_full,
 	.start_gp_poll		= start_poll_synchronize_rcu,
@@ -1293,12 +1295,12 @@ rcu_torture_writer(void *arg)
 			break;
 		case RTWS_EXP_SYNC:
 			rcu_torture_writer_state = RTWS_EXP_SYNC;
-			if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
-				cookie = cur_ops->get_gp_state();
+			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
+				cur_ops->get_gp_state_full(&cookie_full);
 			cur_ops->exp_sync();
 			cur_ops->exp_sync();
-			if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
-				WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
+			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
+				WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
 			rcu_torture_pipe_update(old_rp);
 			break;
 		case RTWS_COND_GET:
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d47c9b6d8106..3fa79ee78b5b 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1755,6 +1755,8 @@ static noinline void rcu_gp_cleanup(void)
 			dump_blkd_tasks(rnp, 10);
 		WARN_ON_ONCE(rnp->qsmask);
 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
+		if (!rnp->parent)
+			smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
 		rdp = this_cpu_ptr(&rcu_data);
 		if (rnp == rdp->mynode)
 			needgp = __note_gp_changes(rnp, rdp) || needgp;
@@ -3556,6 +3558,37 @@ unsigned long get_state_synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
 
+/**
+ * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
+ * @rgosp: location to place combined normal/expedited grace-period state
+ *
+ * Places the normal and expedited grace-period states in @rgosp.  This
+ * state value can be passed to a later call to cond_synchronize_rcu_full()
+ * or poll_state_synchronize_rcu_full() to determine whether or not a
+ * grace period (whether normal or expedited) has elapsed in the meantime.
+ * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
+ * long, but is guaranteed to see all grace periods.  In contrast, the
+ * combined state occupies less memory, but can sometimes fail to take
+ * grace periods into account.
+ *
+ * This does not guarantee that the needed grace period will actually
+ * start.
+ */
+void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+	struct rcu_node *rnp = rcu_get_root();
+
+	/*
+	 * Any prior manipulation of RCU-protected data must happen
+	 * before the loads from ->gp_seq and ->expedited_sequence.
+	 */
+	smp_mb();  /* ^^^ */
+	rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
+	rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
+	rgosp->rgos_polled = rcu_seq_snap(&rcu_state.gp_seq_polled);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
+
 /**
  * start_poll_synchronize_rcu - Snapshot and start RCU grace period
  *
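
For readers new to the polling interfaces, a minimal usage sketch of the API
added above follows.  It is not part of the patch: struct foo, foo_retire(),
and foo_try_free() are hypothetical, while get_state_synchronize_rcu_full(),
poll_state_synchronize_rcu_full(), and cond_synchronize_rcu_full() are the
interfaces referenced in the diff and its kernel-doc comment.

/*
 * Hypothetical illustration (not part of this patch): retire an object
 * and free it only after a full RCU grace period, using the full-state
 * cookie so that no intervening grace period can be missed.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_gp_oldstate gp_state;	/* Full-state grace-period cookie. */
};

static void foo_retire(struct foo *p)
{
	/* Snapshot both the normal and expedited grace-period state. */
	get_state_synchronize_rcu_full(&p->gp_state);
}

static bool foo_try_free(struct foo *p)
{
	/* Free only if a grace period has elapsed since the snapshot. */
	if (poll_state_synchronize_rcu_full(&p->gp_state)) {
		kfree(p);
		return true;
	}
	return false;	/* Not yet; retry later or use cond_synchronize_rcu_full(). */
}

A caller that cannot retry can instead pass the cookie to
cond_synchronize_rcu_full(), which waits for the grace period if it has not
yet completed.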