rcu/nocb: Allow lockless use of rcu_segcblist_empty()
author: Paul E. McKenney <paulmck@linux.ibm.com>
Mon, 13 May 2019 22:57:50 +0000 (15:57 -0700)
committer: Paul E. McKenney <paulmck@linux.ibm.com>
Tue, 13 Aug 2019 21:35:49 +0000 (14:35 -0700)
Currently, rcu_segcblist_empty() assumes that the callback list is not
being changed by other CPUs, but upcoming changes will require it to
operate locklessly.  This commit therefore adds the needed READ_ONCE()
call, along with the WRITE_ONCE() calls when updating the callback list's
->head field.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
kernel/rcu/rcu_segcblist.c
kernel/rcu/rcu_segcblist.h

index 0e7fe67..06435a3 100644 (file)
@@ -213,7 +213,7 @@ void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
        if (!rcu_segcblist_ready_cbs(rsclp))
                return; /* Nothing to do. */
        *rclp->tail = rsclp->head;
-       rsclp->head = *rsclp->tails[RCU_DONE_TAIL];
+       WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]);
        WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
        rclp->tail = rsclp->tails[RCU_DONE_TAIL];
        for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
@@ -268,7 +268,7 @@ void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
        if (!rclp->head)
                return; /* No callbacks to move. */
        *rclp->tail = rsclp->head;
-       rsclp->head = rclp->head;
+       WRITE_ONCE(rsclp->head, rclp->head);
        for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
                if (&rsclp->head == rsclp->tails[i])
                        WRITE_ONCE(rsclp->tails[i], rclp->tail);
index f74960f..d9142b3 100644 (file)
@@ -36,7 +36,7 @@ struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
  */
 static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
 {
-       return !rsclp->head;
+       return !READ_ONCE(rsclp->head);
 }
 
 /* Return number of callbacks in segmented callback list. */