context_tracking, rcu: Rename ct_dynticks_cpu_acquire() into ct_rcu_watching_cpu_acquire()
authorValentin Schneider <vschneid@redhat.com>
Tue, 16 Apr 2024 12:47:12 +0000 (14:47 +0200)
committerNeeraj Upadhyay <neeraj.upadhyay@kernel.org>
Mon, 29 Jul 2024 02:03:10 +0000 (07:33 +0530)
The context_tracking.state RCU_DYNTICKS subvariable has been renamed to
RCU_WATCHING, reflect that change in the related helpers.

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
include/linux/context_tracking_state.h
kernel/rcu/tree.c
kernel/rcu/tree_exp.h

index 728b1e6..2d7036a 100644 (file)
@@ -149,7 +149,7 @@ This case is handled by calls to the strongly ordered
 ``atomic_add_return()`` read-modify-write atomic operation that
 is invoked within ``rcu_dynticks_eqs_enter()`` at idle-entry
 time and within ``rcu_dynticks_eqs_exit()`` at idle-exit time.
-The grace-period kthread invokes first ``ct_dynticks_cpu_acquire()``
+The grace-period kthread invokes first ``ct_rcu_watching_cpu_acquire()``
 (preceded by a full memory barrier) and ``rcu_dynticks_in_eqs_since()``
 (both of which rely on acquire semantics) to detect idle CPUs.
 
index ad5a06a..ad6570f 100644 (file)
@@ -68,7 +68,7 @@ static __always_inline int ct_rcu_watching_cpu(int cpu)
        return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
 }
 
-static __always_inline int ct_dynticks_cpu_acquire(int cpu)
+static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)
 {
        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
 
index e7f612e..45a9f36 100644 (file)
@@ -323,7 +323,7 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
         * performed by the remote CPU prior to entering idle and therefore can
         * rely solely on acquire semantics.
         */
-       return snap != ct_dynticks_cpu_acquire(rdp->cpu);
+       return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
 }
 
 /*
@@ -782,7 +782,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
         * Ordering between remote CPU's pre idle accesses and post grace period
         * updater's accesses is enforced by the below acquire semantic.
         */
-       rdp->dynticks_snap = ct_dynticks_cpu_acquire(rdp->cpu);
+       rdp->dynticks_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
        if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
                trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                rcu_gpnum_ovf(rdp->mynode, rdp);
index 4acd29d..daa87fe 100644 (file)
@@ -376,7 +376,7 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
                         * post grace period updater's accesses is enforced by the
                         * below acquire semantic.
                         */
-                       snap = ct_dynticks_cpu_acquire(cpu);
+                       snap = ct_rcu_watching_cpu_acquire(cpu);
                        if (rcu_dynticks_in_eqs(snap))
                                mask_ofl_test |= mask;
                        else