rcu/context_tracking: Move dynticks_nmi_nesting to context tracking
author Frederic Weisbecker <frederic@kernel.org>
Wed, 8 Jun 2022 14:40:31 +0000 (16:40 +0200)
committer Paul E. McKenney <paulmck@kernel.org>
Tue, 5 Jul 2022 20:32:59 +0000 (13:32 -0700)
The RCU extended quiescent state (eqs) tracking is going to be performed
by the context tracking subsystem. The related nesting counters thus need
to be moved to the context tracking structure.
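For orientation, here is a sketch of the protocol this counter implements,
as derived from the rcu_eqs_enter()/rcu_eqs_exit()/rcu_nmi_enter()/
rcu_nmi_exit() handlers updated below (a reading aid, not part of the patch):

	task context, not idle:             dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE
	rcu_eqs_enter() (going idle):       dynticks_nmi_nesting  = 0
	rcu_nmi_enter() from an EQS:        += 1  (0 -> 1, outermost irq/NMI from idle)
	rcu_nmi_enter(), already non-idle:  += 2  (per extra nesting level)
	rcu_nmi_exit(), nesting != 1:       -= 2
	rcu_nmi_exit(), nesting == 1:        = 0  (CPU returns to RCU-idle)

The large DYNTICK_IRQ_NONIDLE offset, (LONG_MAX / 2) + 1, keeps the +2/-2
arithmetic in non-idle task context from ever reaching the idle-significant
values 0 and 1.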

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Nicolas Saenz Julienne <nsaenz@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Cc: Yu Liao <liaoyu15@huawei.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Alex Belits <abelits@marvell.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Tested-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
include/linux/context_tracking_state.h
kernel/context_tracking.c
kernel/rcu/rcu.h
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_stall.h

diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 13a4a9d..5f11e3d 100644
@@ -13,6 +13,9 @@ enum ctx_state {
        CONTEXT_GUEST,
 };
 
+/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
+#define DYNTICK_IRQ_NONIDLE    ((LONG_MAX / 2) + 1)
+
 struct context_tracking {
 #ifdef CONFIG_CONTEXT_TRACKING_USER
        /*
@@ -28,6 +31,7 @@ struct context_tracking {
 #ifdef CONFIG_CONTEXT_TRACKING_IDLE
        atomic_t dynticks;              /* Even value for idle, else odd. */
        long dynticks_nesting;          /* Track process nesting level. */
+       long dynticks_nmi_nesting;      /* Track irq/NMI nesting level. */
 #endif
 };
 
@@ -66,6 +70,18 @@ static __always_inline long ct_dynticks_nesting_cpu(int cpu)
 
        return ct->dynticks_nesting;
 }
+
+static __always_inline long ct_dynticks_nmi_nesting(void)
+{
+       return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
+}
+
+static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
+{
+       struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+       return ct->dynticks_nmi_nesting;
+}
 #endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
 
 #ifdef CONFIG_CONTEXT_TRACKING_USER
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index dfefe04..7c3033e 100644
@@ -26,6 +26,7 @@
 DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
 #ifdef CONFIG_CONTEXT_TRACKING_IDLE
        .dynticks_nesting = 1,
+       .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
        .dynticks = ATOMIC_INIT(1),
 #endif
 };
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 4916077..7b4a88d 100644
 
 #include <trace/events/rcu.h>
 
-/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
-#define DYNTICK_IRQ_NONIDLE    ((LONG_MAX / 2) + 1)
-
-
 /*
  * Grace-period counter management.
  */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f6bf328..006939b 100644
@@ -75,7 +75,6 @@
 /* Data structures. */
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
-       .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 #ifdef CONFIG_RCU_NOCB_CPU
        .cblist.flags = SEGCBLIST_RCU_CORE,
 #endif
@@ -437,11 +436,11 @@ static int rcu_is_cpu_rrupt_from_idle(void)
        /* Check for counter underflows */
        RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
                         "RCU dynticks_nesting counter underflow!");
-       RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
+       RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
                         "RCU dynticks_nmi_nesting counter underflow/zero!");
 
        /* Are we at first interrupt nesting level? */
-       nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
+       nesting = ct_dynticks_nmi_nesting();
        if (nesting > 1)
                return false;
 
@@ -612,11 +611,10 @@ EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
  */
 static noinstr void rcu_eqs_enter(bool user)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct context_tracking *ct = this_cpu_ptr(&context_tracking);
 
-       WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
-       WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
+       WARN_ON_ONCE(ct_dynticks_nmi_nesting() != DYNTICK_IRQ_NONIDLE);
+       WRITE_ONCE(ct->dynticks_nmi_nesting, 0);
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     ct_dynticks_nesting() == 0);
        if (ct_dynticks_nesting() != 1) {
@@ -734,7 +732,7 @@ noinstr void rcu_user_enter(void)
  * rcu_nmi_exit - inform RCU of exit from NMI context
  *
  * If we are returning from the outermost NMI handler that interrupted an
- * RCU-idle period, update ct->dynticks and rdp->dynticks_nmi_nesting
+ * RCU-idle period, update ct->dynticks and ct->dynticks_nmi_nesting
  * to let the RCU grace-period handling know that the CPU is back to
  * being RCU-idle.
  *
@@ -744,7 +742,6 @@ noinstr void rcu_user_enter(void)
 noinstr void rcu_nmi_exit(void)
 {
        struct context_tracking *ct = this_cpu_ptr(&context_tracking);
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
        instrumentation_begin();
        /*
@@ -752,25 +749,25 @@ noinstr void rcu_nmi_exit(void)
         * (We are exiting an NMI handler, so RCU better be paying attention
         * to us!)
         */
-       WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
+       WARN_ON_ONCE(ct_dynticks_nmi_nesting() <= 0);
        WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
 
        /*
         * If the nesting level is not 1, the CPU wasn't RCU-idle, so
         * leave it in non-RCU-idle state.
         */
-       if (rdp->dynticks_nmi_nesting != 1) {
-               trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
+       if (ct_dynticks_nmi_nesting() != 1) {
+               trace_rcu_dyntick(TPS("--="), ct_dynticks_nmi_nesting(), ct_dynticks_nmi_nesting() - 2,
                                  ct_dynticks());
-               WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
-                          rdp->dynticks_nmi_nesting - 2);
+               WRITE_ONCE(ct->dynticks_nmi_nesting, /* No store tearing. */
+                          ct_dynticks_nmi_nesting() - 2);
                instrumentation_end();
                return;
        }
 
        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
-       trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, ct_dynticks());
-       WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+       trace_rcu_dyntick(TPS("Startirq"), ct_dynticks_nmi_nesting(), 0, ct_dynticks());
+       WRITE_ONCE(ct->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
        // instrumentation for the noinstr rcu_dynticks_eqs_enter()
        instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));
@@ -794,7 +791,7 @@ void rcu_irq_exit_check_preempt(void)
 
        RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
                         "RCU dynticks_nesting counter underflow/zero!");
-       RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
+       RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
                         DYNTICK_IRQ_NONIDLE,
                         "Bad RCU  dynticks_nmi_nesting counter\n");
        RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
@@ -813,11 +810,9 @@ void rcu_irq_exit_check_preempt(void)
 static void noinstr rcu_eqs_exit(bool user)
 {
        struct context_tracking *ct = this_cpu_ptr(&context_tracking);
-       struct rcu_data *rdp;
        long oldval;
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
-       rdp = this_cpu_ptr(&rcu_data);
        oldval = ct_dynticks_nesting();
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
        if (oldval) {
@@ -837,8 +832,8 @@ static void noinstr rcu_eqs_exit(bool user)
        trace_rcu_dyntick(TPS("End"), ct_dynticks_nesting(), 1, ct_dynticks());
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
        WRITE_ONCE(ct->dynticks_nesting, 1);
-       WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
-       WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
+       WARN_ON_ONCE(ct_dynticks_nmi_nesting());
+       WRITE_ONCE(ct->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
        instrumentation_end();
 }
 
@@ -941,7 +936,7 @@ void __rcu_irq_enter_check_tick(void)
  * rcu_nmi_enter - inform RCU of entry to NMI context
  *
  * If the CPU was idle from RCU's viewpoint, update ct->dynticks and
- * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * ct->dynticks_nmi_nesting to let the RCU grace-period handling know
  * that the CPU is active.  This implementation permits nested NMIs, as
  * long as the nesting level does not overflow an int.  (You will probably
  * run out of stack space first.)
@@ -952,11 +947,10 @@ void __rcu_irq_enter_check_tick(void)
 noinstr void rcu_nmi_enter(void)
 {
        long incby = 2;
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct context_tracking *ct = this_cpu_ptr(&context_tracking);
 
        /* Complain about underflow. */
-       WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
+       WARN_ON_ONCE(ct_dynticks_nmi_nesting() < 0);
 
        /*
         * If idle from RCU viewpoint, atomically increment ->dynticks
@@ -990,11 +984,11 @@ noinstr void rcu_nmi_enter(void)
        }
 
        trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
-                         rdp->dynticks_nmi_nesting,
-                         rdp->dynticks_nmi_nesting + incby, ct_dynticks());
+                         ct_dynticks_nmi_nesting(),
+                         ct_dynticks_nmi_nesting() + incby, ct_dynticks());
        instrumentation_end();
-       WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
-                  rdp->dynticks_nmi_nesting + incby);
+       WRITE_ONCE(ct->dynticks_nmi_nesting, /* Prevent store tearing. */
+                  ct_dynticks_nmi_nesting() + incby);
        barrier();
 }
 
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 650ff3c..72dbf85 100644
@@ -187,7 +187,6 @@ struct rcu_data {
 
        /* 3) dynticks interface. */
        int dynticks_snap;              /* Per-GP tracking for dynticks. */
-       long dynticks_nmi_nesting;      /* Track irq/NMI nesting level. */
        bool rcu_need_heavy_qs;         /* GP old, so heavy quiescent state! */
        bool rcu_urgent_qs;             /* GP old need light quiescent state. */
        bool rcu_forced_tick;           /* Forced tick to provide QS. */
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index a9c8225..2683ce0 100644
@@ -479,7 +479,7 @@ static void print_cpu_stall_info(int cpu)
                                "!."[!delta],
               ticks_value, ticks_title,
               rcu_dynticks_snap(cpu) & 0xfff,
-              ct_dynticks_nesting_cpu(cpu), rdp->dynticks_nmi_nesting,
+              ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
               data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
               rcuc_starved ? buf : "",
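
Usage note (a hypothetical caller, not part of this commit): with the field
moved, readers go through the new context-tracking accessors instead of
rcu_data:

	/* Assumes preemption is disabled for the __this_cpu_read() variant. */
	long nesting = ct_dynticks_nmi_nesting();         /* this CPU */
	long remote  = ct_dynticks_nmi_nesting_cpu(cpu);  /* a given CPU "cpu" */

Both accessors are __always_inline and touch only the per-CPU
context_tracking structure, matching the pre-existing
ct_dynticks_nesting()/ct_dynticks_nesting_cpu() pair.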