rcu-tasks: Pull in tasks blocked within RCU Tasks Trace readers
authorPaul E. McKenney <paulmck@kernel.org>
Thu, 19 May 2022 00:19:27 +0000 (17:19 -0700)
committerPaul E. McKenney <paulmck@kernel.org>
Mon, 20 Jun 2022 16:22:29 +0000 (09:22 -0700)
This commit scans each CPU's ->rtp_blkd_tasks list, adding the tasks on
each list to the list of holdout tasks.  This will cause the current RCU
Tasks Trace grace period to wait until these tasks exit their RCU Tasks
Trace read-side critical sections.  This commit enables later work that
omits the scan of the full task list.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: KP Singh <kpsingh@kernel.org>
kernel/rcu/tasks.h

index a8f9586..d318cdf 100644 (file)
@@ -1492,7 +1492,11 @@ static void rcu_tasks_trace_pertask_handler(void *hop_in)
 /* Initialize for a new RCU-tasks-trace grace period. */
 static void rcu_tasks_trace_pregp_step(struct list_head *hop)
 {
+       LIST_HEAD(blkd_tasks);
        int cpu;
+       unsigned long flags;
+       struct rcu_tasks_percpu *rtpcp;
+       struct task_struct *t;
 
        // There shouldn't be any old IPIs, but...
        for_each_possible_cpu(cpu)
@@ -1506,6 +1510,26 @@ static void rcu_tasks_trace_pregp_step(struct list_head *hop)
        // allow safe access to the hop list.
        for_each_online_cpu(cpu)
                smp_call_function_single(cpu, rcu_tasks_trace_pertask_handler, hop, 1);
+
+       // Only after all running tasks have been accounted for is it
+       // safe to take care of the tasks that have blocked within their
+       // current RCU tasks trace read-side critical section.
+       for_each_possible_cpu(cpu) {
+               rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
+               raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
+               list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
+               while (!list_empty(&blkd_tasks)) {
+                       rcu_read_lock();
+                       t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
+                       list_del_init(&t->trc_blkd_node);
+                       list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
+                       raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+                       rcu_tasks_trace_pertask(t, hop);
+                       rcu_read_unlock();
+                       raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
+               }
+               raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+       }
 }
 
 /*