check_cpu_stall(rdp);
/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
- if (rcu_nohz_full_cpu(&rcu_state))
+ if (rcu_nohz_full_cpu())
return 0;
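The check above is what lets a NO_HZ_FULL CPU opt out of RCU-core processing when nothing urgent is pending. A minimal sketch of that kind of early-out predicate, with an invented demo_urgent() standing in for the real grace-period tests (callers are assumed to run with preemption disabled):

#include <linux/smp.h>
#include <linux/tick.h>

static bool demo_urgent(void)
{
	return false; /* placeholder for the real urgency tests */
}

/* Skip per-tick work on nohz_full CPUs unless something is urgent;
 * assumes preemption is disabled so smp_processor_id() is stable. */
static bool demo_can_skip_core(void)
{
	return tick_nohz_full_cpu(smp_processor_id()) && !demo_urgent();
}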
/* Is the RCU core waiting for a quiescent state from this CPU? */
continue;
rdp = per_cpu_ptr(&rcu_data, cpu);
if (rcu_is_nocb_cpu(cpu)) {
- if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
+ if (!rcu_nocb_cpu_needs_barrier(cpu)) {
_rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
rsp->barrier_sequence);
} else {
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
+static bool rcu_nocb_cpu_needs_barrier(int cpu);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
+static void __init rcu_organize_nocb_kthreads(void);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_bind_gp_kthread(void);
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
+static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);
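All of the declaration changes above follow a single pattern: with RCU's flavors consolidated there is exactly one rcu_state, so threading a pointer to it through every call is noise. A minimal illustration of the pattern, with invented names:

struct demo_state { unsigned long gp_start; };
static struct demo_state demo_state; /* the one and only instance */

/* Before: every helper takes the only possible pointer. */
static bool demo_gp_started_old(struct demo_state *sp)
{
	return sp->gp_start != 0;
}

/* After: helpers reference the singleton directly. */
static bool demo_gp_started(void)
{
	return demo_state.gp_start != 0;
}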
- * Does the specified CPU need an RCU callback for the specified flavor
+ * Does the specified CPU need an RCU callback for this invocation
* of rcu_barrier()?
*/
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+static bool rcu_nocb_cpu_needs_barrier(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
unsigned long ret;
for_each_rcu_flavor(rsp) {
for_each_cpu(cpu, rcu_nocb_mask)
init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu));
- rcu_organize_nocb_kthreads(rsp);
+ rcu_organize_nocb_kthreads();
}
}
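For reference, the for_each_cpu()/per_cpu_ptr() walk used above is the stock way to touch per-CPU state for every CPU in an opt-in mask. A self-contained sketch with an invented per-CPU structure:

#include <linux/cpumask.h>
#include <linux/percpu.h>

struct demo_pcpu { bool offloaded; };
static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu);

/* Mark every CPU in the given mask, as rcu_nocb_mask is walked above. */
static void demo_mark_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		per_cpu_ptr(&demo_pcpu, cpu)->offloaded = true;
}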
* brought online out of order, this can require re-organizing the
* leader-follower relationships.
*/
-static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
+static void rcu_spawn_one_nocb_kthread(int cpu)
{
struct rcu_data *rdp;
struct rcu_data *rdp_last;
-/* Spawn the kthread for this CPU and RCU flavor. */
+/* Spawn the kthread for this CPU. */
t = kthread_run(rcu_nocb_kthread, rdp_spawn,
- "rcuo%c/%d", rsp->abbr, cpu);
+ "rcuo%c/%d", rcu_state.abbr, cpu);
BUG_ON(IS_ERR(t));
WRITE_ONCE(rdp_spawn->nocb_kthread, t);
}
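kthread_run() above both creates and wakes the kthread, and its printf-style name argument is what yields the per-CPU names such as rcuop/3. A minimal sketch of the same spawn-and-name pattern, with invented names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int demo_thread_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

/* Spawn one named worker for a CPU; returns NULL on failure. */
static struct task_struct *demo_spawn(int cpu)
{
	struct task_struct *t;

	t = kthread_run(demo_thread_fn, NULL, "demo/%d", cpu);
	return IS_ERR(t) ? NULL : t;
}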
if (rcu_scheduler_fully_active)
for_each_rcu_flavor(rsp)
- rcu_spawn_one_nocb_kthread(rsp, cpu);
+ rcu_spawn_one_nocb_kthread(cpu);
}
/*
* Initialize leader-follower relationships for all no-CBs CPUs.
*/
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
+static void __init rcu_organize_nocb_kthreads(void)
{
int cpu;
int ls = rcu_nocb_leader_stride;
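The layout this function builds groups the no-CBs CPUs into sets of roughly ls CPUs, one leader per set, with followers handing callbacks to their leader; the real code also derives a default stride from the CPU count when rcu_nocb_leader_stride is unset. A simplified sketch of just the grouping rule (it ignores rcu_nocb_mask and out-of-order onlining):

/* Simplified: CPU n leads its group when n % ls == 0; every other
 * CPU follows the leader at the start of its group. */
static int demo_leader_of(int cpu, int ls)
{
	return cpu - (cpu % ls);
}

With ls = 4, CPUs 0-3 share leader 0, CPUs 4-7 share leader 4, and so on.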
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+static bool rcu_nocb_cpu_needs_barrier(int cpu)
{
WARN_ON_ONCE(1); /* Should be dead code. */
return false;
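This is the usual Kconfig stub pattern: when CONFIG_RCU_NOCB_CPU is off, a trivial definition keeps callers compiling, and WARN_ON_ONCE() flags the supposedly dead path if it ever runs. The general shape, with an invented feature:

#include <linux/bug.h>

#ifdef CONFIG_DEMO_FEATURE
static bool demo_needs_work(int cpu)
{
	return cpu >= 0; /* stand-in for a real implementation */
}
#else
static bool demo_needs_work(int cpu)
{
	WARN_ON_ONCE(1); /* callers should never reach this */
	return false;
}
#endif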
* This code relies on the fact that all NO_HZ_FULL CPUs are also
* CONFIG_RCU_NOCB_CPU CPUs.
*/
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
+static bool rcu_nohz_full_cpu(void)
{
#ifdef CONFIG_NO_HZ_FULL
if (tick_nohz_full_cpu(smp_processor_id()) &&
(!rcu_gp_in_progress() ||
- ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
+ ULONG_CMP_LT(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
return false;
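ULONG_CMP_LT() is used above because jiffies wraps: a raw < misorders timestamps that straddle the wrap point, while the kernel's difference-based macro does not. A userspace-runnable illustration (the macro body matches the kernel's definition; the values are contrived):

#include <limits.h>
#include <stdio.h>

/* Wrap-safe "a < b" for free-running counters, as defined in the
 * kernel's RCU headers. */
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long deadline = ULONG_MAX - 99; /* just before the wrap */
	unsigned long now = 5;                   /* just after the wrap */

	printf("raw <       : %d\n", now < deadline);              /* 1: wrong */
	printf("ULONG_CMP_LT: %d\n", ULONG_CMP_LT(now, deadline)); /* 0: right */
	return 0;
}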