Merge tag 'sched-core-2022-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 65f25a3..8360d86 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -330,7 +330,7 @@ void rcu_note_context_switch(bool preempt)
                 * then queue the task as required based on the states
                 * of any ongoing and expedited grace periods.
                 */
-               WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
+               WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp));
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                trace_rcu_preempt_task(rcu_state.name,
                                       t->pid,
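
The open-coded online test against the leaf node's mask is replaced by a named helper. Judging only from the expression this hunk removes, the helper presumably boils down to the sketch below; the authoritative definition lives elsewhere in the RCU sources and may carry extra checks.

/*
 * Sketch of the assumed helper: does the CPU behind @rdp appear in its
 * leaf rcu_node's online mask?  Reconstructed from the expression the
 * hunk above removes, not copied from the kernel sources.
 */
static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
{
	return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
}
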
@@ -556,16 +556,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
 
-               /* Unboost if we were boosted. */
-               if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
-                       rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
-
                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
                if (!empty_exp && empty_exp_now)
                        rcu_report_exp_rnp(rnp, true);
+
+               /* Unboost if we were boosted. */
+               if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
+                       rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
        } else {
                local_irq_restore(flags);
        }
@@ -773,7 +773,6 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
        int cpu;
        int i;
        struct list_head *lhp;
-       bool onl;
        struct rcu_data *rdp;
        struct rcu_node *rnp1;
 
@@ -797,9 +796,8 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
        pr_cont("\n");
        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
-               onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
                pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
-                       cpu, ".o"[onl],
+                       cpu, ".o"[rcu_rdp_cpu_online(rdp)],
                        (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
                        (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
        }
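
With the helper returning bool, the temporary onl flag and its !! normalization are no longer needed, and the result can index the two-character string directly: ".o"[x] yields '.' for 0 and 'o' for 1, so the index must be exactly 0 or 1. A minimal standalone illustration of the idiom (ordinary userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	/* ".o"[flag] picks '.' when flag is 0 and 'o' when flag is 1. */
	for (int online = 0; online <= 1; online++)
		printf("cpu %d: %c\n", online, ".o"[online]);
	return 0;
}
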
@@ -996,12 +994,15 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
  */
 static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 #ifdef CONFIG_RCU_BOOST
        struct sched_param sp;
 
        sp.sched_priority = kthread_prio;
        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
+
+       WRITE_ONCE(rdp->rcuc_activity, jiffies);
 }
 
 #ifdef CONFIG_RCU_BOOST
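
rcu_cpu_kthread_setup() now stamps the new rdp->rcuc_activity field with the current jiffies value, giving the stall-warning machinery a heartbeat it can presumably compare against the clock to spot a starved rcuc kthread. A hedged sketch of that kind of consumer follows; the field name comes from this hunk, while the function name and the two-second threshold are illustrative rather than taken from the kernel sources.

/*
 * Illustrative consumer, not the in-tree implementation: consider the
 * rcuc kthread starved if its activity stamp is more than two seconds'
 * worth of jiffies in the past.
 */
static bool rcuc_seems_starved(struct rcu_data *rdp)
{
	unsigned long stamp = READ_ONCE(rdp->rcuc_activity);

	return time_after(jiffies, stamp + 2 * HZ);
}
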
@@ -1172,15 +1173,14 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
        struct sched_param sp;
        struct task_struct *t;
 
+       mutex_lock(&rnp->boost_kthread_mutex);
        if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
-               return;
-
-       rcu_state.boost = 1;
+               goto out;
 
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
                           "rcub/%d", rnp_index);
        if (WARN_ON_ONCE(IS_ERR(t)))
-               return;
+               goto out;
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        rnp->boost_kthread_task = t;
@@ -1188,6 +1188,9 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
        sp.sched_priority = kthread_prio;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+
+ out:
+       mutex_unlock(&rnp->boost_kthread_mutex);
 }
 
 /*
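
Taken together, the two hunks above turn the early returns in rcu_spawn_one_boost_kthread() into goto out so that every path drops the new boost_kthread_mutex, serializing concurrent spawn attempts for the same rcu_node (the rcu_state.boost = 1 assignment also moves out of this path). Reassembled from the hunk context for readability, the function now has roughly the following shape; the declarations and the unlock between the two hunks are filled in by assumption rather than copied from the sources.

static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - rcu_get_root();	/* assumed declaration */
	struct sched_param sp;
	struct task_struct *t;

	mutex_lock(&rnp->boost_kthread_mutex);
	if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
		goto out;

	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (WARN_ON_ONCE(IS_ERR(t)))
		goto out;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);	/* assumed */
	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */

 out:
	mutex_unlock(&rnp->boost_kthread_mutex);
}
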
@@ -1210,14 +1213,16 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
                return;
        if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
                return;
+       mutex_lock(&rnp->boost_kthread_mutex);
        for_each_leaf_node_possible_cpu(rnp, cpu)
                if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
                    cpu != outgoingcpu)
                        cpumask_set_cpu(cpu, cm);
        cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
-       if (cpumask_weight(cm) == 0)
+       if (cpumask_empty(cm))
                cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
        set_cpus_allowed_ptr(t, cm);
+       mutex_unlock(&rnp->boost_kthread_mutex);
        free_cpumask_var(cm);
 }
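
The affinity computation itself is unchanged by the new locking: take the leaf node's CPUs minus the outgoing one, intersect with the housekeeping set, and fall back to the full housekeeping set if that leaves nothing. cpumask_empty() is the cheaper way to ask "is anything left?" since it can stop at the first set bit, whereas cpumask_weight() counts them all. A toy userspace model of that computation using plain bitmasks, for illustration only (names and values are hypothetical):

#include <stdio.h>

/*
 * Toy model of the affinity logic in the hunk above: leaf CPUs minus
 * the outgoing CPU, restricted to housekeeping CPUs, falling back to
 * the full housekeeping set when the intersection is empty.
 */
static unsigned long boost_affinity(unsigned long leaf_cpus,
				    unsigned long housekeeping,
				    int outgoingcpu)
{
	unsigned long cm = leaf_cpus;

	if (outgoingcpu >= 0)
		cm &= ~(1UL << outgoingcpu);
	cm &= housekeeping;
	if (cm == 0)			/* cpumask_empty() analogue */
		cm = housekeeping;	/* cpumask_copy() analogue */
	return cm;
}

int main(void)
{
	/* Leaf node covers CPUs 0-3, CPU 1 is not housekeeping, CPU 0 is going offline. */
	printf("affinity mask: %#lx\n", boost_affinity(0xfUL, 0xdUL, 0));
	return 0;
}
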