rcu: Change return type of rcu_spawn_one_boost_kthread()
author Byungchul Park <byungchul.park@lge.com>
Mon, 1 Jul 2019 00:40:39 +0000 (09:40 +0900)
committer Paul E. McKenney <paulmck@linux.ibm.com>
Thu, 1 Aug 2019 21:05:51 +0000 (14:05 -0700)
The return value of rcu_spawn_one_boost_kthread() is no longer used.
This commit therefore changes its return type from int to void and
removes the now-unneeded casts to void from its callers.  Because the
callers can no longer observe a failure, errors from kthread_create()
are reported with WARN_ON_ONCE() instead of being returned.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
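
As an illustration only (not part of the patch), the sketch below shows the
general pattern this change applies: a spawn helper whose result every caller
ignores is made void, and a kthread_create() failure is flagged locally with
WARN_ON_ONCE() rather than passed back as a negated errno.  All names here
(example_task, example_thread_fn, example_spawn_kthread) are hypothetical.

    #include <linux/err.h>
    #include <linux/bug.h>
    #include <linux/delay.h>
    #include <linux/sched.h>
    #include <linux/kthread.h>

    static struct task_struct *example_task;

    /* Hypothetical worker: sleep until asked to stop. */
    static int example_thread_fn(void *arg)
    {
            while (!kthread_should_stop())
                    msleep(100);
            return 0;
    }

    /*
     * Before the change, a helper like this returned 0 or a negated errno
     * that every caller cast to void; afterwards it returns void and
     * reports a kthread_create() failure once, where it happens.
     */
    static void example_spawn_kthread(void)
    {
            struct task_struct *t;

            if (example_task)               /* already running, nothing to do */
                    return;

            t = kthread_create(example_thread_fn, NULL, "example/%d", 0);
            if (WARN_ON_ONCE(IS_ERR(t)))    /* warn once instead of returning PTR_ERR(t) */
                    return;

            example_task = t;
            wake_up_process(t);
    }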
kernel/rcu/tree_plugin.h

index 3f1b504..307ae6e 100644
@@ -1123,7 +1123,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * already exist.  We only create this kthread for preemptible RCU.
  * Returns zero if all is well, a negated errno otherwise.
  */
-static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
+static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 {
        int rnp_index = rnp - rcu_get_root();
        unsigned long flags;
@@ -1131,25 +1131,27 @@ static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
        struct task_struct *t;
 
        if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
-               return 0;
+               return;
 
        if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
-               return 0;
+               return;
 
        rcu_state.boost = 1;
+
        if (rnp->boost_kthread_task != NULL)
-               return 0;
+               return;
+
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
                           "rcub/%d", rnp_index);
-       if (IS_ERR(t))
-               return PTR_ERR(t);
+       if (WARN_ON_ONCE(IS_ERR(t)))
+               return;
+
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        rnp->boost_kthread_task = t;
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        sp.sched_priority = kthread_prio;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
-       return 0;
 }
 
 /*
@@ -1190,7 +1192,7 @@ static void __init rcu_spawn_boost_kthreads(void)
        struct rcu_node *rnp;
 
        rcu_for_each_leaf_node(rnp)
-               (void)rcu_spawn_one_boost_kthread(rnp);
+               rcu_spawn_one_boost_kthread(rnp);
 }
 
 static void rcu_prepare_kthreads(int cpu)
@@ -1200,7 +1202,7 @@ static void rcu_prepare_kthreads(int cpu)
 
        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
        if (rcu_scheduler_fully_active)
-               (void)rcu_spawn_one_boost_kthread(rnp);
+               rcu_spawn_one_boost_kthread(rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */