sched/core: Reorganize ttwu_do_wakeup() and ttwu_do_activate()
author: Chengming Zhou <zhouchengming@bytedance.com>
Fri, 23 Dec 2022 10:32:57 +0000 (18:32 +0800)
committer: Ingo Molnar <mingo@kernel.org>
Sat, 7 Jan 2023 09:48:38 +0000 (10:48 +0100)
ttwu_do_activate() is used for a complete wakeup, in which we
activate_task() and use ttwu_do_wakeup() to mark the task runnable
and perform wakeup-preemption; it also calls the class->task_woken()
callback and updates rq->idle_stamp.

Since ttwu_runnable() is not a complete wakeup, it does not need all of
the work done in ttwu_do_wakeup(). Move that work into ttwu_do_activate()
to simplify ttwu_do_wakeup(), leaving it to only mark the task runnable,
so it can be reused by ttwu_runnable() and try_to_wake_up().

This patch should not have any functional changes.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20221223103257.4962-2-zhouchengming@bytedance.com
kernel/sched/core.c

index 255a318..03b8529 100644 (file)
@@ -3625,14 +3625,39 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 }
 
 /*
- * Mark the task runnable and perform wakeup-preemption.
+ * Mark the task runnable.
  */
-static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
-                          struct rq_flags *rf)
+static inline void ttwu_do_wakeup(struct task_struct *p)
 {
-       check_preempt_curr(rq, p, wake_flags);
        WRITE_ONCE(p->__state, TASK_RUNNING);
        trace_sched_wakeup(p);
+}
+
+static void
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
+                struct rq_flags *rf)
+{
+       int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
+
+       lockdep_assert_rq_held(rq);
+
+       if (p->sched_contributes_to_load)
+               rq->nr_uninterruptible--;
+
+#ifdef CONFIG_SMP
+       if (wake_flags & WF_MIGRATED)
+               en_flags |= ENQUEUE_MIGRATED;
+       else
+#endif
+       if (p->in_iowait) {
+               delayacct_blkio_end(p);
+               atomic_dec(&task_rq(p)->nr_iowait);
+       }
+
+       activate_task(rq, p, en_flags);
+       check_preempt_curr(rq, p, wake_flags);
+
+       ttwu_do_wakeup(p);
 
 #ifdef CONFIG_SMP
        if (p->sched_class->task_woken) {
@@ -3662,31 +3687,6 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
 #endif
 }
 
-static void
-ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
-                struct rq_flags *rf)
-{
-       int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
-
-       lockdep_assert_rq_held(rq);
-
-       if (p->sched_contributes_to_load)
-               rq->nr_uninterruptible--;
-
-#ifdef CONFIG_SMP
-       if (wake_flags & WF_MIGRATED)
-               en_flags |= ENQUEUE_MIGRATED;
-       else
-#endif
-       if (p->in_iowait) {
-               delayacct_blkio_end(p);
-               atomic_dec(&task_rq(p)->nr_iowait);
-       }
-
-       activate_task(rq, p, en_flags);
-       ttwu_do_wakeup(rq, p, wake_flags, rf);
-}
-
 /*
  * Consider @p being inside a wait loop:
  *
@@ -3728,8 +3728,7 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
                        update_rq_clock(rq);
                        check_preempt_curr(rq, p, wake_flags);
                }
-               WRITE_ONCE(p->__state, TASK_RUNNING);
-               trace_sched_wakeup(p);
+               ttwu_do_wakeup(p);
                ret = 1;
        }
        __task_rq_unlock(rq, &rf);
@@ -4095,8 +4094,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                        goto out;
 
                trace_sched_waking(p);
-               WRITE_ONCE(p->__state, TASK_RUNNING);
-               trace_sched_wakeup(p);
+               ttwu_do_wakeup(p);
                goto out;
        }