sched/pelt: Fix update of blocked PELT ordering
author     Vincent Guittot <vincent.guittot@linaro.org>
           Wed, 30 Oct 2019 11:18:29 +0000 (12:18 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Wed, 13 Nov 2019 07:01:31 +0000 (08:01 +0100)
update_cfs_rq_load_avg() can call cpufreq_update_util() to trigger a
frequency update. Make sure that the RT, DL and IRQ PELT signals have
been updated before cpufreq is called, so that the frequency request is
based on up-to-date utilization for all classes.
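
For illustration only, here is a minimal user-space toy model of the
problem. The frequency request aggregates the CFS, RT, DL and IRQ
utilization signals; if the CFS update fires the cpufreq hook while the
other signals are still stale, the requested capacity is computed from
out-of-date values. All helper names (decay(), frequency_update(),
cfs_update()) and all numbers below are made up for the sketch; this is
not the kernel's implementation.

/* Toy model (not kernel code): why RT/DL/IRQ must be refreshed before
 * the CFS update that may fire the frequency hook. */
#include <stdio.h>

/* Blocked utilization per class, decaying while the CPU stays idle. */
static unsigned int util_cfs = 200, util_rt = 300, util_dl = 100, util_irq = 50;

/* Halve a signal to mimic PELT decay over one update period. */
static void decay(unsigned int *util) { *util /= 2; }

/* Stand-in for cpufreq_update_util(): picks a capacity from the sum. */
static void frequency_update(const char *when)
{
	unsigned int total = util_cfs + util_rt + util_dl + util_irq;
	printf("%s: requested capacity ~%u\n", when, total);
}

/* CFS blocked-load update; like update_cfs_rq_load_avg() it may call
 * the frequency hook as a side effect. */
static void cfs_update(const char *when)
{
	decay(&util_cfs);
	frequency_update(when);
}

int main(void)
{
	/* Buggy order: CFS first, so RT/DL/IRQ are still stale when the
	 * frequency request is computed. */
	cfs_update("CFS before RT/DL/IRQ (stale)");
	decay(&util_rt); decay(&util_dl); decay(&util_irq);

	/* Reset and redo in the fixed order: decay the other classes
	 * first, then let the CFS update trigger the request. */
	util_cfs = 200; util_rt = 300; util_dl = 100; util_irq = 50;
	decay(&util_rt); decay(&util_dl); decay(&util_irq);
	cfs_update("RT/DL/IRQ before CFS (fresh)");
	return 0;
}

The two printed requests differ (550 vs. 325 in this toy), which is the
effect the reordering below avoids.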

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dietmar.eggemann@arm.com
Cc: dsmythies@telus.net
Cc: juri.lelli@redhat.com
Cc: mgorman@suse.de
Cc: rostedt@goodmis.org
Fixes: 371bf4273269 ("sched/rt: Add rt_rq utilization tracking")
Fixes: 3727e0e16340 ("sched/dl: Add dl_rq utilization tracking")
Fixes: 91c27493e78d ("sched/irq: Add IRQ utilization tracking")
Link: https://lkml.kernel.org/r/1572434309-32512-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22a2fed..69a81a5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7547,6 +7547,19 @@ static void update_blocked_averages(int cpu)
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
 
+       /*
+        * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+        * that RT, DL and IRQ signals have been updated before updating CFS.
+        */
+       curr_class = rq->curr->sched_class;
+       update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+       update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+       update_irq_load_avg(rq, 0);
+
+       /* Don't need periodic decay once load/util_avg are null */
+       if (others_have_blocked(rq))
+               done = false;
+
        /*
         * Iterates the task_group tree in a bottom up fashion, see
         * list_add_leaf_cfs_rq() for details.
@@ -7574,14 +7587,6 @@ static void update_blocked_averages(int cpu)
                        done = false;
        }
 
-       curr_class = rq->curr->sched_class;
-       update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-       update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-       update_irq_load_avg(rq, 0);
-       /* Don't need periodic decay once load/util_avg are null */
-       if (others_have_blocked(rq))
-               done = false;
-
        update_blocked_load_status(rq, !done);
        rq_unlock_irqrestore(rq, &rf);
 }
@@ -7642,12 +7647,18 @@ static inline void update_blocked_averages(int cpu)
 
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
-       update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
+       /*
+        * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+        * that RT, DL and IRQ signals have been updated before updating CFS.
+        */
        curr_class = rq->curr->sched_class;
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
+
+       update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
        update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
        rq_unlock_irqrestore(rq, &rf);
 }