sched/pelt: Fix update of blocked PELT ordering
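
update_cfs_rq_load_avg() can call cpufreq_update_util(), so the RT, DL
and IRQ PELT signals must have been decayed before the CFS update runs;
otherwise the frequency selection sees stale utilization for those
classes. Move their update ahead of the CFS update in both variants of
update_blocked_averages(). The blob range below (682a754..69a81a5) also
carries the introduction of the ->balance() callback (balance_fair())
and the matching put_prev_task_fair() signature change.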
[linux-2.6-microblaze.git] kernel/sched/fair.c
index 682a754..69a81a5 100644
@@ -6570,6 +6570,15 @@ static void task_dead_fair(struct task_struct *p)
 {
        remove_entity_load_avg(&p->se);
 }
+
+static int
+balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+       if (rq->nr_running)
+               return 1;
+
+       return newidle_balance(rq, rf) != 0;
+}
 #endif /* CONFIG_SMP */
 
 static unsigned long wakeup_gran(struct sched_entity *se)
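
For context, balance_fair() is invoked from the core scheduler, which is
not part of this file. A rough sketch of the caller in
kernel/sched/core.c from the same series (approximate, not shown in this
diff): every class from prev's downward gets a chance to pull work
before prev is requeued.

	for_class_range(class, prev->sched_class, &idle_sched_class) {
		/* stop as soon as a class reports runnable work */
		if (class->balance(rq, prev, rf))
			break;
	}

	put_prev_task(rq, prev);

Because this balancing pass runs before put_prev_task(), the fair
class's put_prev_task_fair() no longer needs the rq_flags argument,
which is why its signature shrinks in a later hunk.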
@@ -6746,7 +6755,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
        int new_tasks;
 
 again:
-       if (!cfs_rq->nr_running)
+       if (!sched_fair_runnable(rq))
                goto idle;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
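
sched_fair_runnable() is not defined in this file; assuming the mainline
helper added alongside this change in kernel/sched/sched.h, the test is
unchanged in substance from the old open-coded check:

	static inline bool sched_fair_runnable(struct rq *rq)
	{
		return rq->cfs.nr_running > 0;
	}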
@@ -6884,7 +6893,7 @@ idle:
 /*
  * Account for a descheduled task:
  */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 {
        struct sched_entity *se = &prev->se;
        struct cfs_rq *cfs_rq;
@@ -7538,6 +7547,19 @@ static void update_blocked_averages(int cpu)
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
 
+       /*
+        * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+        * that RT, DL and IRQ signals have been updated before updating CFS.
+        */
+       curr_class = rq->curr->sched_class;
+       update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+       update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+       update_irq_load_avg(rq, 0);
+
+       /* Don't need periodic decay once load/util_avg are null */
+       if (others_have_blocked(rq))
+               done = false;
+
        /*
         * Iterates the task_group tree in a bottom up fashion, see
         * list_add_leaf_cfs_rq() for details.
@@ -7565,14 +7587,6 @@ static void update_blocked_averages(int cpu)
                        done = false;
        }
 
-       curr_class = rq->curr->sched_class;
-       update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-       update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-       update_irq_load_avg(rq, 0);
-       /* Don't need periodic decay once load/util_avg are null */
-       if (others_have_blocked(rq))
-               done = false;
-
        update_blocked_load_status(rq, !done);
        rq_unlock_irqrestore(rq, &rf);
 }
@@ -7633,12 +7647,18 @@ static inline void update_blocked_averages(int cpu)
 
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
-       update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
+       /*
+        * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+        * that RT, DL and IRQ signals have been updated before updating CFS.
+        */
        curr_class = rq->curr->sched_class;
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
+
+       update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
        update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
        rq_unlock_irqrestore(rq, &rf);
 }
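
The ordering matters because the cpufreq hook triggered from the CFS
update aggregates utilization across all classes. A simplified sketch of
the consumer side (illustrative name and structure only; the real helper
also factors in the IRQ average):

	static unsigned long cpu_util_sketch(struct rq *rq)
	{
		unsigned long util;

		util  = READ_ONCE(rq->cfs.avg.util_avg);  /* CFS */
		util += READ_ONCE(rq->avg_rt.util_avg);   /* RT, now decayed first */
		util += READ_ONCE(rq->avg_dl.util_avg);   /* DL, likewise */

		return util;
	}

With the old ordering, the rt/dl/irq averages read here could still be
stale at the moment the CFS-side update fired the notifier.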
@@ -10414,11 +10434,11 @@ const struct sched_class fair_sched_class = {
        .check_preempt_curr     = check_preempt_wakeup,
 
        .pick_next_task         = pick_next_task_fair,
-
        .put_prev_task          = put_prev_task_fair,
        .set_next_task          = set_next_task_fair,
 
 #ifdef CONFIG_SMP
+       .balance                = balance_fair,
        .select_task_rq         = select_task_rq_fair,
        .migrate_task_rq        = migrate_task_rq_fair,