diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 095b0aa..5146163 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3028,9 +3028,11 @@ enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       u32 divider = get_pelt_divider(&se->avg);
        sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-       cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+       sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+       /* See update_cfs_rq_load_avg() */
+       cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+                                         cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 #else
 static inline void
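
The hunk above establishes the pattern the rest of the patch repeats: rather than re-deriving cfs_rq->avg.load_sum from load_avg * divider, dequeue now subtracts the entity's own contribution and only clamps the sum to the smallest value still consistent with the average. The following standalone sketch (not part of the patch) approximates the two helpers this relies on; the real ones are the sub_positive() macro in kernel/sched/fair.c and PELT_MIN_DIVIDER in kernel/sched/pelt.h, assumed here to be LOAD_AVG_MAX - 1024 with LOAD_AVG_MAX == 47742, and sync_sum_lower_bound() is an illustrative name, not a kernel function.

    /* Standalone sketch -- simplified stand-ins, not the kernel implementation. */
    #include <stdint.h>

    #define LOAD_AVG_MAX     47742                   /* maximum PELT geometric sum */
    #define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)   /* smallest possible PELT divider */

    /* Subtract val from *ptr without letting the result wrap below zero. */
    static inline void sub_positive(uint64_t *ptr, uint64_t val)
    {
            uint64_t old = *ptr, res = old - val;

            if (res > old)          /* subtraction wrapped around */
                    res = 0;
            *ptr = res;
    }

    /*
     * After removing a contribution, keep the invariant sum >= avg * minimum
     * divider.  period_contrib may have moved since the last sync, so the
     * minimum divider is the only lower bound that is always valid.
     */
    static inline uint64_t sync_sum_lower_bound(uint64_t sum, uint64_t avg)
    {
            uint64_t lo = avg * PELT_MIN_DIVIDER;

            return sum < lo ? lo : sum;
    }

With these, the new dequeue path is "sub_positive the avg, sub_positive the weighted sum, then clamp", which loses far less precision than rebuilding the sum from the truncated average.
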
@@ -3381,7 +3383,6 @@ void set_task_rq_fair(struct sched_entity *se,
        se->avg.last_update_time = n_last_update_time;
 }
 
-
 /*
  * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
  * propagate its contribution. The key to this propagation is the invariant
@@ -3449,15 +3450,14 @@ void set_task_rq_fair(struct sched_entity *se,
  * XXX: only do this for the part of runnable > running ?
  *
  */
-
 static inline void
 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
-       u32 divider;
+       long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
+       u32 new_sum, divider;
 
        /* Nothing to update */
-       if (!delta)
+       if (!delta_avg)
                return;
 
        /*
@@ -3466,23 +3466,30 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
         */
        divider = get_pelt_divider(&cfs_rq->avg);
 
+
        /* Set new sched_entity's utilization */
        se->avg.util_avg = gcfs_rq->avg.util_avg;
-       se->avg.util_sum = se->avg.util_avg * divider;
+       new_sum = se->avg.util_avg * divider;
+       delta_sum = (long)new_sum - (long)se->avg.util_sum;
+       se->avg.util_sum = new_sum;
 
        /* Update parent cfs_rq utilization */
-       add_positive(&cfs_rq->avg.util_avg, delta);
-       cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
+       add_positive(&cfs_rq->avg.util_avg, delta_avg);
+       add_positive(&cfs_rq->avg.util_sum, delta_sum);
+
+       /* See update_cfs_rq_load_avg() */
+       cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
+                                         cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void
 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
-       u32 divider;
+       long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
+       u32 new_sum, divider;
 
        /* Nothing to update */
-       if (!delta)
+       if (!delta_avg)
                return;
 
        /*
@@ -3493,19 +3500,25 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 
        /* Set new sched_entity's runnable */
        se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
-       se->avg.runnable_sum = se->avg.runnable_avg * divider;
+       new_sum = se->avg.runnable_avg * divider;
+       delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
+       se->avg.runnable_sum = new_sum;
 
        /* Update parent cfs_rq runnable */
-       add_positive(&cfs_rq->avg.runnable_avg, delta);
-       cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+       add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
+       add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
+       /* See update_cfs_rq_load_avg() */
+       cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+                                             cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+       long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
        unsigned long load_avg;
        u64 load_sum = 0;
+       s64 delta_sum;
        u32 divider;
 
        if (!runnable_sum)
@@ -3532,7 +3545,7 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
                 * assuming all tasks are equally runnable.
                 */
                if (scale_load_down(gcfs_rq->load.weight)) {
-                       load_sum = div_s64(gcfs_rq->avg.load_sum,
+                       load_sum = div_u64(gcfs_rq->avg.load_sum,
                                scale_load_down(gcfs_rq->load.weight));
                }
 
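
The div_s64() to div_u64() switch in the hunk above is a type correction rather than a behavioural change: gcfs_rq->avg.load_sum is an unsigned accumulator and the scaled-down weight has already been checked to be non-zero, so an unsigned 64-by-32 division is the natural fit. A tiny standalone illustration of the quantity being computed here, the group's runnable time per unit of weight, with made-up numbers and a plain-C stand-in for the kernel's div_u64():

    #include <stdint.h>
    #include <stdio.h>

    /* Plain-C stand-in for the kernel's div_u64() helper. */
    static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
    {
            return dividend / divisor;
    }

    int main(void)
    {
            uint64_t group_load_sum = 95484;  /* hypothetical gcfs_rq->avg.load_sum */
            uint32_t group_weight   = 2;      /* hypothetical scale_load_down(load.weight) */

            /* Runnable time per unit of weight, as estimated in update_tg_cfs_load(). */
            printf("estimated load_sum = %llu\n",
                   (unsigned long long)div_u64(group_load_sum, group_weight));
            return 0;
    }
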
@@ -3549,19 +3562,22 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
        running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
        runnable_sum = max(runnable_sum, running_sum);
 
-       load_sum = (s64)se_weight(se) * runnable_sum;
-       load_avg = div_s64(load_sum, divider);
-
-       se->avg.load_sum = runnable_sum;
+       load_sum = se_weight(se) * runnable_sum;
+       load_avg = div_u64(load_sum, divider);
 
-       delta = load_avg - se->avg.load_avg;
-       if (!delta)
+       delta_avg = load_avg - se->avg.load_avg;
+       if (!delta_avg)
                return;
 
-       se->avg.load_avg = load_avg;
+       delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
 
-       add_positive(&cfs_rq->avg.load_avg, delta);
-       cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+       se->avg.load_sum = runnable_sum;
+       se->avg.load_avg = load_avg;
+       add_positive(&cfs_rq->avg.load_avg, delta_avg);
+       add_positive(&cfs_rq->avg.load_sum, delta_sum);
+       /* See update_cfs_rq_load_avg() */
+       cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+                                         cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
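
Taken together, the three update_tg_cfs_*() helpers above now share one shape: compute the entity's new sum from its new average and the current divider, derive a signed delta against its old sum, apply both deltas to the parent cfs_rq, then clamp the parent's sum to its lower bound. Below is a condensed, standalone sketch of that shape (constants as in the earlier sketch); pelt_pair, propagate() and clamp_to_min_divider() are illustrative names, and add_positive() is a plain-function stand-in for the kernel macro of the same name.

    #include <stdint.h>

    #define LOAD_AVG_MAX     47742
    #define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)

    struct pelt_pair {                  /* one (avg, sum) metric, e.g. util */
            uint64_t avg;
            uint64_t sum;
    };

    /* Add a signed delta, clamping at zero, like the kernel's add_positive(). */
    static void add_positive(uint64_t *ptr, int64_t delta)
    {
            if (delta < 0 && (uint64_t)(-delta) > *ptr)
                    *ptr = 0;
            else
                    *ptr += (uint64_t)delta;
    }

    /* Keep the invariant sum >= avg * minimum possible divider. */
    static void clamp_to_min_divider(struct pelt_pair *p)
    {
            uint64_t lo = p->avg * PELT_MIN_DIVIDER;

            if (p->sum < lo)
                    p->sum = lo;
    }

    /*
     * Propagate a child's new average into its parent, following the shape of
     * update_tg_cfs_util()/update_tg_cfs_runnable() above.
     */
    static void propagate(struct pelt_pair *parent, struct pelt_pair *se,
                          uint64_t child_avg, uint32_t divider)
    {
            int64_t delta_avg = (int64_t)child_avg - (int64_t)se->avg;
            uint64_t new_sum;
            int64_t delta_sum;

            if (!delta_avg)
                    return;

            se->avg   = child_avg;             /* entity mirrors the child's average */
            new_sum   = se->avg * divider;     /* and a sum consistent with it       */
            delta_sum = (int64_t)new_sum - (int64_t)se->sum;
            se->sum   = new_sum;

            add_positive(&parent->avg, delta_avg);
            add_positive(&parent->sum, delta_sum);
            clamp_to_min_divider(parent);
    }

The point of tracking delta_sum explicitly is that the parent's sum is adjusted by exactly what the child gained or lost, instead of being rebuilt from the rounded-down average.
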
@@ -3652,7 +3668,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
  *
  * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
  *
- * Returns true if the load decayed or we removed load.
+ * Return: true if the load decayed or we removed load.
  *
  * Since both these conditions indicate a changed cfs_rq->avg.load we should
  * call update_tg_load_avg() when this function returns true.
@@ -3677,15 +3693,32 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
                r = removed_load;
                sub_positive(&sa->load_avg, r);
-               sa->load_sum = sa->load_avg * divider;
+               sub_positive(&sa->load_sum, r * divider);
+               /* See sa->util_sum below */
+               sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
 
                r = removed_util;
                sub_positive(&sa->util_avg, r);
-               sa->util_sum = sa->util_avg * divider;
+               sub_positive(&sa->util_sum, r * divider);
+               /*
+                * Because of rounding, se->util_sum might end up being +1 more than
+                * cfs->util_sum. Although this is not a problem by itself, detaching
+                * a lot of tasks with the rounding problem between 2 updates of
+                * util_avg (~1ms) can make cfs->util_sum become null whereas
+                * cfs->util_avg is not.
+                * Check that util_sum is still above its lower bound for the new
+                * util_avg. Given that period_contrib might have moved since the last
+                * sync, we are only sure that util_sum must be above or equal to
+                *    util_avg * minimum possible divider
+                */
+               sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
 
                r = removed_runnable;
                sub_positive(&sa->runnable_avg, r);
-               sa->runnable_sum = sa->runnable_avg * divider;
+               sub_positive(&sa->runnable_sum, r * divider);
+               /* See sa->util_sum above */
+               sa->runnable_sum = max_t(u32, sa->runnable_sum,
+                                             sa->runnable_avg * PELT_MIN_DIVIDER);
 
                /*
                 * removed_runnable is the unweighted version of removed_load so we
@@ -3772,17 +3805,18 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       /*
-        * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
-        * See ___update_load_avg() for details.
-        */
-       u32 divider = get_pelt_divider(&cfs_rq->avg);
-
        dequeue_load_avg(cfs_rq, se);
        sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-       cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
+       sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+       /* See update_cfs_rq_load_avg() */
+       cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
+                                         cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
+
        sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-       cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+       sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+       /* See update_cfs_rq_load_avg() */
+       cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+                                             cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
 
        add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
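
The comment added in update_cfs_rq_load_avg() above describes the failure mode all of these clamps guard against; the small standalone program below makes it concrete with hypothetical numbers: when each detached entity carries a util_sum a little larger than util_avg times the parent's divider (because its period_contrib differs), repeated subtraction drags the parent's util_sum below what its util_avg implies, and the max with util_avg * PELT_MIN_DIVIDER restores the invariant.

    #include <stdint.h>
    #include <stdio.h>

    #define LOAD_AVG_MAX     47742
    #define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)

    /* Same semantics as the kernel's sub_positive(): never wrap below zero. */
    static void sub_positive(uint64_t *ptr, uint64_t val)
    {
            *ptr = (val > *ptr) ? 0 : *ptr - val;
    }

    int main(void)
    {
            /* Hypothetical parent state: avg 40, sum consistent with divider 47000. */
            uint64_t util_avg = 40, util_sum = 40 * 47000;
            /* Each detached entity: avg 10, but a sum 12000 higher than
             * avg * 47000 because its own divider differs. */
            uint64_t se_avg = 10, se_sum = 10 * 47000 + 12000;
            int i;

            for (i = 0; i < 3; i++) {
                    sub_positive(&util_avg, se_avg);
                    sub_positive(&util_sum, se_sum);
            }

            printf("after detach: avg=%llu sum=%llu (min consistent sum=%llu)\n",
                   (unsigned long long)util_avg, (unsigned long long)util_sum,
                   (unsigned long long)(util_avg * PELT_MIN_DIVIDER));

            /* The clamp added by the patch: sum must not fall below avg * min divider. */
            if (util_sum < util_avg * PELT_MIN_DIVIDER)
                    util_sum = util_avg * PELT_MIN_DIVIDER;

            printf("after clamp:  avg=%llu sum=%llu\n",
                   (unsigned long long)util_avg, (unsigned long long)util_sum);
            return 0;
    }

Without the clamp, the parent's util_sum can end up well below what util_avg implies (or even at zero), which then makes util_avg drop faster than it should at the next PELT update.
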
@@ -8539,6 +8573,8 @@ group_type group_classify(unsigned int imbalance_pct,
  *
  * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings
  * of @dst_cpu are idle and @sg has lower priority.
+ *
+ * Return: true if @dst_cpu can pull tasks, false otherwise.
  */
 static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
                                    struct sg_lb_stats *sgs,
@@ -8614,6 +8650,7 @@ sched_asym(struct lb_env *env, struct sd_lb_stats *sds,  struct sg_lb_stats *sgs
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
+ * @sds: Load-balancing data with statistics of the local group.
  * @group: sched_group whose statistics are to be updated.
  * @sgs: variable to hold the statistics for this group.
  * @sg_status: Holds flag indicating the status of the sched_group
@@ -9421,12 +9458,11 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 /**
  * find_busiest_group - Returns the busiest group within the sched_domain
  * if there is an imbalance.
+ * @env: The load balancing environment.
  *
  * Also calculates the amount of runnable load which should be moved
  * to restore balance.
  *
- * @env: The load balancing environment.
- *
  * Return:     - The busiest group if imbalance exists.
  */
 static struct sched_group *find_busiest_group(struct lb_env *env)
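
The final three hunks are kernel-doc fixes rather than behavioural changes: the free-form "Returns ..." lines become canonical "Return:" sections, the previously undocumented @sds parameter gains a description, and @env moves up next to the other parameter lines where kernel-doc expects it. For reference, a minimal well-formed block of this kind looks like the sketch below; the function name and its short descriptions are made up for illustration.

    /**
     * example_can_pull_tasks - Check whether a destination CPU may pull tasks.
     * @dst_cpu: The candidate destination CPU.
     * @sds: Load-balancing data with statistics of the local group.
     * @sgs: Statistics of the candidate busiest group.
     *
     * Any longer description follows the parameter lines.
     *
     * Return: true if @dst_cpu can pull tasks, false otherwise.
     */
    static bool example_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
                                       struct sg_lb_stats *sgs);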