sched/fair: Ensure _sum and _avg values stay consistent
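The hunk below stops subtracting r * divider from the *_sum fields and instead
re-derives each *_sum as *_avg * divider after the corresponding *_avg has been
reduced, so the pair cannot drift apart at this point. As a minimal userspace
sketch of that difference (illustration only, not kernel code: the toy_avg
struct, the simplified sub_positive() stand-in and the example numbers are
assumptions, and "divider" merely plays the role of get_pelt_divider()):

    /*
     * Illustration only: toy_avg is a hypothetical stand-in for the load/util/
     * runnable pair in struct sched_avg; numbers are made up to show the effect.
     */
    #include <stdio.h>

    struct toy_avg {
            unsigned long avg;
            unsigned long long sum;
    };

    /* simplified stand-in for the kernel's sub_positive(): never underflow */
    static void sub_positive(unsigned long *ptr, unsigned long val)
    {
            *ptr = (*ptr > val) ? *ptr - val : 0;
    }

    int main(void)
    {
            const unsigned long long divider = 47742;       /* ~LOAD_AVG_MAX */
            /* start with sum slightly out of sync with avg * divider */
            struct toy_avg sa = { .avg = 100, .sum = 100ULL * divider + 500 };
            unsigned long r = 40;                           /* removed load */

            sub_positive(&sa.avg, r);

            /* old scheme: subtract r * divider from sum independently */
            unsigned long long old_sum = sa.sum - r * divider;

            /* new scheme: re-derive sum from avg, so sum == avg * divider holds */
            sa.sum = (unsigned long long)sa.avg * divider;

            printf("avg=%lu old_sum=%llu new_sum=%llu avg*divider=%llu\n",
                   sa.avg, old_sum, sa.sum, sa.avg * divider);
            return 0;
    }

With the old scheme, any offset already present in sum (500 in this sketch)
survives the removal; with the new scheme, sum is exactly avg * divider
afterwards, discarding whatever error had accumulated.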
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4a3e61a..45edf61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3657,15 +3657,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
                r = removed_load;
                sub_positive(&sa->load_avg, r);
-               sub_positive(&sa->load_sum, r * divider);
+               sa->load_sum = sa->load_avg * divider;
 
                r = removed_util;
                sub_positive(&sa->util_avg, r);
-               sub_positive(&sa->util_sum, r * divider);
+               sa->util_sum = sa->util_avg * divider;
 
                r = removed_runnable;
                sub_positive(&sa->runnable_avg, r);
-               sub_positive(&sa->runnable_sum, r * divider);
+               sa->runnable_sum = sa->runnable_avg * divider;
 
                /*
                 * removed_runnable is the unweighted version of removed_load so we