// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Move PELT related code from fair.c into this pelt.c file
 * Author: Vincent Guittot <vincent.guittot@linaro.org>
 */
#include <linux/sched.h>
#include "sched.h"
#include "pelt.h"

#include <trace/events/sched.h>
/* val * y^n, where y^32 ~= 0.5 (~1 scheduling period) */
static u64 decay_load(u64 val, u64 n)
	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * With a look-up table which covers y^n (n < PERIOD)
	 *
	 * To achieve constant time decay_load.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
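
	/*
	 * For a rough feel of the arithmetic: decaying val = 1024 over
	 * n = 70 periods shifts by 70/32 = 2 (two full half-lives, i.e. /4)
	 * and then multiplies by y^(70%32) = y^6 ~= 0.878 taken from
	 * runnable_avg_yN_inv[], giving ~1024 * 0.5^(70/32) ~= 225. The
	 * cost is the same for any n, and anything older than
	 * LOAD_AVG_PERIOD * 63 periods (~2s) is treated as fully decayed.
	 */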
static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
	u32 c1, c2, c3 = d3; /* y^0 == 1 */

	c1 = decay_load((u64)d1, periods);

	/*
	 * c2 = 1024 \Sum_{n=1}^{p-1} y^n
	 *    = 1024 ( \Sum_{n=0}^{inf} y^n - \Sum_{n=p}^{inf} y^n - y^0 )
	 */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
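
	/*
	 * As a quick worked instance of the closed form above: with
	 * periods == 2 it evaluates to 1024*(y^0 + y^1) - 1024 = 1024*y,
	 * i.e. ~1002: exactly one full period, decayed once, without
	 * having to sum the p-1 middle terms one by one.
	 */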
#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 * u' = (u + d1) y^p + 1024 \Sum_{n=1}^{p-1} y^n + d3 y^0
 *
 *    = u y^p +                                          (Step 1)
 *
 *      d1 y^p + 1024 \Sum_{n=1}^{p-1} y^n + d3 y^0      (Step 2)
 */
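/*
 * As a hypothetical walk-through of the decomposition above: starting from
 * period_contrib == 300us, a new delta of 2000us gives p = 2 periods,
 * d1 = 1024 - 300 = 724 and d3 = (300 + 2000) % 1024 = 252, so that
 * d1 + 1024*(p-1) + d3 == 2000us, the time that actually elapsed. The new
 * contribution is then decay_load(724, 2) + [LOAD_AVG_MAX -
 * decay_load(LOAD_AVG_MAX, 2) - 1024] + 252 ~= 693 + 1002 + 252, and
 * period_contrib becomes 252.
 */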
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */

	delta += sa->period_contrib;
	periods = delta / 1024; /* A period is 1024us (~1ms) */
	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	sa->load_sum = decay_load(sa->load_sum, periods);
	sa->runnable_sum = decay_load(sa->runnable_sum, periods);
	sa->util_sum = decay_load((u64)(sa->util_sum), periods);
	/*
	 * This relies on the:
	 *
	 *   if (!load)
	 *     runnable = running = 0;
	 *
	 * clause from ___update_load_sum(); this results in
	 * the below usage of @contrib to disappear entirely,
	 * so no point in calculating it.
	 */
	contrib = __accumulate_pelt_segments(periods,
			1024 - sa->period_contrib, delta);
	sa->period_contrib = delta;

	if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;
/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series. To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
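/*
 * For a feel of the resulting numbers: an entity that stays runnable
 * without interruption accumulates 1024 * (1 + y + y^2 + ...), which
 * converges to LOAD_AVG_MAX (~47742); after ~32ms it has reached about
 * half of that, and once the entity goes idle its remaining contribution
 * halves every ~32ms. These figures are orientation only; the exact
 * constants live in sched-pelt.h.
 */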
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
		   unsigned long load, unsigned long runnable, int running)
	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;

	sa->last_update_time += delta << 10;
	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has already been dequeued but cfs_rq->curr still points to it.
	 * This means that weight can be 0 while running is still set, for a
	 * sched_entity but also for a cfs_rq if the latter becomes idle. As an
	 * example, this happens during idle_balance() which calls
	 * update_blocked_averages().
	 *
	 * Also see the comment in accumulate_sum().
	 */
	if (!load)
		runnable = running = 0;
	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;
static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load)
	u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
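
	/*
	 * Roughly speaking, the divider above (LOAD_AVG_MAX - 1024 +
	 * sa->period_contrib) is the largest value a *_sum can have at this
	 * point inside the current period, so the averages come out
	 * normalized to their natural ranges: util_avg ends up in
	 * [0..SCHED_CAPACITY_SCALE], e.g. a task running about half of the
	 * time settles around ~512, while one that always runs approaches
	 * ~1024.
	 */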
/*
 * sched_entity:
 *
 *   task:
 *     se_weight()   = se->load.weight
 *     se_runnable() = !!on_rq
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = grq->h_nr_running
 *
 *   runnable_sum = se_runnable() * runnable = grq->runnable_sum
 *   runnable_avg = runnable_sum
 *
 *   load_sum := runnable
 *   load_avg = se_weight(se) * load_sum
 *
 * cfs_rq:
 *
 *   runnable_sum = \Sum se->avg.runnable_sum
 *   runnable_avg = \Sum se->avg.runnable_avg
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 */
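/*
 * As an illustration of the rules above (approximate steady-state values):
 * two always-runnable nice-0 tasks sharing one CPU each settle around
 * se load_avg ~= 1024, runnable_avg ~= 1024 and util_avg ~= 512 (each one
 * only runs half of the time), while their cfs_rq converges to
 * load_avg ~= 2048, runnable_avg ~= 2048 and util_avg ~= 1024.
 */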
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
		___update_load_avg(&se->avg, se_weight(se));
		trace_pelt_se_tp(se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
	if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
				cfs_rq->curr == se)) {

		___update_load_avg(&se->avg, se_weight(se));
		cfs_se_util_change(&se->avg);
		trace_pelt_se_tp(se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
	if (___update_load_sum(now, &cfs_rq->avg,
				scale_load_down(cfs_rq->load.weight),
				cfs_rq->h_nr_running,
				cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1);
		trace_pelt_cfs_tp(cfs_rq);
/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 */
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
	if (___update_load_sum(now, &rq->avg_rt,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_rt, 1);
		trace_pelt_rt_tp(rq);
/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 */
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
	if (___update_load_sum(now, &rq->avg_dl,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_dl, 1);
		trace_pelt_dl_tp(rq);
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
/*
 * thermal:
 *
 *   load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
 *
 *   util_avg and runnable_load_avg are not supported and meaningless.
 *
 * Unlike rt/dl utilization tracking, which tracks the time spent by a cpu
 * running a rt/dl task through util_avg, the average thermal pressure is
 * tracked through load_avg. This is because the thermal pressure signal is
 * a time-weighted "delta" capacity, unlike util_avg which is binary.
 *
 *   "delta capacity" = actual capacity -
 *                      capped capacity of a cpu due to a thermal event.
 */
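/*
 * For example, if a thermal event caps a CPU whose original capacity is
 * 1024 down to 768, the "delta capacity" fed into this signal is 256;
 * avg_thermal.load_avg then ramps toward 256 for as long as the cap is in
 * place and decays back toward 0 once it is lifted.
 */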
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
	if (___update_load_sum(now, &rq->avg_thermal,
				capacity,
				capacity,
				capacity)) {

		___update_load_avg(&rq->avg_thermal, 1);
		trace_pelt_thermal_tp(rq);
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * irq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 */
int update_irq_load_avg(struct rq *rq, u64 running)
	/*
	 * We can't use clock_pelt because irq time is not accounted in
	 * clock_task. Instead we directly scale the running time to
	 * reflect the real amount of computation.
	 */
	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
	/*
	 * We know the time that has been used by interrupt since the last
	 * update but we don't know when. Let's be pessimistic and assume that
	 * the interrupt has happened just before the update. This is not so
	 * far from reality because the interrupt will most probably wake up a
	 * task and trigger an update of the rq clock, during which the metric
	 * is updated.
	 * We start to decay with normal context time and then we add the
	 * interrupt context time.
	 * We can safely remove running from rq->clock because
	 * rq->clock += delta with delta >= running
	 */
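
	/*
	 * Concretely: if, say, 200us out of the last 1000us were spent in
	 * interrupt context, the first call below decays avg_irq across the
	 * 800us of "normal" time with no contribution, and the second call
	 * accumulates the 200us at full weight, so avg_irq ends up tracking
	 * roughly the fraction of CPU capacity consumed by irq/softirq time.
	 */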
	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
				 0,
				 0,
				 0);
	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
				  1,
				  1,
				  1);

	if (ret) {
		___update_load_avg(&rq->avg_irq, 1);
		trace_pelt_irq_tp(rq);