// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 *  Move PELT related code from fair.c into this pelt.c file
 *  Author: Vincent Guittot <vincent.guittot@linaro.org>
 */
#include <linux/sched.h>
#include "sched.h"
#include "pelt.h"

#include <trace/events/sched.h>
/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * with a look-up table which covers y^n (n < PERIOD)
	 * to achieve a constant-time decay_load().
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
	return val;
}
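/*
 * Worked example (illustrative, not from the original source): with
 * LOAD_AVG_PERIOD == 32, decay_load(1024, 32) collapses to one halving
 * with zero remainder, returning 1024 >> 1 == 512, i.e. y^32 ~= 0.5.
 * decay_load(1024, 48) halves once and then applies y^16 ~= 0.707 via
 * the look-up table, returning roughly 362.
 */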
static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3; /* y^0 == 1 */

	/*
	 * c1 = d1 y^p
	 */
	c1 = decay_load((u64)d1, periods);

	/*
	 *            p-1
	 * c2 = 1024 \Sum y^n
	 *            n=1
	 *
	 *              inf        inf
	 *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
	 *              n=0        n=p
	 */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

	return c1 + c2 + c3;
}
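/*
 * Example (illustrative, not from the original source): for p == 2,
 * d1 == 256 and d3 == 100, c1 == decay_load(256, 2) ~= 245,
 * c2 == 1024 * y^1 ~= 1002 (the single full period), and c3 == 100,
 * so the new contribution is roughly 1347 time units.
 */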
#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
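/*
 * cap_scale() scales a value by a capacity expressed relative to
 * SCHED_CAPACITY_SCALE (1 << SCHED_CAPACITY_SHIFT == 1024). For example,
 * cap_scale(800, 512) == (800 * 512) >> 10 == 400, i.e. 800 scaled down
 * by a half-capacity CPU.
 */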
/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +					(Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0		(Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
{
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
	u64 periods;

	delta += sa->period_contrib;
	periods = delta / 1024; /* A period is 1024us (~1ms) */

	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
		sa->load_sum = decay_load(sa->load_sum, periods);
		sa->runnable_load_sum =
			decay_load(sa->runnable_load_sum, periods);
		sa->util_sum = decay_load((u64)(sa->util_sum), periods);

		/*
		 * Step 2
		 */
		delta %= 1024;
		contrib = __accumulate_pelt_segments(periods,
				1024 - sa->period_contrib, delta);
	}
	sa->period_contrib = delta;

	if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_load_sum += runnable * contrib;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

	return periods;
}
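/*
 * Usage note (illustrative, not from the original source): for
 * delta == 2500 (in 1024ns units) and period_contrib == 200, we get
 * delta == 2700 and periods == 2. The old sums are decayed by y^2, and
 * contrib covers the 824-unit tail of the old period (d1), one full
 * 1024-unit period (d2) and the 652-unit head of the current one (d3).
 */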
/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
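/*
 * A minimal floating-point sketch of the same recurrence (editorial, for
 * exposition only; the kernel uses fixed-point math via decay_load() and
 * the runnable_avg_yN_inv[] table instead):
 *
 *	double y = pow(0.5, 1.0 / 32.0);	// y^32 == 0.5
 *	avg = u_0 + y * avg;			// one period roll-over
 */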
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
		  unsigned long load, unsigned long runnable, int running)
{
	u64 delta;

	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;

	sa->last_update_time += delta << 10;

	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are corner cases where the current se
	 * has already been dequeued while cfs_rq->curr still points to it.
	 * This means that weight can be 0 while running is still set, both
	 * for a sched_entity and for a cfs_rq if the latter becomes idle.
	 * For example, this happens during idle_balance(), which calls
	 * update_blocked_averages().
	 */
	if (!load)
		runnable = running = 0;

	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;

	return 1;
}
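/*
 * Note (editorial): a non-zero return happens only when at least one full
 * 1024us period boundary was crossed, which is exactly when the *_avg
 * values need to be recomputed from the updated *_sum values below.
 */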
static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
{
	u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
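/*
 * Example (illustrative, assuming LOAD_AVG_MAX == 47742): the divider is
 * the maximum value a *_sum can have reached given the current partial
 * period, i.e. between 46718 (period_contrib == 0) and 47741. An entity
 * that has always been running therefore ends up with
 * util_avg == util_sum / divider ~= 1024 == SCHED_CAPACITY_SCALE.
 */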
/*
 * sched_entity:
 *
 *   task:
 *     se_runnable() == se_weight()
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
 *
 *   load_sum := runnable_sum
 *   load_avg = se_weight(se) * runnable_avg
 *
 *   runnable_load_sum := runnable_sum
 *   runnable_load_avg = se_runnable(se) * runnable_avg
 *
 * XXX collapse load_sum and runnable_load_sum
 *
 * cfs_rq:
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 *
 *   runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
 *   runnable_load_avg = \Sum se->avg.runnable_load_avg
 */
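/*
 * Worked example for the group case (numbers are illustrative, not from
 * the original source): with tg->weight == 1024, grq->load_avg == 512 and
 * tg->load_avg == 2048, se_weight() == 1024 * 512 / 2048 == 256, i.e. the
 * group entity carries a quarter of the task group's weight on this CPU.
 */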
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,
				cfs_rq->curr == se)) {

		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
		cfs_se_util_change(&se->avg);
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
	if (___update_load_sum(now, &cfs_rq->avg,
				scale_load_down(cfs_rq->load.weight),
				scale_load_down(cfs_rq->runnable_weight),
				cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1, 1);
		trace_pelt_cfs_tp(cfs_rq);
		return 1;
	}

	return 0;
}
/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 *   load_avg and runnable_load_avg are not supported and meaningless.
 */

int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_rt,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_rt, 1, 1);
		trace_pelt_rt_tp(rq);
		return 1;
	}

	return 0;
}
/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 *   load_avg and runnable_load_avg are not supported and meaningless.
 */

int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_dl,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_dl, 1, 1);
		trace_pelt_dl_tp(rq);
		return 1;
	}

	return 0;
}
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * irq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 *   load_avg and runnable_load_avg are not supported and meaningless.
 */

int update_irq_load_avg(struct rq *rq, u64 running)
{
	int ret = 0;

	/*
	 * We can't use clock_pelt because irq time is not accounted in
	 * clock_task. Instead we directly scale the running time to
	 * reflect the real amount of computation.
	 */
	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));

	/*
	 * We know how much time has been used by interrupts since the last
	 * update, but we don't know when. Let's be pessimistic and assume
	 * the interrupt happened just before the update. This is not far
	 * from reality, because the interrupt will most probably wake up a
	 * task and trigger an update of the rq clock, during which the
	 * metric is updated.
	 * We start to decay with normal context time and then we add the
	 * interrupt context time.
	 * We can safely remove running from rq->clock because
	 * rq->clock += delta with delta >= running.
	 */
	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
				0,
				0,
				0);
	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
				1,
				1,
				1);

	if (ret) {
		___update_load_avg(&rq->avg_irq, 1, 1);
		trace_pelt_irq_tp(rq);
	}

	return ret;
}
#endif
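/*
 * Illustrative timeline (editorial, not from the original source): if the
 * last update was at t0 and 500us of irq time is reported at t1, the first
 * ___update_load_sum() call above advances avg_irq from t0 to (t1 - 500us)
 * with load/runnable/running clear, purely decaying the old sums; the
 * second advances it from (t1 - 500us) to t1 with all three set,
 * accumulating the irq time as if it all ran at the very end of the window.
 */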