/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

extern struct static_key_false sched_schedstats;
/* Expects runqueue lock to be held for atomicity of update */
static inline void rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/* Expects runqueue lock to be held for atomicity of update */
static inline void rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
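/*
 * Illustrative usage (a sketch, not part of the schedstats API proper;
 * the field names below are only examples): a caller that updates several
 * counters tests schedstat_enabled() once and then uses the unchecked
 * __schedstat_*() variants, e.g.:
 *
 *	if (schedstat_enabled()) {
 *		struct sched_statistics *stats = __schedstats_from_se(se);
 *
 *		__schedstat_inc(stats->wait_count);
 *		__schedstat_add(stats->wait_sum, delta);
 *	}
 *
 * while a one-off update can simply be schedstat_inc(rq->yld_count).
 */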
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats);
void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats);
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats);
static inline void
check_schedstat_required(void)
{
	if (schedstat_enabled())
		return;

	/* Force schedstat enabled if a dependent tracepoint is active */
	if (trace_sched_stat_wait_enabled()	||
	    trace_sched_stat_sleep_enabled()	||
	    trace_sched_stat_iowait_enabled()	||
	    trace_sched_stat_blocked_enabled()	||
	    trace_sched_stat_runtime_enabled())
		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
}
#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define   schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define   schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define   schedstat_set(var, val)	do { } while (0)
# define   schedstat_val(var)		0
# define   schedstat_val_or_zero(var)	0

# define __update_stats_wait_start(rq, p, stats)	do { } while (0)
# define __update_stats_wait_end(rq, p, stats)		do { } while (0)
# define __update_stats_enqueue_sleeper(rq, p, stats)	do { } while (0)
# define check_schedstat_required()			do { } while (0)

#endif /* CONFIG_SCHEDSTATS */
#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity_stats {
	struct sched_entity     se;
	struct sched_statistics stats;
} __no_randomize_layout;
#endif
static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	if (!entity_is_task(se))
		return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
	return &task_of(se)->stats;
}
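/*
 * Illustrative sketch (not part of this header): a group scheduling
 * entity is expected to be allocated as the larger sched_entity_stats
 * container, so its statistics can be recovered from the embedded se
 * via container_of(), e.g.:
 *
 *	struct sched_entity_stats *ses = kzalloc(sizeof(*ses), GFP_KERNEL);
 *
 *	if (ses) {
 *		struct sched_entity *se = &ses->se;
 *
 *		__schedstats_from_se(se);	// == &ses->stats
 *	}
 */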
#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (p->in_memstall)
		set |= TSK_MEMSTALL_RUNNING;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}
static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */
	if (sleep)
		return;

	if (p->in_memstall)
		clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	psi_task_change(p, clear, 0);
}
static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}
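/*
 * Illustrative call ordering (a sketch of how the scheduler core is
 * expected to use these hooks, not a definition of that contract):
 *
 *	psi_dequeue(prev, true);		// voluntary sleep: dequeue...
 *	psi_sched_switch(prev, next, true);	// ...the switch handles the rest
 *
 * whereas a wakeup that migrates the task is expected to go through:
 *
 *	psi_ttwu_dequeue(p);	// old rq: clear sleep-persistent states
 *	psi_enqueue(p, true);	// new rq: requeue them (wake_requeue was set)
 */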
#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
#endif /* CONFIG_PSI */
#ifdef CONFIG_SCHED_INFO
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = 0;

	if (!t->sched_info.last_queued)
		return;

	delta = rq_clock(rq) - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeue(rq, delta);
}
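/*
 * Worked example (numbers are illustrative only): if a task was queued at
 * rq_clock() == 1000 and is dequeued at 1500 without ever having run,
 * delta == 500 ns is folded into both t->sched_info.run_delay and the
 * per-rq rq_sched_info.run_delay; sched_info_arrive() below performs the
 * same accounting when the task reaches the CPU instead.
 */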
/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now, delta = 0;

	if (!t->sched_info.last_queued)
		return;

	now = rq_clock(rq);
	delta = now - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}
/*
 * This function is only called from enqueue_task(), but it also only
 * updates the timestamp if it is not already set. It's assumed that
 * sched_info_dequeue() will clear that stamp when appropriate.
 */
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
	if (!t->sched_info.last_queued)
		t->sched_info.last_queued = rq_clock(rq);
}
/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_enqueue() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (task_is_running(t))
		sched_info_enqueue(rq, t);
}
/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
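/*
 * Illustrative sequence (a sketch, not a guarantee about the caller): when
 * the CPU moves from task A to task B, the context-switch path is expected
 * to do
 *
 *	sched_info_switch(rq, A, B);
 *
 * which charges A's on-CPU time via sched_info_depart() and, if B had been
 * waiting, folds B's wait into run_delay via sched_info_arrive().
 */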
#else /* !CONFIG_SCHED_INFO: */
# define sched_info_enqueue(rq, t)	do { } while (0)
# define sched_info_dequeue(rq, t)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */