// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/sched/cputime.h>

static DEFINE_SPINLOCK(cgroup_rstat_lock);
static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
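
/*
 * Locking: flushers serialize on the global cgroup_rstat_lock and take
 * each CPU's cgroup_rstat_cpu_lock nested inside it while dismantling
 * that CPU's updated tree; updaters take only their own CPU's lock. The
 * per-cpu locks are raw spinlocks so that updates remain usable from
 * scheduler hot paths.
 */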

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);

static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
{
	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
}

/**
 * cgroup_rstat_updated - keep track of updated rstat_cpu
 * @cgrp: target cgroup
 * @cpu: cpu on which rstat_cpu was updated
 *
 * @cgrp's rstat_cpu on @cpu was updated. Put it on the parent's matching
 * rstat_cpu->updated_children list. See the comment on top of
 * cgroup_rstat_cpu definition for details.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
	struct cgroup *parent;
	unsigned long flags;

	/* nothing to do for root */
	if (!cgroup_parent(cgrp))
		return;

	/*
	 * Speculative already-on-list test. This may race leading to
	 * temporary inaccuracies, which is fine.
	 *
	 * Because @parent's updated_children is terminated with @parent
	 * instead of NULL, we can tell whether @cgrp is on the list by
	 * testing the next pointer for NULL.
	 */
	if (cgroup_rstat_cpu(cgrp, cpu)->updated_next)
		return;

	raw_spin_lock_irqsave(cpu_lock, flags);

	/* put @cgrp and all ancestors on the corresponding updated lists */
	for (parent = cgroup_parent(cgrp); parent;
	     cgrp = parent, parent = cgroup_parent(cgrp)) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
		struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);

		/*
		 * Both additions and removals are bottom-up. If a cgroup
		 * is already in the tree, all ancestors are.
		 */
		if (rstatc->updated_next)
			break;

		rstatc->updated_next = prstatc->updated_children;
		prstatc->updated_children = cgrp;
	}

	raw_spin_unlock_irqrestore(cpu_lock, flags);
}
EXPORT_SYMBOL_GPL(cgroup_rstat_updated);
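
/*
 * Usage sketch (hypothetical controller state, not part of this file):
 * an updater bumps its own per-cpu counters and then marks the cgroup
 * dirty, e.g.:
 *
 *	this_cpu_inc(my_state->nr_events);
 *	cgroup_rstat_updated(css->cgroup, smp_processor_id());
 *
 * The deferred cost is paid at flush time, when ->css_rstat_flush() is
 * invoked for each updated cgroup/cpu pair.
 */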

/**
 * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
 * @pos: current position
 * @root: root of the tree to traverse
 * @cpu: target cpu
 *
 * Walks the updated rstat_cpu tree on @cpu from @root. %NULL @pos starts
 * the traversal and %NULL return indicates the end. During traversal,
 * each returned cgroup is unlinked from the tree. Must be called with the
 * matching cgroup_rstat_cpu_lock held.
 *
 * The only ordering guarantee is that, for a parent and a child pair
 * covered by a given traversal, if a child is visited, its parent is
 * guaranteed to be visited afterwards. For example, over the chain
 * @root = A -> B -> C with all three updated, successive calls pop C,
 * then B, then A (@root last, and only if it is itself linked).
 */
static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
						   struct cgroup *root, int cpu)
{
	struct cgroup_rstat_cpu *rstatc;

	if (pos == root)
		return NULL;

	/*
	 * We're gonna walk down to the first leaf and visit/remove it. We
	 * can pick whatever unvisited node as the starting point.
	 */
	if (!pos)
		pos = root;
	else
		pos = cgroup_parent(pos);

	/* walk down to the first leaf */
	while (true) {
		rstatc = cgroup_rstat_cpu(pos, cpu);
		if (rstatc->updated_children == pos)
			break;
		pos = rstatc->updated_children;
	}

	/*
	 * Unlink @pos from the tree. As the updated_children list is
	 * singly linked, we have to walk it to find the removal point.
	 * However, due to the way we traverse, @pos will be the first
	 * child in most cases. The only exception is @root.
	 */
	if (rstatc->updated_next) {
		struct cgroup *parent = cgroup_parent(pos);
		struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
		struct cgroup_rstat_cpu *nrstatc;
		struct cgroup **nextp;

		nextp = &prstatc->updated_children;
		while (true) {
			nrstatc = cgroup_rstat_cpu(*nextp, cpu);
			if (*nextp == pos)
				break;

			WARN_ON_ONCE(*nextp == parent);
			nextp = &nrstatc->updated_next;
		}

		*nextp = rstatc->updated_next;
		rstatc->updated_next = NULL;

		return pos;
	}

	/* only happens for @root */
	return NULL;
}

/* see cgroup_rstat_flush() */
static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
{
	int cpu;

	lockdep_assert_held(&cgroup_rstat_lock);

	for_each_possible_cpu(cpu) {
		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
						       cpu);
		struct cgroup *pos = NULL;

		raw_spin_lock(cpu_lock);
		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
			struct cgroup_subsys_state *css;

			cgroup_base_stat_flush(pos, cpu);

			rcu_read_lock();
			list_for_each_entry_rcu(css, &pos->rstat_css_list,
						rstat_css_node)
				css->ss->css_rstat_flush(css, cpu);
			rcu_read_unlock();
		}
		raw_spin_unlock(cpu_lock);

		/* if @may_sleep, play nice and yield if necessary */
		if (may_sleep && (need_resched() ||
				  spin_needbreak(&cgroup_rstat_lock))) {
			spin_unlock_irq(&cgroup_rstat_lock);
			if (!cond_resched())
				cpu_relax();
			spin_lock_irq(&cgroup_rstat_lock);
		}
	}
}

/**
 * cgroup_rstat_flush - flush stats in @cgrp's subtree
 * @cgrp: target cgroup
 *
 * Collect all per-cpu stats in @cgrp's subtree into the global counters
 * and propagate them upwards. After this function returns, all cgroups in
 * the subtree have up-to-date ->stat.
 *
 * This also gets all cgroups in the subtree including @cgrp off the
 * ->updated_children lists.
 *
 * This function may block.
 */
void cgroup_rstat_flush(struct cgroup *cgrp)
{
	might_sleep();

	spin_lock_irq(&cgroup_rstat_lock);
	cgroup_rstat_flush_locked(cgrp, true);
	spin_unlock_irq(&cgroup_rstat_lock);
}

/**
 * cgroup_rstat_flush_irqsafe - irqsafe version of cgroup_rstat_flush()
 * @cgrp: target cgroup
 *
 * This function can be called from any context.
 */
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp)
{
	unsigned long flags;

	spin_lock_irqsave(&cgroup_rstat_lock, flags);
	cgroup_rstat_flush_locked(cgrp, false);
	spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
}

/**
 * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
 * @cgrp: target cgroup
 *
 * Flush stats in @cgrp's subtree and prevent further flushes. Must be
 * paired with cgroup_rstat_flush_release().
 *
 * This function may block.
 */
void cgroup_rstat_flush_hold(struct cgroup *cgrp)
	__acquires(&cgroup_rstat_lock)
{
	might_sleep();
	spin_lock_irq(&cgroup_rstat_lock);
	cgroup_rstat_flush_locked(cgrp, true);
}

/**
 * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
 */
void cgroup_rstat_flush_release(void)
	__releases(&cgroup_rstat_lock)
{
	spin_unlock_irq(&cgroup_rstat_lock);
}
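
/*
 * The hold/release pair is for readers that need a consistent snapshot:
 * flush under the lock, read the flushed state, then release. See
 * cgroup_base_stat_cputime_show() below for the canonical pattern.
 */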

int cgroup_rstat_init(struct cgroup *cgrp)
{
	int cpu;

	/* the root cgrp has rstat_cpu preallocated */
	if (!cgrp->rstat_cpu) {
		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
		if (!cgrp->rstat_cpu)
			return -ENOMEM;
	}

	/* ->updated_children list is self terminated */
	for_each_possible_cpu(cpu) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

		rstatc->updated_children = cgrp;
		u64_stats_init(&rstatc->bsync);
	}

	return 0;
}

void cgroup_rstat_exit(struct cgroup *cgrp)
{
	int cpu;

	cgroup_rstat_flush(cgrp);

	/* sanity check */
	for_each_possible_cpu(cpu) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
		    WARN_ON_ONCE(rstatc->updated_next))
			return;
	}

	free_percpu(cgrp->rstat_cpu);
	cgrp->rstat_cpu = NULL;
}

void __init cgroup_rstat_boot(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));

	BUG_ON(cgroup_rstat_init(&cgrp_dfl_root.cgrp));
}

/*
 * Functions for cgroup basic resource statistics implemented on top of
 * rstat.
 */

static void cgroup_base_stat_accumulate(struct cgroup_base_stat *dst_bstat,
					struct cgroup_base_stat *src_bstat)
{
	dst_bstat->cputime.utime += src_bstat->cputime.utime;
	dst_bstat->cputime.stime += src_bstat->cputime.stime;
	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
}

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
	struct task_cputime *last_cputime = &rstatc->last_bstat.cputime;
	struct task_cputime cputime;
	struct cgroup_base_stat delta;
	unsigned seq;

	/* fetch the current per-cpu values */
	do {
		seq = __u64_stats_fetch_begin(&rstatc->bsync);
		cputime = rstatc->bstat.cputime;
	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));

	/* calculate the delta to propagate */
	delta.cputime.utime = cputime.utime - last_cputime->utime;
	delta.cputime.stime = cputime.stime - last_cputime->stime;
	delta.cputime.sum_exec_runtime = cputime.sum_exec_runtime -
					 last_cputime->sum_exec_runtime;
	*last_cputime = cputime;

	/* transfer the pending stat into delta */
	cgroup_base_stat_accumulate(&delta, &cgrp->pending_bstat);
	memset(&cgrp->pending_bstat, 0, sizeof(cgrp->pending_bstat));

	/* propagate delta into the global stat and the parent's pending */
	cgroup_base_stat_accumulate(&cgrp->bstat, &delta);
	if (parent)
		cgroup_base_stat_accumulate(&parent->pending_bstat, &delta);
}
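
/*
 * bsync is a u64_stats sync point: writers bracket their updates with it
 * so that 32-bit readers can retry instead of observing torn 64-bit
 * counters. The begin/end helpers below also pin the CPU (get_cpu_ptr())
 * so the update and the cgroup_rstat_updated() call hit the same per-cpu
 * structures.
 */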

static struct cgroup_rstat_cpu *
cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
{
	struct cgroup_rstat_cpu *rstatc;

	rstatc = get_cpu_ptr(cgrp->rstat_cpu);
	u64_stats_update_begin(&rstatc->bsync);

	return rstatc;
}

static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
						 struct cgroup_rstat_cpu *rstatc)
{
	u64_stats_update_end(&rstatc->bsync);
	cgroup_rstat_updated(cgrp, smp_processor_id());
	put_cpu_ptr(rstatc);
}

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
	struct cgroup_rstat_cpu *rstatc;

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
	cgroup_base_stat_cputime_account_end(cgrp, rstatc);
}

void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec)
{
	struct cgroup_rstat_cpu *rstatc;

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp);

	switch (index) {
	case CPUTIME_USER:
	case CPUTIME_NICE:
		rstatc->bstat.cputime.utime += delta_exec;
		break;
	case CPUTIME_SYSTEM:
	case CPUTIME_IRQ:
	case CPUTIME_SOFTIRQ:
		rstatc->bstat.cputime.stime += delta_exec;
		break;
	default:
		break;
	}

	cgroup_base_stat_cputime_account_end(cgrp, rstatc);
}
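
/*
 * Example output of the basic "cpu.stat" lines emitted below, with
 * illustrative values (microseconds, converted from nanoseconds via
 * do_div()):
 *
 *	usage_usec 10000
 *	user_usec 6000
 *	system_usec 4000
 */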
void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;
	u64 usage, utime, stime;

	if (!cgroup_parent(cgrp))
		return;

	cgroup_rstat_flush_hold(cgrp);
	usage = cgrp->bstat.cputime.sum_exec_runtime;
	cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, &utime, &stime);
	cgroup_rstat_flush_release();

	do_div(usage, NSEC_PER_USEC);
	do_div(utime, NSEC_PER_USEC);
	do_div(stime, NSEC_PER_USEC);

	seq_printf(seq, "usage_usec %llu\n"
		   "user_usec %llu\n"
		   "system_usec %llu\n",
		   usage, utime, stime);
}