// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "hashmap.h"
#include <linux/zalloc.h>

void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

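/*
 * Illustrative sketch (not part of perf itself): how a caller typically
 * drives the running-stats API above, assuming the struct is first
 * zeroed via init_stats() from stat.h:
 *
 *	struct stats st;
 *
 *	init_stats(&st);
 *	update_stats(&st, 10);
 *	update_stats(&st, 20);
 *	update_stats(&st, 30);
 *	// now st.n == 3, st.mean == 20.0 and st.M2 == 200.0,
 *	// accumulated incrementally via Welford's online algorithm.
 */
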
double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

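/*
 * Worked example (illustrative only): for the three samples 10, 20, 30
 * accumulated above, M2 == 200 and n == 3, so:
 *
 *	variance      = 200 / (3 - 1)  = 100
 *	variance_mean = 100 / 3       ~=  33.3
 *	stddev_stats() returns sqrt(33.3) ~= 5.77
 *
 * i.e. the standard deviation of the *mean*, which feeds the
 * "( +- x% )" noise figure perf stat prints for repeated runs.
 */
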
double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}

bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE, x),
	ID(CYCLES_IN_TX, cpu/cycles-t/),
	ID(TRANSACTION_START, cpu/tx-start/),
	ID(ELISION_START, cpu/el-start/),
	ID(CYCLES_IN_TX_CP, cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(TOPDOWN_RETIRING, topdown-retiring),
	ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
	ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
	ID(TOPDOWN_BE_BOUND, topdown-be-bound),
	ID(TOPDOWN_HEAVY_OPS, topdown-heavy-ops),
	ID(TOPDOWN_BR_MISPREDICT, topdown-br-mispredict),
	ID(TOPDOWN_FETCH_LAT, topdown-fetch-lat),
	ID(TOPDOWN_MEM_BOUND, topdown-mem-bound),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID

static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(evsel__name(evsel), id_str[i]) ||
		    (strstr(evsel__name(evsel), id_str[i]) && evsel->pmu_name
		     && strstr(evsel__name(evsel), evsel->pmu_name))) {
			ps->id = i;
			break;
		}
	}
}

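/*
 * Illustrative note (an assumption about typical inputs, not exhaustive):
 * a plain event named "cpu/cycles-t/" matches via the strcmp() branch,
 * while a hybrid event such as "cpu_core/topdown-retiring/" (with
 * pmu_name == "cpu_core") matches via the strstr() branch, since its
 * name contains both the id string and the PMU name.
 */
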
static void evsel__reset_stat_priv(struct evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->stats;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

static int evsel__alloc_stat_priv(struct evsel *evsel)
{
	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->stats == NULL)
		return -ENOMEM;
	evsel__reset_stat_priv(evsel);
	return 0;
}

static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps)
		zfree(&ps->group_data);
	zfree(&evsel->stats);
}

static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
{
	int cpu_map_nr = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);
	struct perf_counts *counts;

	counts = perf_counts__new(cpu_map_nr, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}

static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
	if (evsel__alloc_stat_priv(evsel) < 0 ||
	    evsel__alloc_counts(evsel) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
		return -ENOMEM;

	return 0;
}

int evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}

void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}

void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}

void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}

static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int idx, nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
			*perf_counts(evsel->counts, idx, thread) =
				*perf_counts(evsel->prev_raw_counts, idx, thread);
		}
	}

	evsel->counts->aggr = evsel->prev_raw_counts->aggr;
}

void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}

void evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	/*
	 * To collect the overall statistics for interval mode,
	 * we copy the counts from evsel->prev_raw_counts to
	 * evsel->counts. The perf_stat_process_counter creates
	 * aggr values from per cpu values, but the per cpu values
	 * are 0 for AGGR_GLOBAL. So we use a trick that saves the
	 * previous aggr value to the first member of perf_counts,
	 * then aggr calculation in process_counter_values can work
	 * correctly.
	 */
	evlist__for_each_entry(evlist, evsel) {
		*perf_counts(evsel->prev_raw_counts, 0, 0) =
			evsel->prev_raw_counts->aggr;
	}
}

static size_t pkg_id_hash(const void *__key, void *ctx __maybe_unused)
{
	uint64_t *key = (uint64_t *) __key;

	return *key & 0xffffffff;
}

static bool pkg_id_equal(const void *__key1, const void *__key2,
			 void *ctx __maybe_unused)
{
	uint64_t *key1 = (uint64_t *) __key1;
	uint64_t *key2 = (uint64_t *) __key2;

	return *key1 == *key2;
}

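/*
 * Illustrative note: check_per_pkg() below packs (die_id, socket_id) into
 * a single 64-bit key, die in the high half and socket in the low half:
 *
 *	socket 1, die 0  ->  key == 0x0000000000000001
 *	socket 0, die 1  ->  key == 0x0000000100000000
 *
 * so pkg_id_hash(), which keeps only the low 32 bits, effectively
 * buckets entries by socket id.
 */
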
static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
			 int cpu_map_idx, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (IS_ERR(mask))
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * we do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu__get_socket_id(cpu);
	if (s < 0)
		return -1;

	/*
	 * On multi-die system, die_id > 0. On no-die system, die_id = 0.
	 * We use hashmap(socket, die) to check the used socket+die pair.
	 */
	d = cpu__get_die_id(cpu);
	if (d < 0)
		return -1;

	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, (void *)key, NULL)) {
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, (void *)key, (void *)1);

	return ret;
}

static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu_map_idx, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_DIE:
	case AGGR_SOCKET:
	case AGGR_NODE:
	case AGGR_NONE:
		if (!evsel->snapshot)
			evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
			perf_stat__update_shadow_stats(evsel, count->val,
						       cpu_map_idx, &rt_stat);
		}

		if (config->aggr_mode == AGGR_THREAD) {
			if (config->stats)
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &config->stats[thread]);
			else
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &rt_stat);
		}
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		aggr->ena += count->ena;
		aggr->run += count->run;
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int idx, thread;

	if (counter->core.system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (idx = 0; idx < ncpus; idx++) {
			if (process_counter_values(config, counter, idx, thread,
						   perf_counts(counter->counts, idx, thread)))
				return -1;
		}
	}

	return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

	return 0;
}

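/*
 * Illustrative note: perf_counts_values is a union, so aggr.values[]
 * above aliases { val, ena, run } in that order; the update_stats()
 * loop therefore tracks running statistics for the raw count,
 * time-enabled and time-running in res_stats[0..2] respectively.
 */
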
int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count, *ptr;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;
	int cpu_map_idx;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}
	cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
	if (cpu_map_idx == -1) {
		pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
		return -EINVAL;
	}
	ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
	if (ptr == NULL) {
		pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
			st->cpu, st->thread, evsel__name(counter));
		return -EINVAL;
	}
	*ptr = count;
	counter->supported = true;
	return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}

int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of a non-trivial group, let's enable
	 * the group read (for leader) and ID retrieval for all
	 * members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disable all counters initially; they will be enabled
	 * either manually by us or by the kernel via enable_on_exec
	 * later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}
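
/*
 * Illustrative sketch (from the perf_event_open(2) ABI, not defined in
 * this file): with the read_format chosen above, a read() on the counter
 * fd returns, for a single event:
 *
 *	struct {
 *		u64 value;
 *		u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *	};
 *
 * and, once PERF_FORMAT_GROUP|PERF_FORMAT_ID are added for groups:
 *
 *	struct {
 *		u64 nr;
 *		u64 time_enabled;
 *		u64 time_running;
 *		struct { u64 value; u64 id; } values[nr];
 *	};
 *
 * which is what val/ena/run in perf_counts_values correspond to.
 */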