// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "map_symbol.h"
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
#include "pmu.h"
#include "pmu-hybrid.h"

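/*
 * Default load-latency threshold, in cycles: only loads that took at
 * least this long are sampled. It is formatted into the load event
 * string below via the "ldlat=%u" placeholder, and can be overridden
 * from the command line (e.g. "perf mem record --ldlat 50").
 */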
unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }

static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"cpu/mem-loads,ldlat=%u/P",	"cpu/events/mem-loads"),
	E("ldlat-stores",	"cpu/mem-stores/P",		"cpu/events/mem-stores"),
	E(NULL,			NULL,				NULL),
};
#undef E

static char mem_loads_name[100];
static bool mem_loads_name__init;

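/*
 * Weak default accessor for the event table above; architectures that
 * carry their own table (e.g. x86) override this.
 */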
struct perf_mem_event * __weak perf_mem_events__ptr(int i)
{
	if (i >= PERF_MEM_EVENTS__MAX)
		return NULL;

	return &perf_mem_events[i];
}

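/*
 * Weak default: return the event string for event 'i'. For the load
 * event the ldlat threshold is formatted into a static buffer once,
 * and the cached string is reused on later calls.
 */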
char * __weak perf_mem_events__name(int i, char *pmu_name __maybe_unused)
{
	struct perf_mem_event *e = perf_mem_events__ptr(i);

	if (!e)
		return NULL;

	if (i == PERF_MEM_EVENTS__LOAD) {
		if (!mem_loads_name__init) {
			mem_loads_name__init = true;
			scnprintf(mem_loads_name, sizeof(mem_loads_name),
				  e->name, perf_mem_events__loads_ldlat);
		}
		return mem_loads_name;
	}

	return (char *)e->name;
}

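/*
 * Weak stub: no auxiliary load event by default. Arch code can
 * override this (e.g. x86 pairs mem-loads with mem-loads-aux on some
 * Intel CPUs).
 */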
__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
{
	return false;
}

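/*
 * Parse the comma-separated event list given to "perf mem record -e",
 * e.g. "ldlat-loads" or "ldlat-loads,ldlat-stores", and mark the
 * matching table entries for recording.
 */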
int perf_mem_events__parse(const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer we know we can write to. */
	buf = malloc(strlen(str) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, str);

	tok = strtok_r((char *)buf, ",", &saveptr);
	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_mem_events__ptr(j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				e->record = found = true;
		}
		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}

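/*
 * An event is supported if its sysfs alias exists, e.g.
 * /sys/devices/cpu/events/mem-loads with a typical "/sys" sysfs mount.
 */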
static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
{
	char path[PATH_MAX];
	struct stat st;

	scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
	return !stat(path, &st);
}

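/*
 * Probe which mem events this system supports. On hybrid systems the
 * sysfs name is checked for every hybrid PMU (e.g. cpu_core and
 * cpu_atom); one supporting PMU is enough to mark the event supported.
 */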
int perf_mem_events__init(void)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);
		struct perf_pmu *pmu;
		char sysfs_name[100];

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" stays false.
		 */
		if (!e->tag)
			continue;

		if (!perf_pmu__has_hybrid()) {
			scnprintf(sysfs_name, sizeof(sysfs_name),
				  e->sysfs_name, "cpu");
			e->supported = perf_mem_event__supported(mnt, sysfs_name);
		} else {
			perf_pmu__for_each_hybrid_pmu(pmu) {
				scnprintf(sysfs_name, sizeof(sysfs_name),
					  e->sysfs_name, pmu->name);
				e->supported |= perf_mem_event__supported(mnt, sysfs_name);
			}
		}

		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}

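/* Implements "perf mem record -e list": dump the event table. */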
void perf_mem_events__list(void)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);

		fprintf(stderr, "%-13s%-*s%s\n",
			e->tag ?: "",
			verbose > 0 ? 25 : 0,
			verbose > 0 ? perf_mem_events__name(j, NULL) : "",
			e->supported ? ": available" : "");
	}
}

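/*
 * On hybrid systems, report each hybrid PMU on which event 'idx' is
 * missing from sysfs.
 */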
static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
						    int idx)
{
	const char *mnt = sysfs__mount();
	char sysfs_name[100];
	struct perf_pmu *pmu;

	perf_pmu__for_each_hybrid_pmu(pmu) {
		scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
			  pmu->name);
		if (!perf_mem_event__supported(mnt, sysfs_name)) {
			pr_err("failed: event '%s' not supported\n",
			       perf_mem_events__name(idx, pmu->name));
		}
	}
}

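/*
 * Append the "-e <event>" options for all marked events to the record
 * command line, e.g. "-e cpu/mem-loads,ldlat=30/P -e cpu/mem-stores/P"
 * on a typical non-hybrid x86 box. Strings duplicated for hybrid PMUs
 * are also stashed in rec_tmp so the caller can free them later.
 */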
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
				 char **rec_tmp, int *tmp_nr)
{
	int i = *argv_nr, k = 0;
	struct perf_mem_event *e;
	struct perf_pmu *pmu;
	char *s;

	for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		e = perf_mem_events__ptr(j);
		if (!e->record)
			continue;

		if (!perf_pmu__has_hybrid()) {
			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
				       perf_mem_events__name(j, NULL));
				return -1;
			}

			rec_argv[i++] = "-e";
			rec_argv[i++] = perf_mem_events__name(j, NULL);
		} else {
			if (!e->supported) {
				perf_mem_events__print_unsupport_hybrid(e, j);
				return -1;
			}

			perf_pmu__for_each_hybrid_pmu(pmu) {
				rec_argv[i++] = "-e";
				s = perf_mem_events__name(j, pmu->name);
				if (s) {
					s = strdup(s);
					if (!s)
						return -1;

					rec_argv[i++] = s;
					rec_tmp[k++] = s;
				}
			}
		}
	}

	*argv_nr = i;
	*tmp_nr = k;
	return 0;
}

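/*
 * The scnprintf helpers below decode the union perf_mem_data_src bit
 * fields (PERF_SAMPLE_DATA_SRC) into human-readable strings. Each
 * string table mirrors the bit order of the corresponding PERF_MEM_*
 * namespace in include/uapi/linux/perf_event.h.
 */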
static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};

static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' will be added to represent
	 * 'Remote core, same node' accesses, since the remote field
	 * needs to be set along with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info->data_src.mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

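/*
 * Decode the memory hierarchy level. Newer kernels encode the level in
 * mem_lvl_num/mem_hops; the older mem_lvl bit mask is still decoded
 * for compatibility.
 */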
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;
	int printed = 0;

	if (mem_info)
		m = mem_info->data_src.mem_lvl;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	if (mem_info && mem_info->data_src.mem_remote) {
		strcat(out, "Remote ");
		l += 7;
	}

	/*
	 * In case the mem_hops field is set, we can skip printing the
	 * data source via the PERF_MEM_LVL namespace.
	 */
	if (mem_info && mem_info->data_src.mem_hops) {
		l += scnprintf(out + l, sz - l, "%s ", mem_hops[mem_info->data_src.mem_hops]);
	} else {
		for (i = 0; m && i < ARRAY_SIZE(mem_lvl); i++, m >>= 1) {
			if (!(m & 0x1))
				continue;
			if (printed++) {
				strcat(out, " or ");
				l += 4;
			}
			l += scnprintf(out + l, sz - l, mem_lvl[i]);
		}
	}

	if (mem_info && mem_info->data_src.mem_lvl_num) {
		int lvl = mem_info->data_src.mem_lvl_num;

		if (printed++) {
			strcat(out, ", ");
			l += 2;
		}
		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "L%d", lvl);
	}

	if (l == 0)
		l += scnprintf(out + l, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoop_access[i]);
	}

	if (mem_info &&
	    (mem_info->data_src.mem_snoopx & PERF_MEM_SNOOPX_FWD)) {
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "Fwd");
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}

int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info->data_src.mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

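/*
 * Combine all decoders into one string for perf script, e.g.
 * "|OP LOAD|LVL L1 hit|SNP None|TLB L1 or L2 hit|LCK No|BLK  N/A"
 * (illustrative output).
 */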
int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}

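/*
 * Classify one sample's data_src for perf c2c: bump the load/store,
 * cache-level, DRAM and HITM counters that the c2c report aggregates.
 * Returns -1 when the sample cannot be fully accounted (no address,
 * no map, or unparsable data_src).
 */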
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = &mi->data_src;
	u64 daddr = mi->daddr.addr;
	u64 op    = data_src->mem_op;
	u64 lvl   = data_src->mem_lvl;
	u64 snoop = data_src->mem_snoop;
	u64 lock  = data_src->mem_lock;
	u64 blk   = data_src->mem_blk;
	/*
	 * Skylake might report unknown remote level via this
	 * bit, consider it when evaluating remote HITMs.
	 *
	 * On Power, the remote field can also denote cache accesses from
	 * another core on the same node. Hence, set mrem only when the
	 * remote field is set and HOPS is zero.
	 */
	bool mrem = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO))  stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
			if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
			if (lvl & P(LVL, L3 )) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT))
				stats->rmt_hit++;
			else if (snoop & P(SNOOP, HITM))
				HITM_INC(rmt_hitm);
		}

		if ((lvl & P(LVL, MISS)))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
	return err;
}

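/* Merge the counters in 'add' into 'stats', field by field. */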
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries += add->nr_entries;

	stats->locks      += add->locks;
	stats->store      += add->store;
	stats->st_uncache += add->st_uncache;
	stats->st_noadrs  += add->st_noadrs;
	stats->st_l1hit   += add->st_l1hit;
	stats->st_l1miss  += add->st_l1miss;
	stats->st_na      += add->st_na;
	stats->load       += add->load;
	stats->ld_excl    += add->ld_excl;
	stats->ld_shared  += add->ld_shared;
	stats->ld_uncache += add->ld_uncache;
	stats->ld_io      += add->ld_io;
	stats->ld_miss    += add->ld_miss;
	stats->ld_noadrs  += add->ld_noadrs;
	stats->ld_fbhit   += add->ld_fbhit;
	stats->ld_l1hit   += add->ld_l1hit;
	stats->ld_l2hit   += add->ld_l2hit;
	stats->ld_llchit  += add->ld_llchit;
	stats->lcl_hitm   += add->lcl_hitm;
	stats->rmt_hitm   += add->rmt_hitm;
	stats->tot_hitm   += add->tot_hitm;
	stats->rmt_hit    += add->rmt_hit;
	stats->lcl_dram   += add->lcl_dram;
	stats->rmt_dram   += add->rmt_dram;
	stats->blk_data   += add->blk_data;
	stats->blk_addr   += add->blk_addr;
	stats->nomap      += add->nomap;
	stats->noparse    += add->noparse;
}