1 // SPDX-License-Identifier: GPL-2.0
5 #include <linux/mman.h>
14 #include <traceevent/event-parse.h>
15 #include "mem-events.h"
16 #include <linux/kernel.h>
19 const char default_parent_pattern[] = "^sys_|^do_page_fault";
20 const char *parent_pattern = default_parent_pattern;
21 const char *default_sort_order = "comm,dso,symbol";
22 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
23 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
24 const char default_top_sort_order[] = "dso,symbol";
25 const char default_diff_sort_order[] = "dso,symbol";
26 const char default_tracepoint_sort_order[] = "trace";
27 const char *sort_order;
28 const char *field_order;
29 regex_t ignore_callees_regex;
30 int have_ignore_callees = 0;
31 enum sort_mode sort__mode = SORT_MODE__NORMAL;
34 * Replaces all occurrences of a char used with the:
36 * -t, --field-separator
38  * option, which uses a special separator character and doesn't pad with spaces,
39  * replacing all occurrences of this separator in symbol names (and other
40  * output) with a '.' character, so that it is the only invalid separator.
42 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
48 n = vsnprintf(bf, size, fmt, ap);
49 if (symbol_conf.field_sep && n > 0) {
53 sep = strchr(sep, *symbol_conf.field_sep);
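/*
 * Example of the substitution done by repsep_snprintf() above: with
 * "--field-separator ," a symbol named "foo,bar" is printed as "foo.bar",
 * so the separator stays unambiguous for anything splitting the output.
 */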
66 static int64_t cmp_null(const void *l, const void *r)
79 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
81 return right->thread->tid - left->thread->tid;
84 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
85 size_t size, unsigned int width)
87 const char *comm = thread__comm_str(he->thread);
89 width = max(7U, width) - 8;
90 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
91 width, width, comm ?: "");
94 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
96 const struct thread *th = arg;
98 if (type != HIST_FILTER__THREAD)
101 return th && he->thread != th;
104 struct sort_entry sort_thread = {
105 .se_header = " Pid:Command",
106 .se_cmp = sort__thread_cmp,
107 .se_snprintf = hist_entry__thread_snprintf,
108 .se_filter = hist_entry__thread_filter,
109 .se_width_idx = HISTC_THREAD,
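/*
 * Each --sort key is described by a struct sort_entry like sort_thread
 * above: se_cmp compares two hist entries for the key, se_collapse and
 * se_sort (when set) override it for the collapse and output-resort stages
 * (otherwise se_cmp is reused, see __sort__hpp_collapse()/__sort__hpp_sort()
 * below), se_snprintf formats the column, se_filter returns non-zero when
 * the entry does not match the active filter of that type, and
 * se_width_idx names the column-width slot tracked per hists.
 */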
115 * We can't use pointer comparison in functions below,
116 * because it gives different results based on pointer
117 * values, which could break some sorting assumptions.
120 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
122 return strcmp(comm__str(right->comm), comm__str(left->comm));
126 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
128 return strcmp(comm__str(right->comm), comm__str(left->comm));
132 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
134 return strcmp(comm__str(right->comm), comm__str(left->comm));
137 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
138 size_t size, unsigned int width)
140 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
143 struct sort_entry sort_comm = {
144 .se_header = "Command",
145 .se_cmp = sort__comm_cmp,
146 .se_collapse = sort__comm_collapse,
147 .se_sort = sort__comm_sort,
148 .se_snprintf = hist_entry__comm_snprintf,
149 .se_filter = hist_entry__thread_filter,
150 .se_width_idx = HISTC_COMM,
155 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
157 struct dso *dso_l = map_l ? map_l->dso : NULL;
158 struct dso *dso_r = map_r ? map_r->dso : NULL;
159 const char *dso_name_l, *dso_name_r;
161 if (!dso_l || !dso_r)
162 return cmp_null(dso_r, dso_l);
165 dso_name_l = dso_l->long_name;
166 dso_name_r = dso_r->long_name;
168 dso_name_l = dso_l->short_name;
169 dso_name_r = dso_r->short_name;
172 return strcmp(dso_name_l, dso_name_r);
176 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
178 return _sort__dso_cmp(right->ms.map, left->ms.map);
181 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
182 size_t size, unsigned int width)
184 if (map && map->dso) {
185 const char *dso_name = verbose > 0 ? map->dso->long_name :
186 map->dso->short_name;
187 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
190 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
193 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
194 size_t size, unsigned int width)
196 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
199 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
201 const struct dso *dso = arg;
203 if (type != HIST_FILTER__DSO)
206 return dso && (!he->ms.map || he->ms.map->dso != dso);
209 struct sort_entry sort_dso = {
210 .se_header = "Shared Object",
211 .se_cmp = sort__dso_cmp,
212 .se_snprintf = hist_entry__dso_snprintf,
213 .se_filter = hist_entry__dso_filter,
214 .se_width_idx = HISTC_DSO,
219 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
221 return (int64_t)(right_ip - left_ip);
224 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
226 if (!sym_l || !sym_r)
227 return cmp_null(sym_l, sym_r);
232 if (sym_l->inlined || sym_r->inlined) {
233 int ret = strcmp(sym_l->name, sym_r->name);
237 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
241 if (sym_l->start != sym_r->start)
242 return (int64_t)(sym_r->start - sym_l->start);
244 return (int64_t)(sym_r->end - sym_l->end);
248 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
252 if (!left->ms.sym && !right->ms.sym)
253 return _sort__addr_cmp(left->ip, right->ip);
256 * comparing symbol address alone is not enough since it's a
257 * relative address within a dso.
259 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
260 ret = sort__dso_cmp(left, right);
265 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
269 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
271 if (!left->ms.sym || !right->ms.sym)
272 return cmp_null(left->ms.sym, right->ms.sym);
274 return strcmp(right->ms.sym->name, left->ms.sym->name);
277 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
278 u64 ip, char level, char *bf, size_t size,
284 char o = map ? dso__symtab_origin(map->dso) : '!';
285 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
286 BITS_PER_LONG / 4 + 2, ip, o);
289 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
291 if (sym->type == STT_OBJECT) {
292 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
293 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
294 ip - map->unmap_ip(map, sym->start));
296 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
300 ret += repsep_snprintf(bf + ret, size - ret,
304 size_t len = BITS_PER_LONG / 4;
305 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
312 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
313 size_t size, unsigned int width)
315 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
316 he->level, bf, size, width);
319 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
321 const char *sym = arg;
323 if (type != HIST_FILTER__SYMBOL)
326 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
329 struct sort_entry sort_sym = {
330 .se_header = "Symbol",
331 .se_cmp = sort__sym_cmp,
332 .se_sort = sort__sym_sort,
333 .se_snprintf = hist_entry__sym_snprintf,
334 .se_filter = hist_entry__sym_filter,
335 .se_width_idx = HISTC_SYMBOL,
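/*
 * Symbol sorting in short: when neither side has a symbol, fall back to
 * comparing raw IPs; otherwise the containing DSOs are compared first when
 * needed (a symbol address is only meaningful relative to its dso), inlined
 * symbols are compared by name, and the rest by start and then end address.
 */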
340 char *hist_entry__srcline(struct hist_entry *he)
342 return map__srcline(he->ms.map, he->ip, he->ms.sym);
346 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
349 left->srcline = hist_entry__srcline(left);
351 right->srcline = hist_entry__srcline(right);
353 return strcmp(right->srcline, left->srcline);
356 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
357 size_t size, unsigned int width)
360 he->srcline = hist_entry__srcline(he);
362 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
365 struct sort_entry sort_srcline = {
366 .se_header = "Source:Line",
367 .se_cmp = sort__srcline_cmp,
368 .se_snprintf = hist_entry__srcline_snprintf,
369 .se_width_idx = HISTC_SRCLINE,
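/*
 * The srcline string is resolved lazily by hist_entry__srcline() and cached
 * on the hist_entry (see the cmp and snprintf callbacks above), so the
 * address-to-source lookup is done at most once per entry.
 */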
372 /* --sort srcline_from */
374 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
376 return map__srcline(ams->map, ams->al_addr, ams->sym);
380 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
382 if (!left->branch_info->srcline_from)
383 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
385 if (!right->branch_info->srcline_from)
386 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
388 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
391 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
392 size_t size, unsigned int width)
394 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
397 struct sort_entry sort_srcline_from = {
398 .se_header = "From Source:Line",
399 .se_cmp = sort__srcline_from_cmp,
400 .se_snprintf = hist_entry__srcline_from_snprintf,
401 .se_width_idx = HISTC_SRCLINE_FROM,
404 /* --sort srcline_to */
407 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
409 if (!left->branch_info->srcline_to)
410 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
412 if (!right->branch_info->srcline_to)
413 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
415 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
418 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
419 size_t size, unsigned int width)
421 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
424 struct sort_entry sort_srcline_to = {
425 .se_header = "To Source:Line",
426 .se_cmp = sort__srcline_to_cmp,
427 .se_snprintf = hist_entry__srcline_to_snprintf,
428 .se_width_idx = HISTC_SRCLINE_TO,
433 static char no_srcfile[1];
435 static char *hist_entry__get_srcfile(struct hist_entry *e)
438 struct map *map = e->ms.map;
443 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
444 e->ms.sym, false, true, true, e->ip);
445 if (!strcmp(sf, SRCLINE_UNKNOWN))
457 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
460 left->srcfile = hist_entry__get_srcfile(left);
462 right->srcfile = hist_entry__get_srcfile(right);
464 return strcmp(right->srcfile, left->srcfile);
467 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
468 size_t size, unsigned int width)
471 he->srcfile = hist_entry__get_srcfile(he);
473 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
476 struct sort_entry sort_srcfile = {
477 .se_header = "Source File",
478 .se_cmp = sort__srcfile_cmp,
479 .se_snprintf = hist_entry__srcfile_snprintf,
480 .se_width_idx = HISTC_SRCFILE,
486 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
488 struct symbol *sym_l = left->parent;
489 struct symbol *sym_r = right->parent;
491 if (!sym_l || !sym_r)
492 return cmp_null(sym_l, sym_r);
494 return strcmp(sym_r->name, sym_l->name);
497 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
498 size_t size, unsigned int width)
500 return repsep_snprintf(bf, size, "%-*.*s", width, width,
501 he->parent ? he->parent->name : "[other]");
504 struct sort_entry sort_parent = {
505 .se_header = "Parent symbol",
506 .se_cmp = sort__parent_cmp,
507 .se_snprintf = hist_entry__parent_snprintf,
508 .se_width_idx = HISTC_PARENT,
514 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
516 return right->cpu - left->cpu;
519 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
520 size_t size, unsigned int width)
522 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
525 struct sort_entry sort_cpu = {
527 .se_cmp = sort__cpu_cmp,
528 .se_snprintf = hist_entry__cpu_snprintf,
529 .se_width_idx = HISTC_CPU,
532 /* --sort cgroup_id */
534 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
536 return (int64_t)(right_dev - left_dev);
539 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
541 return (int64_t)(right_ino - left_ino);
545 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
549 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
553 return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
554 left->cgroup_id.ino);
557 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
558 char *bf, size_t size,
559 unsigned int width __maybe_unused)
561 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
565 struct sort_entry sort_cgroup_id = {
566 .se_header = "cgroup id (dev/inode)",
567 .se_cmp = sort__cgroup_id_cmp,
568 .se_snprintf = hist_entry__cgroup_id_snprintf,
569 .se_width_idx = HISTC_CGROUP_ID,
575 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
577 return right->socket - left->socket;
580 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
581 size_t size, unsigned int width)
583 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
586 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
588 int sk = *(const int *)arg;
590 if (type != HIST_FILTER__SOCKET)
593 return sk >= 0 && he->socket != sk;
596 struct sort_entry sort_socket = {
597 .se_header = "Socket",
598 .se_cmp = sort__socket_cmp,
599 .se_snprintf = hist_entry__socket_snprintf,
600 .se_filter = hist_entry__socket_filter,
601 .se_width_idx = HISTC_SOCKET,
606 static char *get_trace_output(struct hist_entry *he)
608 struct trace_seq seq;
609 struct perf_evsel *evsel;
610 struct tep_record rec = {
611 .data = he->raw_data,
612 .size = he->raw_size,
615 evsel = hists_to_evsel(he->hists);
617 trace_seq_init(&seq);
618 if (symbol_conf.raw_trace) {
619 tep_print_fields(&seq, he->raw_data, he->raw_size,
622 tep_event_info(&seq, evsel->tp_format, &rec);
625 * Trim the buffer: it starts at 4KB and we're not going to
626 * add anything more to it.
628 return realloc(seq.buffer, seq.len + 1);
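/*
 * get_trace_output() hands back a heap buffer (the realloc'ed trace_seq
 * buffer); callers below store it in he->trace_output so the tracepoint
 * data is pretty printed at most once per hist entry.
 */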
632 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
634 struct perf_evsel *evsel;
636 evsel = hists_to_evsel(left->hists);
637 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
640 if (left->trace_output == NULL)
641 left->trace_output = get_trace_output(left);
642 if (right->trace_output == NULL)
643 right->trace_output = get_trace_output(right);
645 return strcmp(right->trace_output, left->trace_output);
648 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
649 size_t size, unsigned int width)
651 struct perf_evsel *evsel;
653 evsel = hists_to_evsel(he->hists);
654 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
655 return scnprintf(bf, size, "%-.*s", width, "N/A");
657 if (he->trace_output == NULL)
658 he->trace_output = get_trace_output(he);
659 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
662 struct sort_entry sort_trace = {
663 .se_header = "Trace output",
664 .se_cmp = sort__trace_cmp,
665 .se_snprintf = hist_entry__trace_snprintf,
666 .se_width_idx = HISTC_TRACE,
669 /* sort keys for branch stacks */
672 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
674 if (!left->branch_info || !right->branch_info)
675 return cmp_null(left->branch_info, right->branch_info);
677 return _sort__dso_cmp(left->branch_info->from.map,
678 right->branch_info->from.map);
681 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
682 size_t size, unsigned int width)
685 return _hist_entry__dso_snprintf(he->branch_info->from.map,
688 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
691 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
694 const struct dso *dso = arg;
696 if (type != HIST_FILTER__DSO)
699 return dso && (!he->branch_info || !he->branch_info->from.map ||
700 he->branch_info->from.map->dso != dso);
704 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
706 if (!left->branch_info || !right->branch_info)
707 return cmp_null(left->branch_info, right->branch_info);
709 return _sort__dso_cmp(left->branch_info->to.map,
710 right->branch_info->to.map);
713 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
714 size_t size, unsigned int width)
717 return _hist_entry__dso_snprintf(he->branch_info->to.map,
720 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
723 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
726 const struct dso *dso = arg;
728 if (type != HIST_FILTER__DSO)
731 return dso && (!he->branch_info || !he->branch_info->to.map ||
732 he->branch_info->to.map->dso != dso);
736 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
738 struct addr_map_symbol *from_l, *from_r;
741 if (!left->branch_info || !right->branch_info)
742 return cmp_null(left->branch_info, right->branch_info);
744 from_l = &left->branch_info->from;
745 from_r = &right->branch_info->from;
747 if (!from_l->sym && !from_r->sym)
748 return _sort__addr_cmp(from_l->addr, from_r->addr);
750 return _sort__sym_cmp(from_l->sym, from_r->sym);
754 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
756 struct addr_map_symbol *to_l, *to_r;
758 if (!left->branch_info || !right->branch_info)
759 return cmp_null(left->branch_info, right->branch_info);
761 to_l = &left->branch_info->to;
762 to_r = &right->branch_info->to;
764 if (!to_l->sym && !to_r->sym)
765 return _sort__addr_cmp(to_l->addr, to_r->addr);
767 return _sort__sym_cmp(to_l->sym, to_r->sym);
770 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
771 size_t size, unsigned int width)
773 if (he->branch_info) {
774 struct addr_map_symbol *from = &he->branch_info->from;
776 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
777 he->level, bf, size, width);
780 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
783 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
784 size_t size, unsigned int width)
786 if (he->branch_info) {
787 struct addr_map_symbol *to = &he->branch_info->to;
789 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
790 he->level, bf, size, width);
793 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
796 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
799 const char *sym = arg;
801 if (type != HIST_FILTER__SYMBOL)
804 return sym && !(he->branch_info && he->branch_info->from.sym &&
805 strstr(he->branch_info->from.sym->name, sym));
808 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
811 const char *sym = arg;
813 if (type != HIST_FILTER__SYMBOL)
816 return sym && !(he->branch_info && he->branch_info->to.sym &&
817 strstr(he->branch_info->to.sym->name, sym));
820 struct sort_entry sort_dso_from = {
821 .se_header = "Source Shared Object",
822 .se_cmp = sort__dso_from_cmp,
823 .se_snprintf = hist_entry__dso_from_snprintf,
824 .se_filter = hist_entry__dso_from_filter,
825 .se_width_idx = HISTC_DSO_FROM,
828 struct sort_entry sort_dso_to = {
829 .se_header = "Target Shared Object",
830 .se_cmp = sort__dso_to_cmp,
831 .se_snprintf = hist_entry__dso_to_snprintf,
832 .se_filter = hist_entry__dso_to_filter,
833 .se_width_idx = HISTC_DSO_TO,
836 struct sort_entry sort_sym_from = {
837 .se_header = "Source Symbol",
838 .se_cmp = sort__sym_from_cmp,
839 .se_snprintf = hist_entry__sym_from_snprintf,
840 .se_filter = hist_entry__sym_from_filter,
841 .se_width_idx = HISTC_SYMBOL_FROM,
844 struct sort_entry sort_sym_to = {
845 .se_header = "Target Symbol",
846 .se_cmp = sort__sym_to_cmp,
847 .se_snprintf = hist_entry__sym_to_snprintf,
848 .se_filter = hist_entry__sym_to_filter,
849 .se_width_idx = HISTC_SYMBOL_TO,
853 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
857 if (!left->branch_info || !right->branch_info)
858 return cmp_null(left->branch_info, right->branch_info);
860 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
861 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
865 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
866 size_t size, unsigned int width)
{
867 static const char *out = "N/A";
869 if (he->branch_info) {
870 if (he->branch_info->flags.predicted)
872 else if (he->branch_info->flags.mispred)
876 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
880 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
882 if (!left->branch_info || !right->branch_info)
883 return cmp_null(left->branch_info, right->branch_info);
885 return left->branch_info->flags.cycles -
886 right->branch_info->flags.cycles;
889 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
890 size_t size, unsigned int width)
892 if (!he->branch_info)
893 return scnprintf(bf, size, "%-.*s", width, "N/A");
894 if (he->branch_info->flags.cycles == 0)
895 return repsep_snprintf(bf, size, "%-*s", width, "-");
896 return repsep_snprintf(bf, size, "%-*hd", width,
897 he->branch_info->flags.cycles);
900 struct sort_entry sort_cycles = {
901 .se_header = "Basic Block Cycles",
902 .se_cmp = sort__cycles_cmp,
903 .se_snprintf = hist_entry__cycles_snprintf,
904 .se_width_idx = HISTC_CYCLES,
907 /* --sort daddr_sym */
909 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
911 uint64_t l = 0, r = 0;
914 l = left->mem_info->daddr.addr;
916 r = right->mem_info->daddr.addr;
918 return (int64_t)(r - l);
921 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
922 size_t size, unsigned int width)
925 struct map *map = NULL;
926 struct symbol *sym = NULL;
929 addr = he->mem_info->daddr.addr;
930 map = he->mem_info->daddr.map;
931 sym = he->mem_info->daddr.sym;
933 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
938 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
940 uint64_t l = 0, r = 0;
943 l = left->mem_info->iaddr.addr;
945 r = right->mem_info->iaddr.addr;
947 return (int64_t)(r - l);
950 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
951 size_t size, unsigned int width)
954 struct map *map = NULL;
955 struct symbol *sym = NULL;
958 addr = he->mem_info->iaddr.addr;
959 map = he->mem_info->iaddr.map;
960 sym = he->mem_info->iaddr.sym;
962 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
967 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
969 struct map *map_l = NULL;
970 struct map *map_r = NULL;
973 map_l = left->mem_info->daddr.map;
975 map_r = right->mem_info->daddr.map;
977 return _sort__dso_cmp(map_l, map_r);
980 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
981 size_t size, unsigned int width)
983 struct map *map = NULL;
986 map = he->mem_info->daddr.map;
988 return _hist_entry__dso_snprintf(map, bf, size, width);
992 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
994 union perf_mem_data_src data_src_l;
995 union perf_mem_data_src data_src_r;
998 data_src_l = left->mem_info->data_src;
1000 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1002 if (right->mem_info)
1003 data_src_r = right->mem_info->data_src;
1005 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1007 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1010 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1011 size_t size, unsigned int width)
1015 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1016 return repsep_snprintf(bf, size, "%.*s", width, out);
1020 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1022 union perf_mem_data_src data_src_l;
1023 union perf_mem_data_src data_src_r;
1026 data_src_l = left->mem_info->data_src;
1028 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1030 if (right->mem_info)
1031 data_src_r = right->mem_info->data_src;
1033 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1035 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1038 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1039 size_t size, unsigned int width)
1043 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1044 return repsep_snprintf(bf, size, "%-*s", width, out);
1048 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1050 union perf_mem_data_src data_src_l;
1051 union perf_mem_data_src data_src_r;
1054 data_src_l = left->mem_info->data_src;
1056 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1058 if (right->mem_info)
1059 data_src_r = right->mem_info->data_src;
1061 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1063 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1066 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1067 size_t size, unsigned int width)
1071 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1072 return repsep_snprintf(bf, size, "%-*s", width, out);
1076 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1078 union perf_mem_data_src data_src_l;
1079 union perf_mem_data_src data_src_r;
1082 data_src_l = left->mem_info->data_src;
1084 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1086 if (right->mem_info)
1087 data_src_r = right->mem_info->data_src;
1089 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1091 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1094 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1095 size_t size, unsigned int width)
1099 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1100 return repsep_snprintf(bf, size, "%-*s", width, out);
1104 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1107 struct map *l_map, *r_map;
1109 if (!left->mem_info) return -1;
1110 if (!right->mem_info) return 1;
1112 /* group event types together */
1113 if (left->cpumode > right->cpumode) return -1;
1114 if (left->cpumode < right->cpumode) return 1;
1116 l_map = left->mem_info->daddr.map;
1117 r_map = right->mem_info->daddr.map;
1119 /* if both are NULL, jump to sort on al_addr instead */
1120 if (!l_map && !r_map)
1123 if (!l_map) return -1;
1124 if (!r_map) return 1;
1126 if (l_map->maj > r_map->maj) return -1;
1127 if (l_map->maj < r_map->maj) return 1;
1129 if (l_map->min > r_map->min) return -1;
1130 if (l_map->min < r_map->min) return 1;
1132 if (l_map->ino > r_map->ino) return -1;
1133 if (l_map->ino < r_map->ino) return 1;
1135 if (l_map->ino_generation > r_map->ino_generation) return -1;
1136 if (l_map->ino_generation < r_map->ino_generation) return 1;
1139 * Addresses with no major/minor numbers are assumed to be
1140 * anonymous in userspace. Sort those on pid then address.
1142 * The kernel and non-zero major/minor mapped areas are
1143 * assumed to be unity mapped. Sort those on address.
1146 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1147 (!(l_map->flags & MAP_SHARED)) &&
1148 !l_map->maj && !l_map->min && !l_map->ino &&
1149 !l_map->ino_generation) {
1150 /* userspace anonymous */
1152 if (left->thread->pid_ > right->thread->pid_) return -1;
1153 if (left->thread->pid_ < right->thread->pid_) return 1;
1157 /* al_addr does all the right addr - start + offset calculations */
1158 l = cl_address(left->mem_info->daddr.al_addr);
1159 r = cl_address(right->mem_info->daddr.al_addr);
1161 if (l > r) return -1;
1162 if (l < r) return 1;
1167 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1168 size_t size, unsigned int width)
1172 struct map *map = NULL;
1173 struct symbol *sym = NULL;
1174 char level = he->level;
1177 addr = cl_address(he->mem_info->daddr.al_addr);
1178 map = he->mem_info->daddr.map;
1179 sym = he->mem_info->daddr.sym;
1181 /* print [s] for shared data mmaps */
1182 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1183 map && !(map->prot & PROT_EXEC) &&
1184 (map->flags & MAP_SHARED) &&
1185 (map->maj || map->min || map->ino ||
1186 map->ino_generation))
1191 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1195 struct sort_entry sort_mispredict = {
1196 .se_header = "Branch Mispredicted",
1197 .se_cmp = sort__mispredict_cmp,
1198 .se_snprintf = hist_entry__mispredict_snprintf,
1199 .se_width_idx = HISTC_MISPREDICT,
1202 static u64 he_weight(struct hist_entry *he)
1204 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1208 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1210 return he_weight(left) - he_weight(right);
1213 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1214 size_t size, unsigned int width)
1216 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1219 struct sort_entry sort_local_weight = {
1220 .se_header = "Local Weight",
1221 .se_cmp = sort__local_weight_cmp,
1222 .se_snprintf = hist_entry__local_weight_snprintf,
1223 .se_width_idx = HISTC_LOCAL_WEIGHT,
1227 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1229 return left->stat.weight - right->stat.weight;
1232 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1233 size_t size, unsigned int width)
1235 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1238 struct sort_entry sort_global_weight = {
1239 .se_header = "Weight",
1240 .se_cmp = sort__global_weight_cmp,
1241 .se_snprintf = hist_entry__global_weight_snprintf,
1242 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1245 struct sort_entry sort_mem_daddr_sym = {
1246 .se_header = "Data Symbol",
1247 .se_cmp = sort__daddr_cmp,
1248 .se_snprintf = hist_entry__daddr_snprintf,
1249 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1252 struct sort_entry sort_mem_iaddr_sym = {
1253 .se_header = "Code Symbol",
1254 .se_cmp = sort__iaddr_cmp,
1255 .se_snprintf = hist_entry__iaddr_snprintf,
1256 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1259 struct sort_entry sort_mem_daddr_dso = {
1260 .se_header = "Data Object",
1261 .se_cmp = sort__dso_daddr_cmp,
1262 .se_snprintf = hist_entry__dso_daddr_snprintf,
1263 .se_width_idx = HISTC_MEM_DADDR_DSO,
1266 struct sort_entry sort_mem_locked = {
1267 .se_header = "Locked",
1268 .se_cmp = sort__locked_cmp,
1269 .se_snprintf = hist_entry__locked_snprintf,
1270 .se_width_idx = HISTC_MEM_LOCKED,
1273 struct sort_entry sort_mem_tlb = {
1274 .se_header = "TLB access",
1275 .se_cmp = sort__tlb_cmp,
1276 .se_snprintf = hist_entry__tlb_snprintf,
1277 .se_width_idx = HISTC_MEM_TLB,
1280 struct sort_entry sort_mem_lvl = {
1281 .se_header = "Memory access",
1282 .se_cmp = sort__lvl_cmp,
1283 .se_snprintf = hist_entry__lvl_snprintf,
1284 .se_width_idx = HISTC_MEM_LVL,
1287 struct sort_entry sort_mem_snoop = {
1288 .se_header = "Snoop",
1289 .se_cmp = sort__snoop_cmp,
1290 .se_snprintf = hist_entry__snoop_snprintf,
1291 .se_width_idx = HISTC_MEM_SNOOP,
1294 struct sort_entry sort_mem_dcacheline = {
1295 .se_header = "Data Cacheline",
1296 .se_cmp = sort__dcacheline_cmp,
1297 .se_snprintf = hist_entry__dcacheline_snprintf,
1298 .se_width_idx = HISTC_MEM_DCACHELINE,
1302 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1304 uint64_t l = 0, r = 0;
1307 l = left->mem_info->daddr.phys_addr;
1308 if (right->mem_info)
1309 r = right->mem_info->daddr.phys_addr;
1311 return (int64_t)(r - l);
1314 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1315 size_t size, unsigned int width)
1319 size_t len = BITS_PER_LONG / 4;
1321 addr = he->mem_info->daddr.phys_addr;
1323 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1325 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1327 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1335 struct sort_entry sort_mem_phys_daddr = {
1336 .se_header = "Data Physical Address",
1337 .se_cmp = sort__phys_daddr_cmp,
1338 .se_snprintf = hist_entry__phys_daddr_snprintf,
1339 .se_width_idx = HISTC_MEM_PHYS_DADDR,
1343 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1345 if (!left->branch_info || !right->branch_info)
1346 return cmp_null(left->branch_info, right->branch_info);
1348 return left->branch_info->flags.abort !=
1349 right->branch_info->flags.abort;
1352 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1353 size_t size, unsigned int width)
1355 static const char *out = "N/A";
1357 if (he->branch_info) {
1358 if (he->branch_info->flags.abort)
1364 return repsep_snprintf(bf, size, "%-*s", width, out);
1367 struct sort_entry sort_abort = {
1368 .se_header = "Transaction abort",
1369 .se_cmp = sort__abort_cmp,
1370 .se_snprintf = hist_entry__abort_snprintf,
1371 .se_width_idx = HISTC_ABORT,
1375 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1377 if (!left->branch_info || !right->branch_info)
1378 return cmp_null(left->branch_info, right->branch_info);
1380 return left->branch_info->flags.in_tx !=
1381 right->branch_info->flags.in_tx;
1384 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1385 size_t size, unsigned int width)
1387 static const char *out = "N/A";
1389 if (he->branch_info) {
1390 if (he->branch_info->flags.in_tx)
1396 return repsep_snprintf(bf, size, "%-*s", width, out);
1399 struct sort_entry sort_in_tx = {
1400 .se_header = "Branch in transaction",
1401 .se_cmp = sort__in_tx_cmp,
1402 .se_snprintf = hist_entry__in_tx_snprintf,
1403 .se_width_idx = HISTC_IN_TX,
1407 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1409 return left->transaction - right->transaction;
1412 static inline char *add_str(char *p, const char *str)
1415 return p + strlen(str);
1418 static struct txbit {
1423 { PERF_TXN_ELISION, "EL ", 0 },
1424 { PERF_TXN_TRANSACTION, "TX ", 1 },
1425 { PERF_TXN_SYNC, "SYNC ", 1 },
1426 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1427 { PERF_TXN_RETRY, "RETRY ", 0 },
1428 { PERF_TXN_CONFLICT, "CON ", 0 },
1429 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1430 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
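/*
 * skip_for_len marks flags whose names are not counted towards the default
 * column width computed by hist_entry__transaction_len() below; the extra
 * 4 bytes there account for the ":XX " abort-code suffix.
 */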
1434 int hist_entry__transaction_len(void)
1439 for (i = 0; txbits[i].name; i++) {
1440 if (!txbits[i].skip_for_len)
1441 len += strlen(txbits[i].name);
1443 len += 4; /* :XX<space> */
1447 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1448 size_t size, unsigned int width)
1450 u64 t = he->transaction;
1456 for (i = 0; txbits[i].name; i++)
1457 if (txbits[i].flag & t)
1458 p = add_str(p, txbits[i].name);
1459 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1460 p = add_str(p, "NEITHER ");
1461 if (t & PERF_TXN_ABORT_MASK) {
1462 sprintf(p, ":%" PRIx64,
1463 (t & PERF_TXN_ABORT_MASK) >>
1464 PERF_TXN_ABORT_SHIFT);
1468 return repsep_snprintf(bf, size, "%-*s", width, buf);
1471 struct sort_entry sort_transaction = {
1472 .se_header = "Transaction ",
1473 .se_cmp = sort__transaction_cmp,
1474 .se_snprintf = hist_entry__transaction_snprintf,
1475 .se_width_idx = HISTC_TRANSACTION,
1478 /* --sort symbol_size */
1480 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1482 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1483 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1485 return size_l < size_r ? -1 :
1486 size_l == size_r ? 0 : 1;
1490 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1492 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
1495 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1496 size_t bf_size, unsigned int width)
1499 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1501 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1504 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
1505 size_t size, unsigned int width)
1507 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
1510 struct sort_entry sort_sym_size = {
1511 .se_header = "Symbol size",
1512 .se_cmp = sort__sym_size_cmp,
1513 .se_snprintf = hist_entry__sym_size_snprintf,
1514 .se_width_idx = HISTC_SYM_SIZE,
1517 /* --sort dso_size */
1519 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
1521 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
1522 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
1524 return size_l < size_r ? -1 :
1525 size_l == size_r ? 0 : 1;
1529 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
1531 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
1534 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
1535 size_t bf_size, unsigned int width)
1537 if (map && map->dso)
1538 return repsep_snprintf(bf, bf_size, "%*d", width,
1541 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1544 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
1545 size_t size, unsigned int width)
1547 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
1550 struct sort_entry sort_dso_size = {
1551 .se_header = "DSO size",
1552 .se_cmp = sort__dso_size_cmp,
1553 .se_snprintf = hist_entry__dso_size_snprintf,
1554 .se_width_idx = HISTC_DSO_SIZE,
1558 struct sort_dimension {
1560 struct sort_entry *entry;
1564 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1566 static struct sort_dimension common_sort_dimensions[] = {
1567 DIM(SORT_PID, "pid", sort_thread),
1568 DIM(SORT_COMM, "comm", sort_comm),
1569 DIM(SORT_DSO, "dso", sort_dso),
1570 DIM(SORT_SYM, "symbol", sort_sym),
1571 DIM(SORT_PARENT, "parent", sort_parent),
1572 DIM(SORT_CPU, "cpu", sort_cpu),
1573 DIM(SORT_SOCKET, "socket", sort_socket),
1574 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1575 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1576 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1577 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1578 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1579 DIM(SORT_TRACE, "trace", sort_trace),
1580 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
1581 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1582 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
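/*
 * These dimension tables map --sort key names to their sort_entry.  The
 * common keys above are always available; the branch-stack and memory
 * tables below are indexed relative to __SORT_BRANCH_STACK and
 * __SORT_MEMORY_MODE and are only accepted in the matching sort mode (see
 * sort_dimension__add()).
 */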
1587 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1589 static struct sort_dimension bstack_sort_dimensions[] = {
1590 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1591 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1592 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1593 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1594 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1595 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1596 DIM(SORT_ABORT, "abort", sort_abort),
1597 DIM(SORT_CYCLES, "cycles", sort_cycles),
1598 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1599 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1604 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1606 static struct sort_dimension memory_sort_dimensions[] = {
1607 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1608 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1609 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1610 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1611 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1612 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1613 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1614 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1615 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
1620 struct hpp_dimension {
1622 struct perf_hpp_fmt *fmt;
1626 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1628 static struct hpp_dimension hpp_sort_dimensions[] = {
1629 DIM(PERF_HPP__OVERHEAD, "overhead"),
1630 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1631 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1632 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1633 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1634 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1635 DIM(PERF_HPP__SAMPLES, "sample"),
1636 DIM(PERF_HPP__PERIOD, "period"),
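/*
 * hpp dimensions are the computed output columns (overhead percentages,
 * sample and period counts); they reuse the perf_hpp__format[] formatters
 * rather than a struct sort_entry.
 */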
1641 struct hpp_sort_entry {
1642 struct perf_hpp_fmt hpp;
1643 struct sort_entry *se;
1646 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1648 struct hpp_sort_entry *hse;
1650 if (!perf_hpp__is_sort_entry(fmt))
1653 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1654 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1657 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1658 struct hists *hists, int line __maybe_unused,
1659 int *span __maybe_unused)
1661 struct hpp_sort_entry *hse;
1662 size_t len = fmt->user_len;
1664 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1667 len = hists__col_len(hists, hse->se->se_width_idx);
1669 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1672 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1673 struct perf_hpp *hpp __maybe_unused,
1674 struct hists *hists)
1676 struct hpp_sort_entry *hse;
1677 size_t len = fmt->user_len;
1679 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1682 len = hists__col_len(hists, hse->se->se_width_idx);
1687 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1688 struct hist_entry *he)
1690 struct hpp_sort_entry *hse;
1691 size_t len = fmt->user_len;
1693 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1696 len = hists__col_len(he->hists, hse->se->se_width_idx);
1698 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1701 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1702 struct hist_entry *a, struct hist_entry *b)
1704 struct hpp_sort_entry *hse;
1706 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1707 return hse->se->se_cmp(a, b);
1710 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1711 struct hist_entry *a, struct hist_entry *b)
1713 struct hpp_sort_entry *hse;
1714 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1716 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1717 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1718 return collapse_fn(a, b);
1721 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1722 struct hist_entry *a, struct hist_entry *b)
1724 struct hpp_sort_entry *hse;
1725 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1727 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1728 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1729 return sort_fn(a, b);
1732 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1734 return format->header == __sort__hpp_header;
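/*
 * Every sort-entry format shares __sort__hpp_header (set in
 * __sort_dimension__alloc_hpp() below), so comparing the header callback is
 * enough to distinguish sort entries from hpp and dynamic formats.
 */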
1737 #define MK_SORT_ENTRY_CHK(key) \
1738 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
1740 struct hpp_sort_entry *hse; \
1742 if (!perf_hpp__is_sort_entry(fmt)) \
1745 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
1746 return hse->se == &sort_ ## key ; \
1749 MK_SORT_ENTRY_CHK(trace)
1750 MK_SORT_ENTRY_CHK(srcline)
1751 MK_SORT_ENTRY_CHK(srcfile)
1752 MK_SORT_ENTRY_CHK(thread)
1753 MK_SORT_ENTRY_CHK(comm)
1754 MK_SORT_ENTRY_CHK(dso)
1755 MK_SORT_ENTRY_CHK(sym)
1758 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1760 struct hpp_sort_entry *hse_a;
1761 struct hpp_sort_entry *hse_b;
1763 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1766 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1767 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1769 return hse_a->se == hse_b->se;
1772 static void hse_free(struct perf_hpp_fmt *fmt)
1774 struct hpp_sort_entry *hse;
1776 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1780 static struct hpp_sort_entry *
1781 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1783 struct hpp_sort_entry *hse;
1785 hse = malloc(sizeof(*hse));
1787 pr_err("Memory allocation failed\n");
1791 hse->se = sd->entry;
1792 hse->hpp.name = sd->entry->se_header;
1793 hse->hpp.header = __sort__hpp_header;
1794 hse->hpp.width = __sort__hpp_width;
1795 hse->hpp.entry = __sort__hpp_entry;
1796 hse->hpp.color = NULL;
1798 hse->hpp.cmp = __sort__hpp_cmp;
1799 hse->hpp.collapse = __sort__hpp_collapse;
1800 hse->hpp.sort = __sort__hpp_sort;
1801 hse->hpp.equal = __sort__hpp_equal;
1802 hse->hpp.free = hse_free;
1804 INIT_LIST_HEAD(&hse->hpp.list);
1805 INIT_LIST_HEAD(&hse->hpp.sort_list);
1806 hse->hpp.elide = false;
1808 hse->hpp.user_len = 0;
1809 hse->hpp.level = level;
1814 static void hpp_free(struct perf_hpp_fmt *fmt)
1819 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1822 struct perf_hpp_fmt *fmt;
1824 fmt = memdup(hd->fmt, sizeof(*fmt));
1826 INIT_LIST_HEAD(&fmt->list);
1827 INIT_LIST_HEAD(&fmt->sort_list);
1828 fmt->free = hpp_free;
1835 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1837 struct perf_hpp_fmt *fmt;
1838 struct hpp_sort_entry *hse;
1842 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1843 if (!perf_hpp__is_sort_entry(fmt))
1846 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1847 if (hse->se->se_filter == NULL)
1851 * A hist entry is filtered if any of the sort keys in the hpp list
1852 * is applied, but non-matching filter types should be skipped.
1854 r = hse->se->se_filter(he, type, arg);
1865 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1866 struct perf_hpp_list *list,
1869 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1874 perf_hpp_list__register_sort_field(list, &hse->hpp);
1878 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1879 struct perf_hpp_list *list)
1881 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1886 perf_hpp_list__column_register(list, &hse->hpp);
1890 struct hpp_dynamic_entry {
1891 struct perf_hpp_fmt hpp;
1892 struct perf_evsel *evsel;
1893 struct format_field *field;
1894 unsigned dynamic_len;
1898 static int hde_width(struct hpp_dynamic_entry *hde)
1900 if (!hde->hpp.len) {
1901 int len = hde->dynamic_len;
1902 int namelen = strlen(hde->field->name);
1903 int fieldlen = hde->field->size;
1908 if (!(hde->field->flags & FIELD_IS_STRING)) {
1909 /* length for printing hex numbers */
1910 fieldlen = hde->field->size * 2 + 2;
1917 return hde->hpp.len;
1920 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1921 struct hist_entry *he)
1924 struct format_field *field = hde->field;
1931 /* parse pretty print result and update max length */
1932 if (!he->trace_output)
1933 he->trace_output = get_trace_output(he);
1935 namelen = strlen(field->name);
1936 str = he->trace_output;
1939 pos = strchr(str, ' ');
1942 pos = str + strlen(str);
1945 if (!strncmp(str, field->name, namelen)) {
1951 if (len > hde->dynamic_len)
1952 hde->dynamic_len = len;
1963 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1964 struct hists *hists __maybe_unused,
1965 int line __maybe_unused,
1966 int *span __maybe_unused)
1968 struct hpp_dynamic_entry *hde;
1969 size_t len = fmt->user_len;
1971 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1974 len = hde_width(hde);
1976 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1979 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1980 struct perf_hpp *hpp __maybe_unused,
1981 struct hists *hists __maybe_unused)
1983 struct hpp_dynamic_entry *hde;
1984 size_t len = fmt->user_len;
1986 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1989 len = hde_width(hde);
1994 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1996 struct hpp_dynamic_entry *hde;
1998 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2000 return hists_to_evsel(hists) == hde->evsel;
2003 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2004 struct hist_entry *he)
2006 struct hpp_dynamic_entry *hde;
2007 size_t len = fmt->user_len;
2009 struct format_field *field;
2014 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2017 len = hde_width(hde);
2022 if (!he->trace_output)
2023 he->trace_output = get_trace_output(he);
2026 namelen = strlen(field->name);
2027 str = he->trace_output;
2030 pos = strchr(str, ' ');
2033 pos = str + strlen(str);
2036 if (!strncmp(str, field->name, namelen)) {
2038 str = strndup(str, pos - str);
2041 return scnprintf(hpp->buf, hpp->size,
2042 "%*.*s", len, len, "ERROR");
2053 struct trace_seq seq;
2055 trace_seq_init(&seq);
2056 tep_print_field(&seq, he->raw_data, hde->field);
2060 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2065 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2066 struct hist_entry *a, struct hist_entry *b)
2068 struct hpp_dynamic_entry *hde;
2069 struct format_field *field;
2070 unsigned offset, size;
2072 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2075 update_dynamic_len(hde, a);
2080 if (field->flags & FIELD_IS_DYNAMIC) {
2081 unsigned long long dyn;
2083 tep_read_number_field(field, a->raw_data, &dyn);
2084 offset = dyn & 0xffff;
2085 size = (dyn >> 16) & 0xffff;
2087 /* record max width for output */
2088 if (size > hde->dynamic_len)
2089 hde->dynamic_len = size;
2091 offset = field->offset;
2095 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
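/*
 * For FIELD_IS_DYNAMIC tracepoint fields the recorded value packs the
 * payload location as offset (low 16 bits) and size (high 16 bits), which
 * the comparison above unpacks before memcmp()ing the raw bytes; fixed-size
 * fields are compared in place at field->offset.
 */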
2098 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2100 return fmt->cmp == __sort__hde_cmp;
2103 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2105 struct hpp_dynamic_entry *hde_a;
2106 struct hpp_dynamic_entry *hde_b;
2108 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2111 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2112 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2114 return hde_a->field == hde_b->field;
2117 static void hde_free(struct perf_hpp_fmt *fmt)
2119 struct hpp_dynamic_entry *hde;
2121 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2125 static struct hpp_dynamic_entry *
2126 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
2129 struct hpp_dynamic_entry *hde;
2131 hde = malloc(sizeof(*hde));
2133 pr_debug("Memory allocation failed\n");
2139 hde->dynamic_len = 0;
2141 hde->hpp.name = field->name;
2142 hde->hpp.header = __sort__hde_header;
2143 hde->hpp.width = __sort__hde_width;
2144 hde->hpp.entry = __sort__hde_entry;
2145 hde->hpp.color = NULL;
2147 hde->hpp.cmp = __sort__hde_cmp;
2148 hde->hpp.collapse = __sort__hde_cmp;
2149 hde->hpp.sort = __sort__hde_cmp;
2150 hde->hpp.equal = __sort__hde_equal;
2151 hde->hpp.free = hde_free;
2153 INIT_LIST_HEAD(&hde->hpp.list);
2154 INIT_LIST_HEAD(&hde->hpp.sort_list);
2155 hde->hpp.elide = false;
2157 hde->hpp.user_len = 0;
2158 hde->hpp.level = level;
2163 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2165 struct perf_hpp_fmt *new_fmt = NULL;
2167 if (perf_hpp__is_sort_entry(fmt)) {
2168 struct hpp_sort_entry *hse, *new_hse;
2170 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2171 new_hse = memdup(hse, sizeof(*hse));
2173 new_fmt = &new_hse->hpp;
2174 } else if (perf_hpp__is_dynamic_entry(fmt)) {
2175 struct hpp_dynamic_entry *hde, *new_hde;
2177 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2178 new_hde = memdup(hde, sizeof(*hde));
2180 new_fmt = &new_hde->hpp;
2182 new_fmt = memdup(fmt, sizeof(*fmt));
2185 INIT_LIST_HEAD(&new_fmt->list);
2186 INIT_LIST_HEAD(&new_fmt->sort_list);
2191 static int parse_field_name(char *str, char **event, char **field, char **opt)
2193 char *event_name, *field_name, *opt_name;
2196 field_name = strchr(str, '.');
2199 *field_name++ = '\0';
2205 opt_name = strchr(field_name, '/');
2209 *event = event_name;
2210 *field = field_name;
2216 /* find the matching evsel using a given event name. The event name can be:
2217 * 1. '%' + event index (e.g. '%1' for first event)
2218 * 2. full event name (e.g. sched:sched_switch)
2219 * 3. partial event name (should not contain ':')
2221 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
2223 struct perf_evsel *evsel = NULL;
2224 struct perf_evsel *pos;
2228 if (event_name[0] == '%') {
2229 int nr = strtol(event_name+1, NULL, 0);
2231 if (nr > evlist->nr_entries)
2234 evsel = perf_evlist__first(evlist);
2236 evsel = perf_evsel__next(evsel);
2241 full_name = !!strchr(event_name, ':');
2242 evlist__for_each_entry(evlist, pos) {
2244 if (full_name && !strcmp(pos->name, event_name))
2247 if (!full_name && strstr(pos->name, event_name)) {
2249 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2250 event_name, evsel->name, pos->name);
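/*
 * Examples accepted by find_evsel(): "%2" picks the second event in the
 * evlist, "sched:sched_switch" matches a full event name, and a partial
 * name like "sched_switch" matches by substring, with ambiguous matches
 * reported via the pr_debug() above.
 */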
2260 static int __dynamic_dimension__add(struct perf_evsel *evsel,
2261 struct format_field *field,
2262 bool raw_trace, int level)
2264 struct hpp_dynamic_entry *hde;
2266 hde = __alloc_dynamic_entry(evsel, field, level);
2270 hde->raw_trace = raw_trace;
2272 perf_hpp__register_sort_field(&hde->hpp);
2276 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2279 struct format_field *field;
2281 field = evsel->tp_format->format.fields;
2283 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2287 field = field->next;
2292 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2296 struct perf_evsel *evsel;
2298 evlist__for_each_entry(evlist, evsel) {
2299 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2302 ret = add_evsel_fields(evsel, raw_trace, level);
2309 static int add_all_matching_fields(struct perf_evlist *evlist,
2310 char *field_name, bool raw_trace, int level)
2313 struct perf_evsel *evsel;
2314 struct format_field *field;
2316 evlist__for_each_entry(evlist, evsel) {
2317 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2320 field = tep_find_any_field(evsel->tp_format, field_name);
2324 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2331 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2334 char *str, *event_name, *field_name, *opt_name;
2335 struct perf_evsel *evsel;
2336 struct format_field *field;
2337 bool raw_trace = symbol_conf.raw_trace;
2347 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2353 if (strcmp(opt_name, "raw")) {
2354 pr_debug("unsupported field option %s\n", opt_name);
2361 if (!strcmp(field_name, "trace_fields")) {
2362 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2366 if (event_name == NULL) {
2367 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2371 evsel = find_evsel(evlist, event_name);
2372 if (evsel == NULL) {
2373 pr_debug("Cannot find event: %s\n", event_name);
2378 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2379 pr_debug("%s is not a tracepoint event\n", event_name);
2384 if (!strcmp(field_name, "*")) {
2385 ret = add_evsel_fields(evsel, raw_trace, level);
2387 field = tep_find_any_field(evsel->tp_format, field_name);
2388 if (field == NULL) {
2389 pr_debug("Cannot find event field for %s.%s\n",
2390 event_name, field_name);
2394 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2402 static int __sort_dimension__add(struct sort_dimension *sd,
2403 struct perf_hpp_list *list,
2409 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2412 if (sd->entry->se_collapse)
2413 list->need_collapse = 1;
2420 static int __hpp_dimension__add(struct hpp_dimension *hd,
2421 struct perf_hpp_list *list,
2424 struct perf_hpp_fmt *fmt;
2429 fmt = __hpp_dimension__alloc_hpp(hd, level);
2434 perf_hpp_list__register_sort_field(list, fmt);
2438 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2439 struct sort_dimension *sd)
2444 if (__sort_dimension__add_hpp_output(sd, list) < 0)
2451 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2452 struct hpp_dimension *hd)
2454 struct perf_hpp_fmt *fmt;
2459 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2464 perf_hpp_list__column_register(list, fmt);
2468 int hpp_dimension__add_output(unsigned col)
2470 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2471 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
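/*
 * sort_dimension__add() resolves a single --sort token by trying, in order:
 * the common sort keys, the hpp output fields, the branch-stack keys
 * (branch mode only), the memory keys (memory mode only), and finally
 * dynamic tracepoint fields via add_dynamic_entry().
 */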
2474 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2475 struct perf_evlist *evlist,
2480 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2481 struct sort_dimension *sd = &common_sort_dimensions[i];
2483 if (strncasecmp(tok, sd->name, strlen(tok)))
2486 if (sd->entry == &sort_parent) {
2487 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2491 regerror(ret, &parent_regex, err, sizeof(err));
2492 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2496 } else if (sd->entry == &sort_sym) {
2499 * perf diff displays the performance difference amongst
2500 * two or more perf.data files. Those files could come
2501 * from different binaries. So we should not compare
2502 * their ips, but the name of symbol.
2504 if (sort__mode == SORT_MODE__DIFF)
2505 sd->entry->se_collapse = sort__sym_sort;
2507 } else if (sd->entry == &sort_dso) {
2509 } else if (sd->entry == &sort_socket) {
2511 } else if (sd->entry == &sort_thread) {
2513 } else if (sd->entry == &sort_comm) {
2517 return __sort_dimension__add(sd, list, level);
2520 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2521 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2523 if (strncasecmp(tok, hd->name, strlen(tok)))
2526 return __hpp_dimension__add(hd, list, level);
2529 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2530 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2532 if (strncasecmp(tok, sd->name, strlen(tok)))
2535 if (sort__mode != SORT_MODE__BRANCH)
2538 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2541 __sort_dimension__add(sd, list, level);
2545 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2546 struct sort_dimension *sd = &memory_sort_dimensions[i];
2548 if (strncasecmp(tok, sd->name, strlen(tok)))
2551 if (sort__mode != SORT_MODE__MEMORY)
2554 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
2557 if (sd->entry == &sort_mem_daddr_sym)
2560 __sort_dimension__add(sd, list, level);
2564 if (!add_dynamic_entry(evlist, tok, level))
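/*
 * Illustrative note (not part of the original sort.c): sort_dimension__add()
 * tries the tables in a fixed order -- common keys, hpp (overhead/percentage)
 * columns, branch-stack keys (branch mode only), memory keys (mem mode only)
 * -- and finally falls back to a dynamic tracepoint field.  Each lookup is a
 * case-insensitive prefix match on the user token, as the guarded sketch
 * below shows.
 */
#if 0
#include <stdbool.h>
#include <string.h>
#include <strings.h>

static bool tok_matches(const char *tok, const char *name)
{
	/* same test as the lookup loops: prefix match, case-insensitive */
	return strncasecmp(tok, name, strlen(tok)) == 0;
}
/* tok_matches("sym", "symbol") -> true, tok_matches("dso_to", "dso") -> false */
#endif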
2570 static int setup_sort_list(struct perf_hpp_list *list, char *str,
2571 struct perf_evlist *evlist)
2577 bool in_group = false;
2581 tmp = strpbrk(str, "{}, ");
2586 next_level = level + 1;
2590 else if (*tmp == '}')
2598 ret = sort_dimension__add(list, tok, evlist, level);
2599 if (ret == -EINVAL) {
2600 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
2601 pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
2603 pr_err("Invalid --sort key: `%s'", tok);
2605 } else if (ret == -ESRCH) {
2606 pr_err("Unknown --sort key: `%s'", tok);
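/*
 * Illustrative note (not part of the original sort.c): setup_sort_list()
 * walks the --sort string token by token; keys wrapped in '{...}' share one
 * level while every other key starts a new one, so with --hierarchy e.g.
 *
 *   perf report --hierarchy -s '{comm,dso},sym'
 *
 * groups comm and dso on the first hierarchy level and puts sym on the next.
 */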
2617 static const char *get_default_sort_order(struct perf_evlist *evlist)
2619 const char *default_sort_orders[] = {
2621 default_branch_sort_order,
2622 default_mem_sort_order,
2623 default_top_sort_order,
2624 default_diff_sort_order,
2625 default_tracepoint_sort_order,
2627 bool use_trace = true;
2628 struct perf_evsel *evsel;
2630 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2632 if (evlist == NULL || perf_evlist__empty(evlist))
2635 evlist__for_each_entry(evlist, evsel) {
2636 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2643 sort__mode = SORT_MODE__TRACEPOINT;
2644 if (symbol_conf.raw_trace)
2645 return "trace_fields";
2648 return default_sort_orders[sort__mode];
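/*
 * Illustrative note (not part of the original sort.c): with no -s option the
 * default order is chosen by sort__mode -- "comm,dso,symbol" for a normal
 * report, the branch and memory variants for -b and --mem-mode -- and, when
 * every event in the session is a tracepoint, the mode is switched to
 * SORT_MODE__TRACEPOINT so the default becomes "trace" (or "trace_fields"
 * under --raw-trace).
 */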
2651 static int setup_sort_order(struct perf_evlist *evlist)
2653 char *new_sort_order;
2656 * Append a '+'-prefixed sort order to the default sort order string.
2659 if (!sort_order || is_strict_order(sort_order))
2662 if (sort_order[1] == '\0') {
2663 pr_err("Invalid --sort key: `+'");
2668 * We allocate a new sort_order string, but we never free it,
2669 * because it is referenced throughout the rest of the code.
2671 if (asprintf(&new_sort_order, "%s,%s",
2672 get_default_sort_order(evlist), sort_order + 1) < 0) {
2673 pr_err("Not enough memory to set up --sort");
2677 sort_order = new_sort_order;
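/*
 * Illustrative example (not part of the original sort.c): a '+'-prefixed
 * sort order extends the default instead of replacing it, so for a normal
 * report
 *
 *   perf report -s +cpu
 *
 * ends up sorting by "comm,dso,symbol,cpu".
 */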
2682 * Prepends the 'pre,' prefix to 'str' if 'pre' is
2683 * not already part of 'str'.
2685 static char *prefix_if_not_in(const char *pre, char *str)
2689 if (!str || strstr(str, pre))
2692 if (asprintf(&n, "%s,%s", pre, str) < 0)
2699 static char *setup_overhead(char *keys)
2701 if (sort__mode == SORT_MODE__DIFF)
2704 keys = prefix_if_not_in("overhead", keys);
2706 if (symbol_conf.cumulate_callchain)
2707 keys = prefix_if_not_in("overhead_children", keys);
2712 static int __setup_sorting(struct perf_evlist *evlist)
2715 const char *sort_keys;
2718 ret = setup_sort_order(evlist);
2722 sort_keys = sort_order;
2723 if (sort_keys == NULL) {
2724 if (is_strict_order(field_order)) {
2726 * If user specified field order but no sort order,
2727 * we'll honor it and not add default sort orders.
2732 sort_keys = get_default_sort_order(evlist);
2735 str = strdup(sort_keys);
2737 pr_err("Not enough memory to setup sort keys");
2742 * Prepend overhead fields for backward compatibility.
2744 if (!is_strict_order(field_order)) {
2745 str = setup_overhead(str);
2747 pr_err("Not enough memory to setup overhead keys");
2752 ret = setup_sort_list(&perf_hpp_list, str, evlist);
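/*
 * Illustrative example (not part of the original sort.c): unless a strict
 * -F/--fields list was given, setup_overhead() prepends the overhead
 * column(s) for backward compatibility, so "-s comm,dso" is treated as
 * "overhead,comm,dso", and as "overhead_children,overhead,comm,dso" when
 * --children is in effect.  perf diff skips this step.
 */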
2758 void perf_hpp__set_elide(int idx, bool elide)
2760 struct perf_hpp_fmt *fmt;
2761 struct hpp_sort_entry *hse;
2763 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2764 if (!perf_hpp__is_sort_entry(fmt))
2767 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2768 if (hse->se->se_width_idx == idx) {
2775 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2777 if (list && strlist__nr_entries(list) == 1) {
2779 fprintf(fp, "# %s: %s\n", list_name,
2780 strlist__entry(list, 0)->s);
2786 static bool get_elide(int idx, FILE *output)
2790 return __get_elide(symbol_conf.sym_list, "symbol", output);
2792 return __get_elide(symbol_conf.dso_list, "dso", output);
2794 return __get_elide(symbol_conf.comm_list, "comm", output);
2799 if (sort__mode != SORT_MODE__BRANCH)
2803 case HISTC_SYMBOL_FROM:
2804 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2805 case HISTC_SYMBOL_TO:
2806 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2807 case HISTC_DSO_FROM:
2808 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2810 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
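/*
 * Illustrative example (not part of the original sort.c): a column is elided
 * when its filter list holds exactly one entry, since every row would repeat
 * the same value.  E.g. (with a hypothetical DSO name)
 *
 *   perf report --dsos libfoo.so
 *
 * drops the dso column and emits "# dso: libfoo.so" in the header instead.
 */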
2818 void sort__setup_elide(FILE *output)
2820 struct perf_hpp_fmt *fmt;
2821 struct hpp_sort_entry *hse;
2823 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2824 if (!perf_hpp__is_sort_entry(fmt))
2827 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2828 fmt->elide = get_elide(hse->se->se_width_idx, output);
2832 * It makes no sense to elide all of the sort entries.
2833 * Just revert them so they show up again.
2835 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2836 if (!perf_hpp__is_sort_entry(fmt))
2843 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2844 if (!perf_hpp__is_sort_entry(fmt))
2851 int output_field_add(struct perf_hpp_list *list, char *tok)
2855 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2856 struct sort_dimension *sd = &common_sort_dimensions[i];
2858 if (strncasecmp(tok, sd->name, strlen(tok)))
2861 return __sort_dimension__add_output(list, sd);
2864 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2865 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2867 if (strncasecmp(tok, hd->name, strlen(tok)))
2870 return __hpp_dimension__add_output(list, hd);
2873 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2874 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2876 if (strncasecmp(tok, sd->name, strlen(tok)))
2879 return __sort_dimension__add_output(list, sd);
2882 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2883 struct sort_dimension *sd = &memory_sort_dimensions[i];
2885 if (strncasecmp(tok, sd->name, strlen(tok)))
2888 return __sort_dimension__add_output(list, sd);
2894 static int setup_output_list(struct perf_hpp_list *list, char *str)
2899 for (tok = strtok_r(str, ", ", &tmp);
2900 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2901 ret = output_field_add(list, tok);
2902 if (ret == -EINVAL) {
2903 ui__error("Invalid --fields key: `%s'", tok);
2905 } else if (ret == -ESRCH) {
2906 ui__error("Unknown --fields key: `%s'", tok);
2914 void reset_dimensions(void)
2918 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2919 common_sort_dimensions[i].taken = 0;
2921 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2922 hpp_sort_dimensions[i].taken = 0;
2924 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2925 bstack_sort_dimensions[i].taken = 0;
2927 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2928 memory_sort_dimensions[i].taken = 0;
2931 bool is_strict_order(const char *order)
2933 return order && (*order != '+');
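/*
 * Illustrative note (not part of the original sort.c): a strict order such
 * as "comm,dso" replaces the defaults, while a '+'-prefixed one such as
 * "+cycles" extends them; a NULL order is treated as non-strict.
 */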
2936 static int __setup_output_field(void)
2941 if (field_order == NULL)
2944 strp = str = strdup(field_order);
2946 pr_err("Not enough memory to setup output fields");
2950 if (!is_strict_order(field_order))
2953 if (!strlen(strp)) {
2954 pr_err("Invalid --fields key: `+'");
2958 ret = setup_output_list(&perf_hpp_list, strp);
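/*
 * Illustrative example (not part of the original sort.c): -F/--fields picks
 * the output columns independently of the sort keys, e.g.
 *
 *   perf report -F overhead,comm,dso
 *
 * while a '+'-prefixed list such as "-F +period" keeps the default columns
 * (the leading '+' is skipped above and the sort keys are merged back in by
 * perf_hpp__setup_output_field() during setup_sorting()).
 */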
2965 int setup_sorting(struct perf_evlist *evlist)
2969 err = __setup_sorting(evlist);
2973 if (parent_pattern != default_parent_pattern) {
2974 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
2982 * perf diff doesn't use default hpp output fields.
2984 if (sort__mode != SORT_MODE__DIFF)
2987 err = __setup_output_field();
2991 /* copy sort keys to output fields */
2992 perf_hpp__setup_output_field(&perf_hpp_list);
2993 /* and then copy output fields to sort keys */
2994 perf_hpp__append_sort_keys(&perf_hpp_list);
2996 /* setup hists-specific output fields */
2997 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
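/*
 * Illustrative note (not part of the original sort.c): setup_sorting() is the
 * entry point the perf tools call once the evlist is known -- it expands the
 * sort string, adds the "parent" key when a non-default parent pattern was
 * given, sets up the -F output fields, and then reconciles sort keys and
 * output columns with each other before the hists-specific formats are built.
 */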
3003 void reset_output_field(void)
3005 perf_hpp_list.need_collapse = 0;
3006 perf_hpp_list.parent = 0;
3007 perf_hpp_list.sym = 0;
3008 perf_hpp_list.dso = 0;
3014 perf_hpp__reset_output_field(&perf_hpp_list);