1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/mman.h>
7 #include <linux/time64.h>
12 #include "cacheline.h"
16 #include "map_symbol.h"
24 #include <traceevent/event-parse.h>
25 #include "mem-events.h"
27 #include "time-utils.h"
28 #include <linux/kernel.h>
29 #include <linux/string.h>
// Default sort/field-order strings for the various perf report modes
// (normal, branch-stack, memory, top, diff, tracepoint).
32 const char default_parent_pattern[] = "^sys_|^do_page_fault";
33 const char *parent_pattern = default_parent_pattern;
34 const char *default_sort_order = "comm,dso,symbol";
35 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
36 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
37 const char default_top_sort_order[] = "dso,symbol";
38 const char default_diff_sort_order[] = "dso,symbol";
39 const char default_tracepoint_sort_order[] = "trace";
// User-selected orders; NULL until set from the command line / config.
40 const char *sort_order;
41 const char *field_order;
// Compiled --ignore-callees regex; only valid when have_ignore_callees != 0.
42 regex_t ignore_callees_regex;
43 int have_ignore_callees = 0;
44 enum sort_mode sort__mode = SORT_MODE__NORMAL;
47 * Replaces all occurrences of a char used with the:
49 * -t, --field-separator
51 * option, that uses a special separator character and don't pad with spaces,
52 * replacing all occurrences of this separator in symbol names (and other
53 * output) with a '.' character, that thus it's the only non valid separator.
// printf-style formatter that additionally replaces every occurrence of the
// user's field separator (symbol_conf.field_sep) in the produced text, so the
// separator stays unambiguous in -t/--field-separator output.
55 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
61 	n = vsnprintf(bf, size, fmt, ap);
62 	if (symbol_conf.field_sep && n > 0) {
66 			sep = strchr(sep, *symbol_conf.field_sep);
// Comparator helper used when one or both operands may be NULL
// (callers check for NULL before delegating here).
79 static int64_t cmp_null(const void *l, const void *r)
// --sort pid: order entries by thread id (descending tid comparison).
92 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
94 	return right->thread->tid - left->thread->tid;
// Render "tid:comm", left-padding the tid to 7 columns.
97 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
98 				       size_t size, unsigned int width)
100 	const char *comm = thread__comm_str(he->thread);
// Reserve 8 columns for the "%7d:" prefix; width is clamped so the
// unsigned subtraction cannot wrap.
102 	width = max(7U, width) - 8;
103 	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
104 			       width, width, comm ?: "");
// Filter callback: hide entries whose thread differs from the selected one.
107 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
109 	const struct thread *th = arg;
111 	if (type != HIST_FILTER__THREAD)
114 	return th && he->thread != th;
117 struct sort_entry sort_thread = {
118 	.se_header	= " Pid:Command",
119 	.se_cmp		= sort__thread_cmp,
120 	.se_snprintf	= hist_entry__thread_snprintf,
121 	.se_filter	= hist_entry__thread_filter,
122 	.se_width_idx	= HISTC_THREAD,
128  * We can't use pointer comparison in functions below,
129  * because it gives different results based on pointer
130  * values, which could break some sorting assumptions.
// --sort comm: all three hooks compare by comm string for stable ordering.
133 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
135 	return strcmp(comm__str(right->comm), comm__str(left->comm));
139 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
141 	return strcmp(comm__str(right->comm), comm__str(left->comm));
145 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
147 	return strcmp(comm__str(right->comm), comm__str(left->comm));
149 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
151 				     size_t size, unsigned int width)
153 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
// Note: reuses the thread filter, so filtering by thread also applies here.
156 struct sort_entry sort_comm = {
157 	.se_header	= "Command",
158 	.se_cmp		= sort__comm_cmp,
159 	.se_collapse	= sort__comm_collapse,
160 	.se_sort	= sort__comm_sort,
161 	.se_snprintf	= hist_entry__comm_snprintf,
162 	.se_filter	= hist_entry__thread_filter,
163 	.se_width_idx	= HISTC_COMM,
// Compare two maps by their DSO name; NULL maps/dsos fall back to cmp_null.
// Long names are used in verbose mode, short names otherwise (per the
// alternate assignments below).
168 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
170 	struct dso *dso_l = map_l ? map_l->dso : NULL;
171 	struct dso *dso_r = map_r ? map_r->dso : NULL;
172 	const char *dso_name_l, *dso_name_r;
174 	if (!dso_l || !dso_r)
175 		return cmp_null(dso_r, dso_l);
178 		dso_name_l = dso_l->long_name;
179 		dso_name_r = dso_r->long_name;
181 		dso_name_l = dso_l->short_name;
182 		dso_name_r = dso_r->short_name;
185 	return strcmp(dso_name_l, dso_name_r);
189 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
191 	return _sort__dso_cmp(right->ms.map, left->ms.map);
// Render the DSO name, or "[unknown]" when there is no map/dso.
194 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
195 				     size_t size, unsigned int width)
197 	if (map && map->dso) {
198 		const char *dso_name = verbose > 0 ? map->dso->long_name :
199 			map->dso->short_name;
200 		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
203 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
206 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
207 				    size_t size, unsigned int width)
209 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
// Filter: hide entries not belonging to the selected DSO.
212 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
214 	const struct dso *dso = arg;
216 	if (type != HIST_FILTER__DSO)
219 	return dso && (!he->ms.map || he->ms.map->dso != dso);
222 struct sort_entry sort_dso = {
223 	.se_header	= "Shared Object",
224 	.se_cmp		= sort__dso_cmp,
225 	.se_snprintf	= hist_entry__dso_snprintf,
226 	.se_filter	= hist_entry__dso_filter,
227 	.se_width_idx	= HISTC_DSO,
// Compare two instruction pointers (descending).
232 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
234 	return (int64_t)(right_ip - left_ip);
// Compare two symbols: NULL-aware, inlined symbols compare by name with an
// address-overlap tie-break, otherwise by start then end address.
237 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
239 	if (!sym_l || !sym_r)
240 		return cmp_null(sym_l, sym_r);
245 	if (sym_l->inlined || sym_r->inlined) {
246 		int ret = strcmp(sym_l->name, sym_r->name);
// Overlapping address ranges are treated as the same inlined symbol.
250 		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
254 	if (sym_l->start != sym_r->start)
255 		return (int64_t)(sym_r->start - sym_l->start);
257 	return (int64_t)(sym_r->end - sym_l->end);
261 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
265 	if (!left->ms.sym && !right->ms.sym)
266 		return _sort__addr_cmp(left->ip, right->ip);
269 	 * comparing symbol address alone is not enough since it's a
270 	 * relative address within a dso.
// NOTE(review): upstream perf negates BOTH tests here
// ("!hists__has(left->hists, dso) || !hists__has(right->hists, dso)");
// the missing '!' on the right-hand side looks like a typo that makes the
// dso pre-comparison run almost always -- verify against upstream sort.c.
272 	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
273 		ret = sort__dso_cmp(left, right);
278 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
// Display-order comparison: by symbol name, NULLs via cmp_null.
282 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
284 	if (!left->ms.sym || !right->ms.sym)
285 		return cmp_null(left->ms.sym, right->ms.sym);
287 	return strcmp(right->ms.sym->name, left->ms.sym->name);
// Render "address origin [level] name+offset" (verbose) or just the
// symbol name; falls back to a raw hex address when there is no symbol.
290 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
291 				     u64 ip, char level, char *bf, size_t size,
294 	struct symbol *sym = ms->sym;
295 	struct map *map = ms->map;
// '!' marks a missing dso; otherwise the symtab origin character is shown.
299 		char o = map ? dso__symtab_origin(map->dso) : '!';
300 		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
301 				       BITS_PER_LONG / 4 + 2, ip, o);
304 		ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
// Data objects get a +0x<offset> suffix relative to the symbol start.
306 			if (sym->type == STT_OBJECT) {
307 				ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
308 				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
309 						ip - map->unmap_ip(map, sym->start));
311 				ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
315 		ret += repsep_snprintf(bf + ret, size - ret,
319 		size_t len = BITS_PER_LONG / 4;
320 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
327 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
329 	return _hist_entry__sym_snprintf(&he->ms, he->ip,
330 					 he->level, bf, size, width);
// Filter: hide entries whose symbol name does not contain the given substring.
333 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
335 	const char *sym = arg;
337 	if (type != HIST_FILTER__SYMBOL)
340 	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
343 struct sort_entry sort_sym = {
344 	.se_header	= "Symbol",
345 	.se_cmp		= sort__sym_cmp,
346 	.se_sort	= sort__sym_sort,
347 	.se_snprintf	= hist_entry__sym_snprintf,
348 	.se_filter	= hist_entry__sym_filter,
349 	.se_width_idx	= HISTC_SYMBOL,
// Resolve the source "file:line" for this entry's ip via its map/symbol.
354 char *hist_entry__srcline(struct hist_entry *he)
356 	return map__srcline(he->ms.map, he->ip, he->ms.sym);
// --sort srcline: lazily cache the srcline on each entry, compare by string.
360 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
363 		left->srcline = hist_entry__srcline(left);
365 		right->srcline = hist_entry__srcline(right);
367 	return strcmp(right->srcline, left->srcline);
370 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
371 					size_t size, unsigned int width)
374 		he->srcline = hist_entry__srcline(he);
376 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
379 struct sort_entry sort_srcline = {
380 	.se_header	= "Source:Line",
381 	.se_cmp		= sort__srcline_cmp,
382 	.se_snprintf	= hist_entry__srcline_snprintf,
383 	.se_width_idx	= HISTC_SRCLINE,
386 /* --sort srcline_from */
// Srcline of a branch endpoint (from/to address of a branch-stack sample).
388 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
390 	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
// Lazily cache the branch-source srcline, then compare by string.
394 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
396 	if (!left->branch_info->srcline_from)
397 		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
399 	if (!right->branch_info->srcline_from)
400 		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
402 	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
405 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
406 					size_t size, unsigned int width)
408 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
411 struct sort_entry sort_srcline_from = {
412 	.se_header	= "From Source:Line",
413 	.se_cmp		= sort__srcline_from_cmp,
414 	.se_snprintf	= hist_entry__srcline_from_snprintf,
415 	.se_width_idx	= HISTC_SRCLINE_FROM,
418 /* --sort srcline_to */
// Same scheme as srcline_from, for the branch target address.
421 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
423 	if (!left->branch_info->srcline_to)
424 		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
426 	if (!right->branch_info->srcline_to)
427 		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
429 	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
432 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
433 					size_t size, unsigned int width)
435 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
438 struct sort_entry sort_srcline_to = {
439 	.se_header	= "To Source:Line",
440 	.se_cmp		= sort__srcline_to_cmp,
441 	.se_snprintf	= hist_entry__srcline_to_snprintf,
442 	.se_width_idx	= HISTC_SRCLINE_TO,
// Render "IPC [coverage%]" for a symbol from its annotation counters;
// prints "-" when no symbol is available.
445 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
446 				     size_t size, unsigned int width)
449 	struct symbol *sym = he->ms.sym;
450 	struct annotation *notes;
451 	double ipc = 0.0, coverage = 0.0;
455 		return repsep_snprintf(bf, size, "%-*s", width, "-");
457 	notes = symbol__annotation(sym);
// IPC = instructions retired per cycle over the annotated samples.
459 	if (notes->hit_cycles)
460 		ipc = notes->hit_insn / ((double)notes->hit_cycles);
462 	if (notes->total_insn) {
463 		coverage = notes->cover_insn * 100.0 /
464 			((double)notes->total_insn);
467 	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
468 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
471 struct sort_entry sort_sym_ipc = {
472 	.se_header	= "IPC   [IPC Coverage]",
473 	.se_cmp		= sort__sym_cmp,
474 	.se_snprintf	= hist_entry__sym_ipc_snprintf,
475 	.se_width_idx	= HISTC_SYMBOL_IPC,
// Placeholder variant used when IPC data is not collected: always "-  -".
478 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
480 					     char *bf, size_t size,
485 	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
486 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
489 struct sort_entry sort_sym_ipc_null = {
490 	.se_header	= "IPC   [IPC Coverage]",
491 	.se_cmp		= sort__sym_cmp,
492 	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
493 	.se_width_idx	= HISTC_SYMBOL_IPC,
// Shared sentinel for "no source file"; distinct from NULL so the lookup
// is only attempted once per entry.
498 static char no_srcfile[1];
// Resolve just the source file (no line number) for an entry's ip.
500 static char *hist_entry__get_srcfile(struct hist_entry *e)
503 	struct map *map = e->ms.map;
508 	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
509 			 e->ms.sym, false, true, true, e->ip);
510 	if (!strcmp(sf, SRCLINE_UNKNOWN))
// --sort srcfile: lazily cache, then compare by string.
522 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
525 		left->srcfile = hist_entry__get_srcfile(left);
527 		right->srcfile = hist_entry__get_srcfile(right);
529 	return strcmp(right->srcfile, left->srcfile);
532 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
533 					size_t size, unsigned int width)
536 		he->srcfile = hist_entry__get_srcfile(he);
538 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
541 struct sort_entry sort_srcfile = {
542 	.se_header	= "Source File",
543 	.se_cmp		= sort__srcfile_cmp,
544 	.se_snprintf	= hist_entry__srcfile_snprintf,
545 	.se_width_idx	= HISTC_SRCFILE,
// --sort parent: compare by the matched parent symbol's name; entries with
// no parent (pattern mismatch) render as "[other]".
551 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
553 	struct symbol *sym_l = left->parent;
554 	struct symbol *sym_r = right->parent;
556 	if (!sym_l || !sym_r)
557 		return cmp_null(sym_l, sym_r);
559 	return strcmp(sym_r->name, sym_l->name);
562 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
563 				       size_t size, unsigned int width)
565 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
566 			      he->parent ? he->parent->name : "[other]");
569 struct sort_entry sort_parent = {
570 	.se_header	= "Parent symbol",
571 	.se_cmp		= sort__parent_cmp,
572 	.se_snprintf	= hist_entry__parent_snprintf,
573 	.se_width_idx	= HISTC_PARENT,
// --sort cpu: numeric comparison on the sampled CPU number.
579 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
581 	return right->cpu - left->cpu;
584 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
585 				    size_t size, unsigned int width)
587 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
590 struct sort_entry sort_cpu = {
592 	.se_cmp		= sort__cpu_cmp,
593 	.se_snprintf	= hist_entry__cpu_snprintf,
594 	.se_width_idx	= HISTC_CPU,
597 /* --sort cgroup_id */
// Compare the cgroup namespace device, then inode, numbers.
599 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
601 	return (int64_t)(right_dev - left_dev);
604 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
606 	return (int64_t)(right_ino - left_ino);
610 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
614 	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
618 	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
619 				       left->cgroup_id.ino);
622 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
623 					  char *bf, size_t size,
624 					  unsigned int width __maybe_unused)
626 	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
630 struct sort_entry sort_cgroup_id = {
631 	.se_header      = "cgroup id (dev/inode)",
632 	.se_cmp	        = sort__cgroup_id_cmp,
633 	.se_snprintf    = hist_entry__cgroup_id_snprintf,
634 	.se_width_idx	= HISTC_CGROUP_ID,
// --sort socket: numeric comparison on the processor socket id.
640 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
642 	return right->socket - left->socket;
645 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
646 				    size_t size, unsigned int width)
648 	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
// Filter: a negative selected socket means "no filter".
651 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
653 	int sk = *(const int *)arg;
655 	if (type != HIST_FILTER__SOCKET)
658 	return sk >= 0 && he->socket != sk;
661 struct sort_entry sort_socket = {
662 	.se_header      = "Socket",
663 	.se_cmp	        = sort__socket_cmp,
664 	.se_snprintf    = hist_entry__socket_snprintf,
665 	.se_filter      = hist_entry__socket_filter,
666 	.se_width_idx	= HISTC_SOCKET,
// --sort time: order by sample timestamp; rendered in ns or us depending
// on symbol_conf.nanosecs.
672 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
674 	return right->time - left->time;
677 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
678 				    size_t size, unsigned int width)
682 	if (symbol_conf.nanosecs)
683 		timestamp__scnprintf_nsec(he->time, he_time,
686 		timestamp__scnprintf_usec(he->time, he_time,
689 	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
692 struct sort_entry sort_time = {
694 	.se_cmp	        = sort__time_cmp,
695 	.se_snprintf    = hist_entry__time_snprintf,
696 	.se_width_idx	= HISTC_TIME,
// Format a tracepoint sample's payload via libtraceevent: raw field dump
// when --raw-trace is set, otherwise the pretty-printed event info.
// Returns a heap buffer the caller owns.
701 static char *get_trace_output(struct hist_entry *he)
703 	struct trace_seq seq;
705 	struct tep_record rec = {
706 		.data = he->raw_data,
707 		.size = he->raw_size,
710 	evsel = hists_to_evsel(he->hists);
712 	trace_seq_init(&seq);
713 	if (symbol_conf.raw_trace) {
714 		tep_print_fields(&seq, he->raw_data, he->raw_size,
717 		tep_print_event(evsel->tp_format->tep,
718 				&seq, &rec, "%s", TEP_PRINT_INFO);
721 	 * Trim the buffer, it starts at 4KB and we're not going to
722 	 * add anything more to this buffer.
724 	return realloc(seq.buffer, seq.len + 1);
// --sort trace: lazily cache the formatted trace output, compare by string.
// Only meaningful for tracepoint events.
728 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
732 	evsel = hists_to_evsel(left->hists);
733 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
736 	if (left->trace_output == NULL)
737 		left->trace_output = get_trace_output(left);
738 	if (right->trace_output == NULL)
739 		right->trace_output = get_trace_output(right);
741 	return strcmp(right->trace_output, left->trace_output);
744 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
745 				      size_t size, unsigned int width)
749 	evsel = hists_to_evsel(he->hists);
750 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
751 		return scnprintf(bf, size, "%-.*s", width, "N/A");
753 	if (he->trace_output == NULL)
754 		he->trace_output = get_trace_output(he);
755 	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
758 struct sort_entry sort_trace = {
759 	.se_header      = "Trace output",
760 	.se_cmp	        = sort__trace_cmp,
761 	.se_snprintf    = hist_entry__trace_snprintf,
762 	.se_width_idx	= HISTC_TRACE,
765 /* sort keys for branch stacks */
// Branch-stack keys: compare/render the source ("from") and target ("to")
// endpoints of sampled branches. Entries without branch_info compare via
// cmp_null and render as "N/A".
768 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
770 	if (!left->branch_info || !right->branch_info)
771 		return cmp_null(left->branch_info, right->branch_info);
773 	return _sort__dso_cmp(left->branch_info->from.ms.map,
774 			      right->branch_info->from.ms.map);
777 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
778 					size_t size, unsigned int width)
781 		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
784 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
787 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
790 	const struct dso *dso = arg;
792 	if (type != HIST_FILTER__DSO)
795 	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
796 		       he->branch_info->from.ms.map->dso != dso);
800 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
802 	if (!left->branch_info || !right->branch_info)
803 		return cmp_null(left->branch_info, right->branch_info);
805 	return _sort__dso_cmp(left->branch_info->to.ms.map,
806 			      right->branch_info->to.ms.map);
809 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
810 				       size_t size, unsigned int width)
813 		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
816 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
819 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
822 	const struct dso *dso = arg;
824 	if (type != HIST_FILTER__DSO)
827 	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
828 		       he->branch_info->to.ms.map->dso != dso);
832 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
// NOTE(review): these initializers dereference branch_info BEFORE the NULL
// check below, and are then overwritten after it. Upstream declares the
// pointers uninitialized; the early dereference is UB when branch_info is
// NULL -- verify and drop the initializers.
834 	struct addr_map_symbol *from_l = &left->branch_info->from;
835 	struct addr_map_symbol *from_r = &right->branch_info->from;
837 	if (!left->branch_info || !right->branch_info)
838 		return cmp_null(left->branch_info, right->branch_info);
840 	from_l = &left->branch_info->from;
841 	from_r = &right->branch_info->from;
843 	if (!from_l->ms.sym && !from_r->ms.sym)
844 		return _sort__addr_cmp(from_l->addr, from_r->addr);
846 	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
850 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
852 	struct addr_map_symbol *to_l, *to_r;
854 	if (!left->branch_info || !right->branch_info)
855 		return cmp_null(left->branch_info, right->branch_info);
857 	to_l = &left->branch_info->to;
858 	to_r = &right->branch_info->to;
860 	if (!to_l->ms.sym && !to_r->ms.sym)
861 		return _sort__addr_cmp(to_l->addr, to_r->addr);
863 	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
866 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
867 					 size_t size, unsigned int width)
869 	if (he->branch_info) {
870 		struct addr_map_symbol *from = &he->branch_info->from;
872 		return _hist_entry__sym_snprintf(&from->ms, from->addr, he->level, bf, size, width);
875 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
878 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
879 				       size_t size, unsigned int width)
881 	if (he->branch_info) {
882 		struct addr_map_symbol *to = &he->branch_info->to;
884 		return _hist_entry__sym_snprintf(&to->ms, to->addr, he->level, bf, size, width);
887 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
// Symbol-substring filters for the branch endpoints.
890 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
893 	const char *sym = arg;
895 	if (type != HIST_FILTER__SYMBOL)
898 	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
899 			strstr(he->branch_info->from.ms.sym->name, sym));
902 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
905 	const char *sym = arg;
907 	if (type != HIST_FILTER__SYMBOL)
910 	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
911 		        strstr(he->branch_info->to.ms.sym->name, sym));
914 struct sort_entry sort_dso_from = {
915 	.se_header	= "Source Shared Object",
916 	.se_cmp		= sort__dso_from_cmp,
917 	.se_snprintf	= hist_entry__dso_from_snprintf,
918 	.se_filter	= hist_entry__dso_from_filter,
919 	.se_width_idx	= HISTC_DSO_FROM,
922 struct sort_entry sort_dso_to = {
923 	.se_header	= "Target Shared Object",
924 	.se_cmp		= sort__dso_to_cmp,
925 	.se_snprintf	= hist_entry__dso_to_snprintf,
926 	.se_filter	= hist_entry__dso_to_filter,
927 	.se_width_idx	= HISTC_DSO_TO,
930 struct sort_entry sort_sym_from = {
931 	.se_header	= "Source Symbol",
932 	.se_cmp		= sort__sym_from_cmp,
933 	.se_snprintf	= hist_entry__sym_from_snprintf,
934 	.se_filter	= hist_entry__sym_from_filter,
935 	.se_width_idx	= HISTC_SYMBOL_FROM,
938 struct sort_entry sort_sym_to = {
939 	.se_header	= "Target Symbol",
940 	.se_cmp		= sort__sym_to_cmp,
941 	.se_snprintf	= hist_entry__sym_to_snprintf,
942 	.se_filter	= hist_entry__sym_to_filter,
943 	.se_width_idx	= HISTC_SYMBOL_TO,
// --sort mispredict: order by the predicted/mispredicted branch flags.
947 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
951 	if (!left->branch_info || !right->branch_info)
952 		return cmp_null(left->branch_info, right->branch_info);
954 	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
955 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
959 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
960 					   size_t size, unsigned int width){
961 	static const char *out = "N/A";
963 	if (he->branch_info) {
964 		if (he->branch_info->flags.predicted)
966 		else if (he->branch_info->flags.mispred)
970 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
// --sort cycles: order by the LBR-reported basic-block cycle count;
// 0 means the hardware did not provide a count and renders as "-".
974 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
976 	if (!left->branch_info || !right->branch_info)
977 		return cmp_null(left->branch_info, right->branch_info);
979 	return left->branch_info->flags.cycles -
980 		right->branch_info->flags.cycles;
983 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
984 				    size_t size, unsigned int width)
986 	if (!he->branch_info)
987 		return scnprintf(bf, size, "%-.*s", width, "N/A");
988 	if (he->branch_info->flags.cycles == 0)
989 		return repsep_snprintf(bf, size, "%-*s", width, "-");
990 	return repsep_snprintf(bf, size, "%-*hd", width,
991 			       he->branch_info->flags.cycles);
994 struct sort_entry sort_cycles = {
995 	.se_header	= "Basic Block Cycles",
996 	.se_cmp		= sort__cycles_cmp,
997 	.se_snprintf	= hist_entry__cycles_snprintf,
998 	.se_width_idx	= HISTC_CYCLES,
1001 /* --sort daddr_sym */
// Memory-access keys: compare by data address, code address, and data-side
// DSO. A missing mem_info contributes 0 / NULL to the comparison.
1003 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1005 	uint64_t l = 0, r = 0;
1008 		l = left->mem_info->daddr.addr;
1009 	if (right->mem_info)
1010 		r = right->mem_info->daddr.addr;
1012 	return (int64_t)(r - l);
1015 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1016 				    size_t size, unsigned int width)
1019 	struct map_symbol *ms = NULL;
1022 		addr = he->mem_info->daddr.addr;
1023 		ms = &he->mem_info->daddr.ms;
1025 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1029 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1031 	uint64_t l = 0, r = 0;
1034 		l = left->mem_info->iaddr.addr;
1035 	if (right->mem_info)
1036 		r = right->mem_info->iaddr.addr;
1038 	return (int64_t)(r - l);
1041 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1042 				    size_t size, unsigned int width)
1045 	struct map_symbol *ms = NULL;
1048 		addr = he->mem_info->iaddr.addr;
1049 		ms = &he->mem_info->iaddr.ms;
1051 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1055 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1057 	struct map *map_l = NULL;
1058 	struct map *map_r = NULL;
1061 		map_l = left->mem_info->daddr.ms.map;
1062 	if (right->mem_info)
1063 		map_r = right->mem_info->daddr.ms.map;
1065 	return _sort__dso_cmp(map_l, map_r);
1068 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1069 				    size_t size, unsigned int width)
1071 	struct map *map = NULL;
1074 		map = he->mem_info->daddr.ms.map;
1076 	return _hist_entry__dso_snprintf(map, bf, size, width);
// data_src keys: each comparator copies the sample's perf_mem_data_src and
// substitutes the *_NA value when mem_info is absent, then compares the
// relevant bitfield. Rendering delegates to the perf_mem__*_scnprintf
// helpers from mem-events.
1080 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1082 	union perf_mem_data_src data_src_l;
1083 	union perf_mem_data_src data_src_r;
1086 		data_src_l = left->mem_info->data_src;
1088 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1090 	if (right->mem_info)
1091 		data_src_r = right->mem_info->data_src;
1093 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1095 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1098 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1099 				    size_t size, unsigned int width)
1103 	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1104 	return repsep_snprintf(bf, size, "%.*s", width, out);
1108 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1110 	union perf_mem_data_src data_src_l;
1111 	union perf_mem_data_src data_src_r;
1114 		data_src_l = left->mem_info->data_src;
1116 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1118 	if (right->mem_info)
1119 		data_src_r = right->mem_info->data_src;
1121 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1123 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1126 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1127 				    size_t size, unsigned int width)
1131 	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1132 	return repsep_snprintf(bf, size, "%-*s", width, out);
1136 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1138 	union perf_mem_data_src data_src_l;
1139 	union perf_mem_data_src data_src_r;
1142 		data_src_l = left->mem_info->data_src;
1144 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1146 	if (right->mem_info)
1147 		data_src_r = right->mem_info->data_src;
1149 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1151 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1154 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1155 				    size_t size, unsigned int width)
1159 	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1160 	return repsep_snprintf(bf, size, "%-*s", width, out);
1164 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1166 	union perf_mem_data_src data_src_l;
1167 	union perf_mem_data_src data_src_r;
1170 		data_src_l = left->mem_info->data_src;
1172 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1174 	if (right->mem_info)
1175 		data_src_r = right->mem_info->data_src;
1177 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1179 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1182 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1183 				    size_t size, unsigned int width)
1187 	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1188 	return repsep_snprintf(bf, size, "%-*s", width, out);
// --sort dcacheline: group samples that touched the same data cacheline.
// Ordering: cpumode, then dso id, then (for anonymous userspace maps) pid,
// finally the cacheline-aligned address.
1192 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1195 	struct map *l_map, *r_map;
1198 	if (!left->mem_info)  return -1;
1199 	if (!right->mem_info) return 1;
1201 	/* group event types together */
1202 	if (left->cpumode > right->cpumode) return -1;
1203 	if (left->cpumode < right->cpumode) return 1;
1205 	l_map = left->mem_info->daddr.ms.map;
1206 	r_map = right->mem_info->daddr.ms.map;
1208 	/* if both are NULL, jump to sort on al_addr instead */
1209 	if (!l_map && !r_map)
1212 	if (!l_map) return -1;
1213 	if (!r_map) return 1;
1215 	rc = dso__cmp_id(l_map->dso, r_map->dso);
1219 	 * Addresses with no major/minor numbers are assumed to be
1220 	 * anonymous in userspace.  Sort those on pid then address.
1222 	 * The kernel and non-zero major/minor mapped areas are
1223 	 * assumed to be unity mapped.  Sort those on address.
// Anonymous private userspace mapping: same virtual address in different
// processes is different memory, so disambiguate by pid first.
1226 	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1227 	    (!(l_map->flags & MAP_SHARED)) &&
1228 	    !l_map->dso->id.maj && !l_map->dso->id.min &&
1229 	    !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
1230 		/* userspace anonymous */
1232 		if (left->thread->pid_ > right->thread->pid_) return -1;
1233 		if (left->thread->pid_ < right->thread->pid_) return 1;
1237 	/* al_addr does all the right addr - start + offset calculations */
1238 	l = cl_address(left->mem_info->daddr.al_addr);
1239 	r = cl_address(right->mem_info->daddr.al_addr);
1241 	if (l > r) return -1;
1242 	if (l < r) return 1;
// Render the cacheline address as a symbol+offset when possible.
1247 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1248 					  size_t size, unsigned int width)
1252 	struct map_symbol *ms = NULL;
1253 	char level = he->level;
1256 		struct map *map = he->mem_info->daddr.ms.map;
1258 		addr = cl_address(he->mem_info->daddr.al_addr);
1259 		ms = &he->mem_info->daddr.ms;
1261 		/* print [s] for shared data mmaps */
1262 		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1263 		     map && !(map->prot & PROT_EXEC) &&
1264 		    (map->flags & MAP_SHARED) &&
1265 		    (map->dso->id.maj || map->dso->id.min ||
1266 		     map->dso->id.ino || map->dso->id.ino_generation))
1271 	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1274 struct sort_entry sort_mispredict = {
1275 	.se_header	= "Branch Mispredicted",
1276 	.se_cmp		= sort__mispredict_cmp,
1277 	.se_snprintf	= hist_entry__mispredict_snprintf,
1278 	.se_width_idx	= HISTC_MISPREDICT,
// Per-entry average sample weight (total weight / event count);
// 0 when no events have been accumulated yet.
1281 static u64 he_weight(struct hist_entry *he)
1283 	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
// --sort local_weight: compare averaged weights.
1287 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1289 	return he_weight(left) - he_weight(right);
1292 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1293 				    size_t size, unsigned int width)
1295 	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1298 struct sort_entry sort_local_weight = {
1299 	.se_header	= "Local Weight",
1300 	.se_cmp		= sort__local_weight_cmp,
1301 	.se_snprintf	= hist_entry__local_weight_snprintf,
1302 	.se_width_idx	= HISTC_LOCAL_WEIGHT,
// --sort weight: compare total (un-averaged) weights.
1306 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1308 	return left->stat.weight - right->stat.weight;
1311 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1312 					      size_t size, unsigned int width)
1314 	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1317 struct sort_entry sort_global_weight = {
1318 	.se_header	= "Weight",
1319 	.se_cmp		= sort__global_weight_cmp,
1320 	.se_snprintf	= hist_entry__global_weight_snprintf,
1321 	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
// sort_entry descriptors wiring the memory-access comparators/formatters
// above to their column headers and width slots.
1324 struct sort_entry sort_mem_daddr_sym = {
1325 	.se_header	= "Data Symbol",
1326 	.se_cmp		= sort__daddr_cmp,
1327 	.se_snprintf	= hist_entry__daddr_snprintf,
1328 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1331 struct sort_entry sort_mem_iaddr_sym = {
1332 	.se_header	= "Code Symbol",
1333 	.se_cmp		= sort__iaddr_cmp,
1334 	.se_snprintf	= hist_entry__iaddr_snprintf,
1335 	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1338 struct sort_entry sort_mem_daddr_dso = {
1339 	.se_header	= "Data Object",
1340 	.se_cmp		= sort__dso_daddr_cmp,
1341 	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1342 	.se_width_idx	= HISTC_MEM_DADDR_DSO,
1345 struct sort_entry sort_mem_locked = {
1346 	.se_header	= "Locked",
1347 	.se_cmp		= sort__locked_cmp,
1348 	.se_snprintf	= hist_entry__locked_snprintf,
1349 	.se_width_idx	= HISTC_MEM_LOCKED,
1352 struct sort_entry sort_mem_tlb = {
1353 	.se_header	= "TLB access",
1354 	.se_cmp		= sort__tlb_cmp,
1355 	.se_snprintf	= hist_entry__tlb_snprintf,
1356 	.se_width_idx	= HISTC_MEM_TLB,
1359 struct sort_entry sort_mem_lvl = {
1360 	.se_header	= "Memory access",
1361 	.se_cmp		= sort__lvl_cmp,
1362 	.se_snprintf	= hist_entry__lvl_snprintf,
1363 	.se_width_idx	= HISTC_MEM_LVL,
1366 struct sort_entry sort_mem_snoop = {
1367 	.se_header	= "Snoop",
1368 	.se_cmp		= sort__snoop_cmp,
1369 	.se_snprintf	= hist_entry__snoop_snprintf,
1370 	.se_width_idx	= HISTC_MEM_SNOOP,
1373 struct sort_entry sort_mem_dcacheline = {
1374 	.se_header	= "Data Cacheline",
1375 	.se_cmp		= sort__dcacheline_cmp,
1376 	.se_snprintf	= hist_entry__dcacheline_snprintf,
1377 	.se_width_idx	= HISTC_MEM_DCACHELINE,
/*
 * --sort phys_daddr: physical data address of the memory access.
 * Entries without mem_info compare as address 0.
 */
1381 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1383 uint64_t l = 0, r = 0;
1386 l = left->mem_info->daddr.phys_addr;
1387 if (right->mem_info)
1388 r = right->mem_info->daddr.phys_addr;
1390 return (int64_t)(r - l);

/* Print "[level] 0x<phys addr>" padded to 'width'. */
1393 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1394 size_t size, unsigned int width)
/* Hex digits needed for a native-width address. */
1398 size_t len = BITS_PER_LONG / 4;
/*
 * NOTE(review): he->mem_info is dereferenced unconditionally here while
 * the cmp above tolerates a NULL mem_info — confirm callers guarantee
 * mem_info is set before this column is printed.
 */
1400 addr = he->mem_info->daddr.phys_addr;
1402 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
/*
 * NOTE(review): 'len' is size_t but the '.*' precision consumes an int
 * via varargs — presumably harmless on the supported ABIs, but an
 * explicit (int) cast would be strictly correct.
 */
1404 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1406 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

/* Sort-key descriptor for the "Data Physical Address" column. */
1414 struct sort_entry sort_mem_phys_daddr = {
1415 .se_header = "Data Physical Address",
1416 .se_cmp = sort__phys_daddr_cmp,
1417 .se_snprintf = hist_entry__phys_daddr_snprintf,
1418 .se_width_idx = HISTC_MEM_PHYS_DADDR,
/*
 * --sort abort: group entries by the transaction-abort flag of the
 * branch.  Entries lacking branch_info sort via cmp_null(); otherwise
 * the result is 0 (equal) or 1 (different flag values).
 */
1422 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1424 if (!left->branch_info || !right->branch_info)
1425 return cmp_null(left->branch_info, right->branch_info);
1427 return left->branch_info->flags.abort !=
1428 right->branch_info->flags.abort;
1431 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1432 size_t size, unsigned int width)
1434 static const char *out = "N/A";
1436 if (he->branch_info) {
1437 if (he->branch_info->flags.abort)
1443 return repsep_snprintf(bf, size, "%-*s", width, out);
/* Sort-key descriptor for the "Transaction abort" column. */
1446 struct sort_entry sort_abort = {
1447 .se_header = "Transaction abort",
1448 .se_cmp = sort__abort_cmp,
1449 .se_snprintf = hist_entry__abort_snprintf,
1450 .se_width_idx = HISTC_ABORT,
/*
 * --sort in_tx: group entries by whether the branch executed inside a
 * hardware transaction.  NULL branch_info sorts via cmp_null().
 */
1454 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1456 if (!left->branch_info || !right->branch_info)
1457 return cmp_null(left->branch_info, right->branch_info);
1459 return left->branch_info->flags.in_tx !=
1460 right->branch_info->flags.in_tx;
1463 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1464 size_t size, unsigned int width)
1466 static const char *out = "N/A";
1468 if (he->branch_info) {
1469 if (he->branch_info->flags.in_tx)
1475 return repsep_snprintf(bf, size, "%-*s", width, out);
/* Sort-key descriptor for the "Branch in transaction" column. */
1478 struct sort_entry sort_in_tx = {
1479 .se_header = "Branch in transaction",
1480 .se_cmp = sort__in_tx_cmp,
1481 .se_snprintf = hist_entry__in_tx_snprintf,
1482 .se_width_idx = HISTC_IN_TX,
/*
 * --sort transaction: decode the PERF_SAMPLE_TRANSACTION word into a
 * human-readable flag string (e.g. "TX SYNC :1").
 */
1486 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1488 return left->transaction - right->transaction;

/* Append 'str' at 'p' and return the new end-of-string position. */
1491 static inline char *add_str(char *p, const char *str)
1494 return p + strlen(str);

/*
 * Table mapping PERF_TXN_* bits to display tokens.  'skip_for_len'
 * marks bits that are mutually exclusive with an earlier entry so the
 * column width calculation does not double-count them.
 */
1497 static struct txbit {
1502 { PERF_TXN_ELISION, "EL ", 0 },
1503 { PERF_TXN_TRANSACTION, "TX ", 1 },
1504 { PERF_TXN_SYNC, "SYNC ", 1 },
1505 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1506 { PERF_TXN_RETRY, "RETRY ", 0 },
1507 { PERF_TXN_CONFLICT, "CON ", 0 },
1508 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1509 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },

/* Worst-case width of the transaction column. */
1513 int hist_entry__transaction_len(void)
1518 for (i = 0; txbits[i].name; i++) {
1519 if (!txbits[i].skip_for_len)
1520 len += strlen(txbits[i].name);
1522 len += 4; /* :XX<space> */

/* Render the transaction flags plus the abort code, if any. */
1526 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1527 size_t size, unsigned int width)
1529 u64 t = he->transaction;
1535 for (i = 0; txbits[i].name; i++)
1536 if (txbits[i].flag & t)
1537 p = add_str(p, txbits[i].name);
/* A non-zero word with neither SYNC nor ASYNC set is unexpected. */
1538 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1539 p = add_str(p, "NEITHER ");
/* Abort code is carried in the high bits; print it as ":<hex>". */
1540 if (t & PERF_TXN_ABORT_MASK) {
1541 sprintf(p, ":%" PRIx64,
1542 (t & PERF_TXN_ABORT_MASK) >>
1543 PERF_TXN_ABORT_SHIFT);
1547 return repsep_snprintf(bf, size, "%-*s", width, buf);

/* Sort-key descriptor for the "Transaction" column. */
1550 struct sort_entry sort_transaction = {
1551 .se_header = "Transaction ",
1552 .se_cmp = sort__transaction_cmp,
1553 .se_snprintf = hist_entry__transaction_snprintf,
1554 .se_width_idx = HISTC_TRANSACTION,
1557 /* --sort symbol_size */

/* Three-way compare of symbol sizes; a missing symbol counts as size 0. */
1559 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1561 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1562 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1564 return size_l < size_r ? -1 :
1565 size_l == size_r ? 0 : 1;

/* Arguments swapped so bigger symbols sort first (descending). */
1569 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1571 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);

/* Print the symbol size, or "unknown" when the symbol is missing. */
1574 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1575 size_t bf_size, unsigned int width)
1578 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1580 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");

1583 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
1584 size_t size, unsigned int width)
1586 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);

/* Sort-key descriptor for the "Symbol size" column. */
1589 struct sort_entry sort_sym_size = {
1590 .se_header = "Symbol size",
1591 .se_cmp = sort__sym_size_cmp,
1592 .se_snprintf = hist_entry__sym_size_snprintf,
1593 .se_width_idx = HISTC_SYM_SIZE,
1596 /* --sort dso_size */

/* Three-way compare of mapped DSO sizes; a missing map counts as 0. */
1598 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
1600 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
1601 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
1603 return size_l < size_r ? -1 :
1604 size_l == size_r ? 0 : 1;

/* Arguments swapped so bigger DSOs sort first (descending). */
1608 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
1610 return _sort__dso_size_cmp(right->ms.map, left->ms.map);

/* Print the DSO size, or "unknown" when no map/dso is attached. */
1613 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
1614 size_t bf_size, unsigned int width)
1616 if (map && map->dso)
/*
 * NOTE(review): "%*d" consumes an int while map__size() presumably
 * returns a wider unsigned type — confirm the value is narrowed (or
 * cast) before it reaches the varargs here.
 */
1617 return repsep_snprintf(bf, bf_size, "%*d", width,

1620 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");

1623 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
1624 size_t size, unsigned int width)
1626 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);

/* Sort-key descriptor for the "DSO size" column. */
1629 struct sort_entry sort_dso_size = {
1630 .se_header = "DSO size",
1631 .se_cmp = sort__dso_size_cmp,
1632 .se_snprintf = hist_entry__dso_size_snprintf,
1633 .se_width_idx = HISTC_DSO_SIZE,
/*
 * Lookup tables mapping --sort key names to their sort_entry
 * descriptors.  Three tables exist, one per sort mode: common keys,
 * branch-stack keys and memory keys.  sort_dimension__add() walks them
 * in that order.
 */
1637 struct sort_dimension {
1639 struct sort_entry *entry;

/* Common keys are indexed directly by their SORT_* enum value. */
1643 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

1645 static struct sort_dimension common_sort_dimensions[] = {
1646 DIM(SORT_PID, "pid", sort_thread),
1647 DIM(SORT_COMM, "comm", sort_comm),
1648 DIM(SORT_DSO, "dso", sort_dso),
1649 DIM(SORT_SYM, "symbol", sort_sym),
1650 DIM(SORT_PARENT, "parent", sort_parent),
1651 DIM(SORT_CPU, "cpu", sort_cpu),
1652 DIM(SORT_SOCKET, "socket", sort_socket),
1653 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1654 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1655 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1656 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1657 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1658 DIM(SORT_TRACE, "trace", sort_trace),
1659 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
1660 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1661 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
1662 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
1663 DIM(SORT_TIME, "time", sort_time),

/* Branch keys are offset by __SORT_BRANCH_STACK to index from 0. */
1668 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

1670 static struct sort_dimension bstack_sort_dimensions[] = {
1671 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1672 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1673 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1674 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1675 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1676 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1677 DIM(SORT_ABORT, "abort", sort_abort),
1678 DIM(SORT_CYCLES, "cycles", sort_cycles),
1679 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1680 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1681 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),

/* Memory keys are offset by __SORT_MEMORY_MODE to index from 0. */
1686 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

1688 static struct sort_dimension memory_sort_dimensions[] = {
1689 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1690 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1691 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1692 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1693 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1694 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1695 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1696 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1697 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
/*
 * Overhead/period columns are not sort_entry based — they map directly
 * to the pre-built perf_hpp__format[] formatters.
 */
1702 struct hpp_dimension {
1704 struct perf_hpp_fmt *fmt;

1708 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

1710 static struct hpp_dimension hpp_sort_dimensions[] = {
1711 DIM(PERF_HPP__OVERHEAD, "overhead"),
1712 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1713 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1714 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1715 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1716 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1717 DIM(PERF_HPP__SAMPLES, "sample"),
1718 DIM(PERF_HPP__PERIOD, "period"),

/* Adapter pairing a generic hpp formatter with a sort_entry. */
1723 struct hpp_sort_entry {
1724 struct perf_hpp_fmt hpp;
1725 struct sort_entry *se;
/*
 * perf_hpp_fmt callbacks for sort-entry backed columns.  Each one
 * recovers the enclosing hpp_sort_entry via container_of() and
 * delegates to the wrapped sort_entry's se_* hooks.
 */

/* Reset the column width to the header's length (no-op for non sort entries). */
1728 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1730 struct hpp_sort_entry *hse;
1732 if (!perf_hpp__is_sort_entry(fmt))
1735 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1736 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));

/* Print the column header; user-set width wins over the computed one. */
1739 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1740 struct hists *hists, int line __maybe_unused,
1741 int *span __maybe_unused)
1743 struct hpp_sort_entry *hse;
1744 size_t len = fmt->user_len;
1746 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1749 len = hists__col_len(hists, hse->se->se_width_idx);
1751 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);

/* Report the column width (user override, else tracked max length). */
1754 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1755 struct perf_hpp *hpp __maybe_unused,
1756 struct hists *hists)
1758 struct hpp_sort_entry *hse;
1759 size_t len = fmt->user_len;
1761 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1764 len = hists__col_len(hists, hse->se->se_width_idx);

/* Render one cell by delegating to the sort_entry's snprintf hook. */
1769 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1770 struct hist_entry *he)
1772 struct hpp_sort_entry *hse;
1773 size_t len = fmt->user_len;
1775 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1778 len = hists__col_len(he->hists, hse->se->se_width_idx);
1780 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);

/* Compare two entries with the sort_entry's primary compare hook. */
1783 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1784 struct hist_entry *a, struct hist_entry *b)
1786 struct hpp_sort_entry *hse;
1788 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1789 return hse->se->se_cmp(a, b);

/* Collapse-stage compare; falls back to se_cmp if no se_collapse. */
1792 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1793 struct hist_entry *a, struct hist_entry *b)
1795 struct hpp_sort_entry *hse;
1796 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1798 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1799 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1800 return collapse_fn(a, b);

/* Output-stage compare; falls back to se_cmp if no se_sort. */
1803 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1804 struct hist_entry *a, struct hist_entry *b)
1806 struct hpp_sort_entry *hse;
1807 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1809 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1810 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1811 return sort_fn(a, b);

/* A format is sort-entry backed iff its header hook is ours. */
1814 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1816 return format->header == __sort__hpp_header;
/*
 * Generate perf_hpp__is_<key>_entry() predicates: true when the format
 * wraps the corresponding sort_<key> descriptor.
 */
1819 #define MK_SORT_ENTRY_CHK(key) \
1820 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
1822 struct hpp_sort_entry *hse; \
1824 if (!perf_hpp__is_sort_entry(fmt)) \
1827 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
1828 return hse->se == &sort_ ## key ; \

1831 MK_SORT_ENTRY_CHK(trace)
1832 MK_SORT_ENTRY_CHK(srcline)
1833 MK_SORT_ENTRY_CHK(srcfile)
1834 MK_SORT_ENTRY_CHK(thread)
1835 MK_SORT_ENTRY_CHK(comm)
1836 MK_SORT_ENTRY_CHK(dso)
1837 MK_SORT_ENTRY_CHK(sym)

/* Two formats are equal when they wrap the same sort_entry. */
1840 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1842 struct hpp_sort_entry *hse_a;
1843 struct hpp_sort_entry *hse_b;
1845 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1848 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1849 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1851 return hse_a->se == hse_b->se;
/* Free callback for sort-entry backed formats. */
1854 static void hse_free(struct perf_hpp_fmt *fmt)
1856 struct hpp_sort_entry *hse;
1858 hse = container_of(fmt, struct hpp_sort_entry, hpp);

/*
 * Allocate an hpp_sort_entry wrapping 'sd' and wire all the generic
 * __sort__hpp_* callbacks into it.  Returns NULL on allocation failure.
 */
1862 static struct hpp_sort_entry *
1863 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1865 struct hpp_sort_entry *hse;
1867 hse = malloc(sizeof(*hse));
1869 pr_err("Memory allocation failed\n");
1873 hse->se = sd->entry;
1874 hse->hpp.name = sd->entry->se_header;
1875 hse->hpp.header = __sort__hpp_header;
1876 hse->hpp.width = __sort__hpp_width;
1877 hse->hpp.entry = __sort__hpp_entry;
1878 hse->hpp.color = NULL;
1880 hse->hpp.cmp = __sort__hpp_cmp;
1881 hse->hpp.collapse = __sort__hpp_collapse;
1882 hse->hpp.sort = __sort__hpp_sort;
1883 hse->hpp.equal = __sort__hpp_equal;
1884 hse->hpp.free = hse_free;
1886 INIT_LIST_HEAD(&hse->hpp.list);
1887 INIT_LIST_HEAD(&hse->hpp.sort_list);
1888 hse->hpp.elide = false;
1890 hse->hpp.user_len = 0;
1891 hse->hpp.level = level;

/* Free callback for duplicated plain hpp formats. */
1896 static void hpp_free(struct perf_hpp_fmt *fmt)

/* Duplicate the static perf_hpp__format entry so it can be listed freely. */
1901 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1904 struct perf_hpp_fmt *fmt;
1906 fmt = memdup(hd->fmt, sizeof(*fmt));
1908 INIT_LIST_HEAD(&fmt->list);
1909 INIT_LIST_HEAD(&fmt->sort_list);
1910 fmt->free = hpp_free;
/*
 * Run every applicable sort-key filter over 'he'.  An entry is filtered
 * out if any sort key in its hpp list says so; filters for other types
 * are skipped.
 */
1917 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1919 struct perf_hpp_fmt *fmt;
1920 struct hpp_sort_entry *hse;
1924 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1925 if (!perf_hpp__is_sort_entry(fmt))
1928 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1929 if (hse->se->se_filter == NULL)
1933 * hist entry is filtered if any of sort key in the hpp list
1934 * is applied. But it should skip non-matched filter types.
1936 r = hse->se->se_filter(he, type, arg);

/* Allocate the wrapper and register it as a sort field at 'level'. */
1947 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1948 struct perf_hpp_list *list,
1951 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1956 perf_hpp_list__register_sort_field(list, &hse->hpp);

/* Allocate the wrapper and register it as an output column (level 0). */
1960 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1961 struct perf_hpp_list *list)
1963 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1968 perf_hpp_list__column_register(list, &hse->hpp);
/*
 * Dynamic sort keys: columns built at runtime from tracepoint format
 * fields (--sort <event>.<field>).  The field is located through
 * libtraceevent's tep descriptors and compared/printed from the raw
 * sample payload.
 */
1972 struct hpp_dynamic_entry {
1973 struct perf_hpp_fmt hpp;
1974 struct evsel *evsel;
1975 struct tep_format_field *field;
/* Max width seen so far for variable-length (string/dynamic) fields. */
1976 unsigned dynamic_len;

/* Column width: user override, else max(name, value representation). */
1980 static int hde_width(struct hpp_dynamic_entry *hde)
1982 if (!hde->hpp.len) {
1983 int len = hde->dynamic_len;
1984 int namelen = strlen(hde->field->name);
1985 int fieldlen = hde->field->size;
1990 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
1991 /* length for print hex numbers */
1992 fieldlen = hde->field->size * 2 + 2;
1999 return hde->hpp.len;

/* Track the widest pretty-printed value of this field seen so far. */
2002 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2003 struct hist_entry *he)
2006 struct tep_format_field *field = hde->field;
2013 /* parse pretty print result and update max length */
2014 if (!he->trace_output)
2015 he->trace_output = get_trace_output(he);
2017 namelen = strlen(field->name);
2018 str = he->trace_output;
2021 pos = strchr(str, ' ');
2024 pos = str + strlen(str);
/* Scan "name=value" tokens for the one matching this field. */
2027 if (!strncmp(str, field->name, namelen)) {
2033 if (len > hde->dynamic_len)
2034 hde->dynamic_len = len;

/* Header callback: print the field name right-aligned. */
2045 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2046 struct hists *hists __maybe_unused,
2047 int line __maybe_unused,
2048 int *span __maybe_unused)
2050 struct hpp_dynamic_entry *hde;
2051 size_t len = fmt->user_len;
2053 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2056 len = hde_width(hde);
2058 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);

/* Width callback. */
2061 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2062 struct perf_hpp *hpp __maybe_unused,
2063 struct hists *hists __maybe_unused)
2065 struct hpp_dynamic_entry *hde;
2066 size_t len = fmt->user_len;
2068 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2071 len = hde_width(hde);

/* A dynamic column only applies to hists of its own event. */
2076 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2078 struct hpp_dynamic_entry *hde;
2080 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2082 return hists_to_evsel(hists) == hde->evsel;

/*
 * Cell callback: extract this field's value from the pretty-printed
 * trace output, or raw-print it via tep when requested/parsing fails.
 */
2085 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2086 struct hist_entry *he)
2088 struct hpp_dynamic_entry *hde;
2089 size_t len = fmt->user_len;
2091 struct tep_format_field *field;
2096 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2099 len = hde_width(hde);
2104 if (!he->trace_output)
2105 he->trace_output = get_trace_output(he);
2108 namelen = strlen(field->name);
2109 str = he->trace_output;
2112 pos = strchr(str, ' ');
2115 pos = str + strlen(str);
2118 if (!strncmp(str, field->name, namelen)) {
2120 str = strndup(str, pos - str);
2123 return scnprintf(hpp->buf, hpp->size,
2124 "%*.*s", len, len, "ERROR");
/* Raw fallback: let libtraceevent format the field from raw_data. */
2135 struct trace_seq seq;
2137 trace_seq_init(&seq);
2138 tep_print_field(&seq, he->raw_data, hde->field);
2142 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);

/*
 * Compare the raw bytes of the field in two samples.  Dynamic fields
 * encode offset/size in a leading word (offset in the low 16 bits,
 * size in the next 16).
 */
2147 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2148 struct hist_entry *a, struct hist_entry *b)
2150 struct hpp_dynamic_entry *hde;
2151 struct tep_format_field *field;
2152 unsigned offset, size;
2154 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2157 update_dynamic_len(hde, a);
2162 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2163 unsigned long long dyn;
2165 tep_read_number_field(field, a->raw_data, &dyn);
2166 offset = dyn & 0xffff;
2167 size = (dyn >> 16) & 0xffff;
2169 /* record max width for output */
2170 if (size > hde->dynamic_len)
2171 hde->dynamic_len = size;
2173 offset = field->offset;
2177 return memcmp(a->raw_data + offset, b->raw_data + offset, size);

/* A format is dynamic iff its cmp hook is ours. */
2180 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2182 return fmt->cmp == __sort__hde_cmp;

/* Two dynamic formats are equal when they wrap the same tep field. */
2185 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2187 struct hpp_dynamic_entry *hde_a;
2188 struct hpp_dynamic_entry *hde_b;
2190 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2193 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2194 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2196 return hde_a->field == hde_b->field;

/* Free callback. */
2199 static void hde_free(struct perf_hpp_fmt *fmt)
2201 struct hpp_dynamic_entry *hde;
2203 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

/* Allocate a dynamic entry and wire the __sort__hde_* callbacks. */
2207 static struct hpp_dynamic_entry *
2208 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
2211 struct hpp_dynamic_entry *hde;
2213 hde = malloc(sizeof(*hde));
2215 pr_debug("Memory allocation failed\n");
2221 hde->dynamic_len = 0;
2223 hde->hpp.name = field->name;
2224 hde->hpp.header = __sort__hde_header;
2225 hde->hpp.width = __sort__hde_width;
2226 hde->hpp.entry = __sort__hde_entry;
2227 hde->hpp.color = NULL;
2229 hde->hpp.cmp = __sort__hde_cmp;
2230 hde->hpp.collapse = __sort__hde_cmp;
2231 hde->hpp.sort = __sort__hde_cmp;
2232 hde->hpp.equal = __sort__hde_equal;
2233 hde->hpp.free = hde_free;
2235 INIT_LIST_HEAD(&hde->hpp.list);
2236 INIT_LIST_HEAD(&hde->hpp.sort_list);
2237 hde->hpp.elide = false;
2239 hde->hpp.user_len = 0;
2240 hde->hpp.level = level;
/*
 * Deep-copy a format descriptor.  The copy must cover the whole
 * enclosing wrapper (hpp_sort_entry or hpp_dynamic_entry), not just
 * the embedded perf_hpp_fmt, so the three cases are distinguished.
 * The duplicated list heads are re-initialized so the copy starts
 * unlinked.
 */
2245 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2247 struct perf_hpp_fmt *new_fmt = NULL;
2249 if (perf_hpp__is_sort_entry(fmt)) {
2250 struct hpp_sort_entry *hse, *new_hse;
2252 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2253 new_hse = memdup(hse, sizeof(*hse));
2255 new_fmt = &new_hse->hpp;
2256 } else if (perf_hpp__is_dynamic_entry(fmt)) {
2257 struct hpp_dynamic_entry *hde, *new_hde;
2259 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2260 new_hde = memdup(hde, sizeof(*hde));
2262 new_fmt = &new_hde->hpp;
2264 new_fmt = memdup(fmt, sizeof(*fmt));
2267 INIT_LIST_HEAD(&new_fmt->list);
2268 INIT_LIST_HEAD(&new_fmt->sort_list);
/*
 * Split "event.field/opt" in place into its three parts.  Missing
 * parts are reported through the out parameters; 'str' is modified
 * (separators are overwritten with NULs).
 */
2273 static int parse_field_name(char *str, char **event, char **field, char **opt)
2275 char *event_name, *field_name, *opt_name;
2278 field_name = strchr(str, '.');
2281 *field_name++ = '\0';
2287 opt_name = strchr(field_name, '/');
2291 *event = event_name;
2292 *field = field_name;

2298 /* find match evsel using a given event name. The event name can be:
2299  * 1. '%' + event index (e.g. '%1' for first event)
2300  * 2. full event name (e.g. sched:sched_switch)
2301  * 3. partial event name (should not contain ':')
2303 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
2305 struct evsel *evsel = NULL;
/* Case 1: "%N" — 1-based index into the event list. */
2310 if (event_name[0] == '%') {
2311 int nr = strtol(event_name+1, NULL, 0);
/*
 * NOTE(review): only the upper bound is validated here; '%0' or a
 * negative index appears to fall through and select the first event —
 * confirm whether that is intended.
 */
2313 if (nr > evlist->core.nr_entries)
2316 evsel = evlist__first(evlist);
2318 evsel = perf_evsel__next(evsel);

/* Cases 2/3: exact match when a ':' is present, else substring match. */
2323 full_name = !!strchr(event_name, ':');
2324 evlist__for_each_entry(evlist, pos) {
2326 if (full_name && !strcmp(pos->name, event_name))
2329 if (!full_name && strstr(pos->name, event_name)) {
/* A second substring match makes the name ambiguous — report it. */
2331 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2332 event_name, evsel->name, pos->name);
/* Allocate a dynamic column for (evsel, field) and register it. */
2342 static int __dynamic_dimension__add(struct evsel *evsel,
2343 struct tep_format_field *field,
2344 bool raw_trace, int level)
2346 struct hpp_dynamic_entry *hde;
2348 hde = __alloc_dynamic_entry(evsel, field, level);
2352 hde->raw_trace = raw_trace;
2354 perf_hpp__register_sort_field(&hde->hpp);

/* Add a dynamic column for every format field of one tracepoint. */
2358 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
2361 struct tep_format_field *field;
2363 field = evsel->tp_format->format.fields;
2365 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2369 field = field->next;

/* "trace_fields": add every field of every tracepoint in the list. */
2374 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
2378 struct evsel *evsel;
2380 evlist__for_each_entry(evlist, evsel) {
2381 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2384 ret = add_evsel_fields(evsel, raw_trace, level);

/* No event given: add the named field from every tracepoint that has it. */
2391 static int add_all_matching_fields(struct evlist *evlist,
2392 char *field_name, bool raw_trace, int level)
2395 struct evsel *evsel;
2396 struct tep_format_field *field;
2398 evlist__for_each_entry(evlist, evsel) {
2399 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2402 field = tep_find_any_field(evsel->tp_format, field_name);
2406 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);

/*
 * Entry point for dynamic --sort tokens ("evt.field", "evt.*",
 * "trace_fields", optionally with "/raw").  Returns 0 on success or a
 * negative errno.
 */
2413 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
2416 char *str, *event_name, *field_name, *opt_name;
2417 struct evsel *evsel;
2418 struct tep_format_field *field;
2419 bool raw_trace = symbol_conf.raw_trace;
2429 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
/* Only the "/raw" option is recognized. */
2435 if (strcmp(opt_name, "raw")) {
2436 pr_debug("unsupported field option %s\n", opt_name);
2443 if (!strcmp(field_name, "trace_fields")) {
2444 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2448 if (event_name == NULL) {
2449 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2453 evsel = find_evsel(evlist, event_name);
2454 if (evsel == NULL) {
2455 pr_debug("Cannot find event: %s\n", event_name);
2460 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2461 pr_debug("%s is not a tracepoint event\n", event_name);
/* "evt.*" expands to all fields of that single event. */
2466 if (!strcmp(field_name, "*")) {
2467 ret = add_evsel_fields(evsel, raw_trace, level);
2469 field = tep_find_any_field(evsel->tp_format, field_name);
2470 if (field == NULL) {
2471 pr_debug("Cannot find event field for %s.%s\n",
2472 event_name, field_name);
2476 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
/* Register 'sd' as a sort key; flag the list if collapsing is needed. */
2484 static int __sort_dimension__add(struct sort_dimension *sd,
2485 struct perf_hpp_list *list,
2491 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2494 if (sd->entry->se_collapse)
2495 list->need_collapse = 1;

/* Register an overhead/period dimension as a sort key. */
2502 static int __hpp_dimension__add(struct hpp_dimension *hd,
2503 struct perf_hpp_list *list,
2506 struct perf_hpp_fmt *fmt;
2511 fmt = __hpp_dimension__alloc_hpp(hd, level);
2516 perf_hpp_list__register_sort_field(list, fmt);

/* Register 'sd' as an output-only column. */
2520 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2521 struct sort_dimension *sd)
2526 if (__sort_dimension__add_hpp_output(sd, list) < 0)

/* Register an overhead/period dimension as an output-only column. */
2533 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2534 struct hpp_dimension *hd)
2536 struct perf_hpp_fmt *fmt;
2541 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2546 perf_hpp_list__column_register(list, fmt);

/* Public helper: add one hpp output column by PERF_HPP__* index. */
2550 int hpp_dimension__add_output(unsigned col)
2552 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2553 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
/*
 * Resolve one --sort token and register the matching sort key.  The
 * token is matched (prefix, case-insensitive) against the common,
 * hpp (overhead/period), branch-stack and memory tables in turn, and
 * finally tried as a dynamic tracepoint field.  Branch and memory keys
 * are rejected unless the corresponding sort mode is active.
 */
2556 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2557 struct evlist *evlist,
2562 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2563 struct sort_dimension *sd = &common_sort_dimensions[i];
2565 if (strncasecmp(tok, sd->name, strlen(tok)))
/* "parent" needs the parent_pattern regex compiled up front. */
2568 if (sd->entry == &sort_parent) {
2569 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2573 regerror(ret, &parent_regex, err, sizeof(err));
2574 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2578 } else if (sd->entry == &sort_sym) {
2581 * perf diff displays the performance difference amongst
2582 * two or more perf.data files. Those files could come
2583 * from different binaries. So we should not compare
2584 * their ips, but the name of symbol.
2586 if (sort__mode == SORT_MODE__DIFF)
2587 sd->entry->se_collapse = sort__sym_sort;
2589 } else if (sd->entry == &sort_dso) {
2591 } else if (sd->entry == &sort_socket) {
2593 } else if (sd->entry == &sort_thread) {
2595 } else if (sd->entry == &sort_comm) {
2599 return __sort_dimension__add(sd, list, level);

2602 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2603 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2605 if (strncasecmp(tok, hd->name, strlen(tok)))
2608 return __hpp_dimension__add(hd, list, level);

2611 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2612 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2614 if (strncasecmp(tok, sd->name, strlen(tok)))
/* Branch-stack keys require --branch-stack mode. */
2617 if (sort__mode != SORT_MODE__BRANCH)
2620 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2623 __sort_dimension__add(sd, list, level);

2627 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2628 struct sort_dimension *sd = &memory_sort_dimensions[i];
2630 if (strncasecmp(tok, sd->name, strlen(tok)))
/* Memory keys require memory (mem-events) mode. */
2633 if (sort__mode != SORT_MODE__MEMORY)
2636 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
2639 if (sd->entry == &sort_mem_daddr_sym)
2642 __sort_dimension__add(sd, list, level);

/* Last resort: "event.field" style dynamic tracepoint key. */
2646 if (!add_dynamic_entry(evlist, tok, level))
/*
 * Tokenize the --sort string and add each key.  '{...}' groups keys
 * onto the same hierarchy level; keys inside a group share 'level'
 * while the next group starts at level + 1.
 */
2652 static int setup_sort_list(struct perf_hpp_list *list, char *str,
2653 struct evlist *evlist)
2659 bool in_group = false;
2663 tmp = strpbrk(str, "{}, ");
2668 next_level = level + 1;
2672 else if (*tmp == '}')
2680 ret = sort_dimension__add(list, tok, evlist, level);
2681 if (ret == -EINVAL) {
/* Give a dedicated hint for the dcacheline/cacheline-size case. */
2682 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
2683 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
2685 ui__error("Invalid --sort key: `%s'", tok);
2687 } else if (ret == -ESRCH) {
2688 ui__error("Unknown --sort key: `%s'", tok);
/*
 * Pick the default sort order for the current sort mode.  If every
 * event in the list is a tracepoint, switch to tracepoint mode and use
 * the trace-based default instead.
 */
2699 static const char *get_default_sort_order(struct evlist *evlist)
/* Indexed by enum sort_mode — keep in sync with it. */
2701 const char *default_sort_orders[] = {
2703 default_branch_sort_order,
2704 default_mem_sort_order,
2705 default_top_sort_order,
2706 default_diff_sort_order,
2707 default_tracepoint_sort_order,
2709 bool use_trace = true;
2710 struct evsel *evsel;
2712 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2714 if (evlist == NULL || perf_evlist__empty(evlist))
2717 evlist__for_each_entry(evlist, evsel) {
2718 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2725 sort__mode = SORT_MODE__TRACEPOINT;
2726 if (symbol_conf.raw_trace)
2727 return "trace_fields";
2730 return default_sort_orders[sort__mode];

/*
 * Handle a '+'-prefixed --sort string by appending the user keys to
 * the mode's default order.
 */
2733 static int setup_sort_order(struct evlist *evlist)
2735 char *new_sort_order;
2738 * Append '+'-prefixed sort order to the default sort
2741 if (!sort_order || is_strict_order(sort_order))
2744 if (sort_order[1] == '\0') {
2745 ui__error("Invalid --sort key: `+'");
2750 * We allocate new sort_order string, but we never free it,
2751 * because it's checked over the rest of the code.
2753 if (asprintf(&new_sort_order, "%s,%s",
2754 get_default_sort_order(evlist), sort_order + 1) < 0) {
2755 pr_err("Not enough memory to set up --sort");
2759 sort_order = new_sort_order;
2764 * Adds 'pre,' prefix into 'str' if 'pre' is
2765 * not already part of 'str'.
/* Returns 'str' unchanged on NULL, existing prefix, or alloc failure. */
2767 static char *prefix_if_not_in(const char *pre, char *str)
2771 if (!str || strstr(str, pre))
2774 if (asprintf(&n, "%s,%s", pre, str) < 0)

/*
 * Prepend the implicit overhead column(s) to the sort keys, except in
 * diff mode where overhead is not meaningful.
 */
2781 static char *setup_overhead(char *keys)
2783 if (sort__mode == SORT_MODE__DIFF)
2786 keys = prefix_if_not_in("overhead", keys);
2788 if (symbol_conf.cumulate_callchain)
2789 keys = prefix_if_not_in("overhead_children", keys);
/*
 * Build the global sort key list: resolve '+' prefixes, fall back to
 * the mode default, prepend overhead columns for backward
 * compatibility, then parse the resulting key string.
 */
2794 static int __setup_sorting(struct evlist *evlist)
2797 const char *sort_keys;
2800 ret = setup_sort_order(evlist);
2804 sort_keys = sort_order;
2805 if (sort_keys == NULL) {
2806 if (is_strict_order(field_order)) {
2808 * If user specified field order but no sort order,
2809 * we'll honor it and not add default sort orders.
2814 sort_keys = get_default_sort_order(evlist);
2817 str = strdup(sort_keys);
2819 pr_err("Not enough memory to setup sort keys");
2824 * Prepend overhead fields for backward compatibility.
2826 if (!is_strict_order(field_order)) {
2827 str = setup_overhead(str);
2829 pr_err("Not enough memory to setup overhead keys");
2834 ret = setup_sort_list(&perf_hpp_list, str, evlist);
/* Set the elide flag on the sort entry with the given width index. */
2840 void perf_hpp__set_elide(int idx, bool elide)
2842 struct perf_hpp_fmt *fmt;
2843 struct hpp_sort_entry *hse;
2845 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2846 if (!perf_hpp__is_sort_entry(fmt))
2849 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2850 if (hse->se->se_width_idx == idx) {

/*
 * A column can be elided when the user filtered on exactly one value
 * of it — every row would print the same thing.  Announce the elision
 * in the report header.
 */
2857 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2859 if (list && strlist__nr_entries(list) == 1) {
2861 fprintf(fp, "# %s: %s\n", list_name,
2862 strlist__entry(list, 0)->s);

/* Map a column's width index to the matching single-entry filter list. */
2868 static bool get_elide(int idx, FILE *output)
2872 return __get_elide(symbol_conf.sym_list, "symbol", output);
2874 return __get_elide(symbol_conf.dso_list, "dso", output);
2876 return __get_elide(symbol_conf.comm_list, "comm", output);
/* The from/to variants only exist in branch mode. */
2881 if (sort__mode != SORT_MODE__BRANCH)
2885 case HISTC_SYMBOL_FROM:
2886 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2887 case HISTC_SYMBOL_TO:
2888 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2889 case HISTC_DSO_FROM:
2890 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2892 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);

/* Compute elision for every sort column; never elide all of them. */
2900 void sort__setup_elide(FILE *output)
2902 struct perf_hpp_fmt *fmt;
2903 struct hpp_sort_entry *hse;
2905 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2906 if (!perf_hpp__is_sort_entry(fmt))
2909 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2910 fmt->elide = get_elide(hse->se->se_width_idx, output);
2914 * It makes no sense to elide all of sort entries.
2915 * Just revert them to show up again.
2917 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2918 if (!perf_hpp__is_sort_entry(fmt))
2925 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2926 if (!perf_hpp__is_sort_entry(fmt))
2933 int output_field_add(struct perf_hpp_list *list, char *tok)
2937 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2938 struct sort_dimension *sd = &common_sort_dimensions[i];
2940 if (strncasecmp(tok, sd->name, strlen(tok)))
2943 return __sort_dimension__add_output(list, sd);
2946 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2947 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2949 if (strncasecmp(tok, hd->name, strlen(tok)))
2952 return __hpp_dimension__add_output(list, hd);
2955 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2956 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2958 if (strncasecmp(tok, sd->name, strlen(tok)))
2961 if (sort__mode != SORT_MODE__MEMORY)
2964 return __sort_dimension__add_output(list, sd);
2967 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2968 struct sort_dimension *sd = &memory_sort_dimensions[i];
2970 if (strncasecmp(tok, sd->name, strlen(tok)))
2973 if (sort__mode != SORT_MODE__BRANCH)
2976 return __sort_dimension__add_output(list, sd);
/*
 * Split @str on commas/spaces (destructively, via strtok_r) and add each
 * token as an output field.  Stops and reports on the first invalid or
 * unknown key; returns the last output_field_add() result.
 */
static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *state;
	char *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &state); tok != NULL;
	     tok = strtok_r(NULL, ", ", &state)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		}
		if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}
3002 void reset_dimensions(void)
3006 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3007 common_sort_dimensions[i].taken = 0;
3009 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3010 hpp_sort_dimensions[i].taken = 0;
3012 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3013 bstack_sort_dimensions[i].taken = 0;
3015 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3016 memory_sort_dimensions[i].taken = 0;
/*
 * An order string is "strict" when the user gave an explicit list; a
 * leading '+' means "append to the defaults" and is thus non-strict.
 * NULL is treated as non-strict.
 */
bool is_strict_order(const char *order)
{
	if (order == NULL)
		return false;

	return *order != '+';
}
3024 static int __setup_output_field(void)
3029 if (field_order == NULL)
3032 strp = str = strdup(field_order);
3034 pr_err("Not enough memory to setup output fields");
3038 if (!is_strict_order(field_order))
3041 if (!strlen(strp)) {
3042 ui__error("Invalid --fields key: `+'");
3046 ret = setup_output_list(&perf_hpp_list, strp);
3053 int setup_sorting(struct evlist *evlist)
3057 err = __setup_sorting(evlist);
3061 if (parent_pattern != default_parent_pattern) {
3062 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3070 * perf diff doesn't use default hpp output fields.
3072 if (sort__mode != SORT_MODE__DIFF)
3075 err = __setup_output_field();
3079 /* copy sort keys to output fields */
3080 perf_hpp__setup_output_field(&perf_hpp_list);
3081 /* and then copy output fields to sort keys */
3082 perf_hpp__append_sort_keys(&perf_hpp_list);
3084 /* setup hists-specific output fields */
3085 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3091 void reset_output_field(void)
3093 perf_hpp_list.need_collapse = 0;
3094 perf_hpp_list.parent = 0;
3095 perf_hpp_list.sym = 0;
3096 perf_hpp_list.dso = 0;
3102 perf_hpp__reset_output_field(&perf_hpp_list);
3105 #define INDENT (3*8 + 1)
3107 static void add_key(struct strbuf *sb, const char *str, int *llen)
3110 strbuf_addstr(sb, "\n\t\t\t ");
3113 strbuf_addf(sb, " %s", str);
3114 *llen += strlen(str) + 1;
3117 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3122 for (i = 0; i < n; i++)
3123 add_key(sb, s[i].name, llen);
3126 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3131 for (i = 0; i < n; i++)
3132 add_key(sb, s[i].name, llen);
3135 const char *sort_help(const char *prefix)
3139 int len = strlen(prefix) + INDENT;
3141 strbuf_init(&sb, 300);
3142 strbuf_addstr(&sb, prefix);
3143 add_hpp_sort_string(&sb, hpp_sort_dimensions,
3144 ARRAY_SIZE(hpp_sort_dimensions), &len);
3145 add_sort_string(&sb, common_sort_dimensions,
3146 ARRAY_SIZE(common_sort_dimensions), &len);
3147 add_sort_string(&sb, bstack_sort_dimensions,
3148 ARRAY_SIZE(bstack_sort_dimensions), &len);
3149 add_sort_string(&sb, memory_sort_dimensions,
3150 ARRAY_SIZE(memory_sort_dimensions), &len);
3151 s = strbuf_detach(&sb, NULL);
3152 strbuf_release(&sb);