1 // SPDX-License-Identifier: GPL-2.0
4 #include <linux/string.h>
6 #include "../../util/callchain.h"
7 #include "../../util/debug.h"
8 #include "../../util/hist.h"
9 #include "../../util/map.h"
10 #include "../../util/map_groups.h"
11 #include "../../util/symbol.h"
12 #include "../../util/sort.h"
13 #include "../../util/evsel.h"
14 #include "../../util/srcline.h"
15 #include "../../util/string2.h"
16 #include "../../util/thread.h"
17 #include <linux/ctype.h>
18 #include <linux/zalloc.h>
/*
 * Emit the left margin before a callchain line: one leading space plus
 * left_margin additional spaces. Returns the byte count written.
 * NOTE(review): listing is elided here -- loop counter declaration and
 * the return statement are not visible in this chunk.
 */
20 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
23 int ret = fprintf(fp, " ");
25 for (i = 0; i < left_margin; i++)
26 ret += fprintf(fp, " ");
/*
 * Print one "spacer" line of the graph: the left margin, then a '|'
 * connector for every depth level whose bit is set in depth_mask,
 * finishing with a newline. Returns total bytes written.
 */
31 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
35 size_t ret = callchain__fprintf_left_margin(fp, left_margin);
37 for (i = 0; i < depth; i++)
38 if (depth_mask & (1 << i))
39 ret += fprintf(fp, "| ");
41 ret += fprintf(fp, " ");
43 ret += fprintf(fp, "\n");
/*
 * Print one callchain entry line in graph mode: margin, the '|' rails
 * selected by depth_mask, then either a "--<value>--" branch header
 * (first line of a new branch, i.e. !period at the deepest level) or
 * plain padding, followed by the resolved symbol name.
 * NOTE(review): elided listing -- declarations of ret/i/str/buf and the
 * tail that prints str and frees alloc_str are not visible here.
 */
48 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
49 struct callchain_list *chain,
50 int depth, int depth_mask, int period,
51 u64 total_samples, int left_margin)
55 char bf[1024], *alloc_str = NULL;
59 ret += callchain__fprintf_left_margin(fp, left_margin);
60 for (i = 0; i < depth; i++) {
61 if (depth_mask & (1 << i))
62 ret += fprintf(fp, "|");
64 ret += fprintf(fp, " ");
65 if (!period && i == depth - 1) {
/* Branch header: bracket the node's value with "--" markers. */
66 ret += fprintf(fp, "--");
67 ret += callchain_node__fprintf_value(node, fp, total_samples);
68 ret += fprintf(fp, "--");
70 ret += fprintf(fp, "%s", " ");
/* Resolve the symbol name for this chain entry into bf. */
73 str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
75 if (symbol_conf.show_branchflag_count) {
/* Append branch-flag counts after the symbol name. */
76 callchain_list_counts__printf_value(chain, NULL,
/* asprintf failure leaves str pointing at a static fallback message. */
79 if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
80 str = "Not enough memory!";
/*
 * Synthetic "[...]" symbol and chain entry used to report the remaining
 * (filtered-out) hits at the end of a relative graph; see init_rem_hits().
 */
92 static struct symbol *rem_sq_bracket;
93 static struct callchain_list rem_hits;
/*
 * Allocate the fake "[...]" symbol (struct symbol plus 6 bytes for the
 * flexible name, "[...]" + NUL) and attach it to rem_hits. On allocation
 * failure a warning is printed; elided lines presumably return early --
 * TODO confirm against the full source.
 */
95 static void init_rem_hits(void)
97 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
98 if (!rem_sq_bracket) {
99 fprintf(stderr, "Not enough memory to display remaining hits\n");
103 strcpy(rem_sq_bracket->name, "[...]");
104 rem_hits.ms.sym = rem_sq_bracket;
/*
 * Recursively print an rb-tree of callchain nodes in graph mode.
 * Walks the children at this depth, printing a separator line and each
 * node's chain entries, then recursing into each child with the depth
 * mask extended by one level. In CHAIN_GRAPH_REL mode, whatever samples
 * were filtered out are reported at the end via the fake rem_hits
 * "[...]" entry. Returns total bytes written.
 * NOTE(review): elided listing -- declarations of ret/cumul/remaining/
 * new_total/cumul_count, the loop construct around the rb-tree walk,
 * and several closing braces are not visible in this chunk.
 */
107 static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
108 u64 total_samples, int depth,
109 int depth_mask, int left_margin)
111 struct rb_node *node, *next;
112 struct callchain_node *child = NULL;
113 struct callchain_list *chain;
114 int new_depth_mask = depth_mask;
118 uint entries_printed = 0;
121 remaining = total_samples;
123 node = rb_first(root);
128 child = rb_entry(node, struct callchain_node, rb_node);
129 cumul = callchain_cumul_hits(child);
131 cumul_count += callchain_cumul_counts(child);
134 * The depth mask manages the output of pipes that show
135 * the depth. We don't want to keep the pipes of the current
136 * level for the last child of this depth.
137 * Except if we have remaining filtered hits. They will
138 * supersede the last child
140 next = rb_next(node);
141 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
142 new_depth_mask &= ~(1 << (depth - 1));
145 * But we keep the older depth mask for the line separator
146 * to keep the level link until we reach the last child
148 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
151 list_for_each_entry(chain, &child->val, list) {
152 ret += ipchain__fprintf_graph(fp, child, chain, depth,
/* Relative mode: percentages are taken against this child's hits. */
158 if (callchain_param.mode == CHAIN_GRAPH_REL)
159 new_total = child->children_hit;
161 new_total = total_samples;
163 ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
165 new_depth_mask | (1 << depth),
168 if (++entries_printed == callchain_param.print_limit)
/* Report filtered-out hits as a trailing "[...]" pseudo entry. */
172 if (callchain_param.mode == CHAIN_GRAPH_REL &&
173 remaining && remaining != total_samples) {
174 struct callchain_node rem_node = {
181 if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
182 rem_node.count = child->parent->children_count - cumul_count;
183 if (rem_node.count <= 0)
187 new_depth_mask &= ~(1 << (depth - 1));
188 ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
189 new_depth_mask, 0, total_samples,
197 * If have one single callchain root, don't bother printing
198 * its percentage (100 % in fractal mode and the same percentage
199 * than the hist in graph mode). This also avoid one level of column.
201 * However when percent-limit applied, it's possible that single callchain
202 * node have different (non-100% in fractal mode) percentage.
/*
 * Decide whether the (single) root callchain node needs its percentage
 * printed: only when its cumulated hits differ from the parent's sample
 * count (see the rationale in the comment block above).
 * NOTE(review): elided -- the real source likely also checks for a
 * sibling node before this; confirm against the full file.
 */
204 static bool need_percent_display(struct rb_node *node, u64 parent_samples)
206 struct callchain_node *cnode;
211 cnode = rb_entry(node, struct callchain_node, rb_node);
212 return callchain_cumul_hits(cnode) != parent_samples;
/*
 * Top-level graph-mode callchain printer. If the single root node's
 * percentage would be redundant (== parent's), print its entries inline
 * without a percentage column -- skipping the first entry when sorting
 * by symbol, since it duplicates the hist line -- then descend into its
 * children. Otherwise print the whole tree via __callchain__fprintf_graph.
 * Returns bytes written.
 * NOTE(review): elided listing -- declarations of ret/i/bf and several
 * closing braces are not visible in this chunk.
 */
215 static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
216 u64 total_samples, u64 parent_samples,
219 struct callchain_node *cnode;
220 struct callchain_list *chain;
221 u32 entries_printed = 0;
222 bool printed = false;
223 struct rb_node *node;
228 node = rb_first(root);
229 if (node && !need_percent_display(node, parent_samples)) {
230 cnode = rb_entry(node, struct callchain_node, rb_node);
231 list_for_each_entry(chain, &cnode->val, list) {
233 * If we sort by symbol, the first entry is the same than
234 * the symbol. No need to print it otherwise it appears as
237 if (!i++ && field_order == NULL &&
238 sort_order && strstarts(sort_order, "sym"))
/* Draw the "|" / "---" connector before the first printed entry. */
242 ret += callchain__fprintf_left_margin(fp, left_margin);
243 ret += fprintf(fp, "|\n");
244 ret += callchain__fprintf_left_margin(fp, left_margin);
245 ret += fprintf(fp, "---");
249 ret += callchain__fprintf_left_margin(fp, left_margin);
251 ret += fprintf(fp, "%s",
252 callchain_list__sym_name(chain, bf,
256 if (symbol_conf.show_branchflag_count)
257 ret += callchain_list_counts__printf_value(
259 ret += fprintf(fp, "\n");
261 if (++entries_printed == callchain_param.print_limit)
/* Descend: the children of the collapsed root become the new root. */
264 root = &cnode->rb_root;
267 if (callchain_param.mode == CHAIN_GRAPH_REL)
268 total_samples = parent_samples;
270 ret += __callchain__fprintf_graph(fp, root, total_samples,
273 /* do not add a blank line if it printed nothing */
274 ret += fprintf(fp, "\n");
/*
 * Print one chain in flat mode, root first: recurse to the parent
 * before printing this node's entries, skipping PERF_CONTEXT_* markers
 * (ip values >= PERF_CONTEXT_MAX). Returns bytes written.
 * NOTE(review): elided -- the ret/bf declarations, the NULL-node guard,
 * and the return are not visible in this chunk.
 */
280 static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
283 struct callchain_list *chain;
290 ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
293 list_for_each_entry(chain, &node->val, list) {
294 if (chain->ip >= PERF_CONTEXT_MAX)
296 ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
297 bf, sizeof(bf), false));
/*
 * Flat-mode printer: for each top-level chain in the rb-tree, print its
 * value followed by the full root-to-leaf symbol list, up to
 * callchain_param.print_limit entries. Returns bytes written.
 */
303 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
307 u32 entries_printed = 0;
308 struct callchain_node *chain;
309 struct rb_node *rb_node = rb_first(tree);
312 chain = rb_entry(rb_node, struct callchain_node, rb_node);
314 ret += fprintf(fp, " ");
315 ret += callchain_node__fprintf_value(chain, fp, total_samples);
316 ret += fprintf(fp, "\n");
317 ret += __callchain__fprintf_flat(fp, chain, total_samples);
318 ret += fprintf(fp, "\n");
319 if (++entries_printed == callchain_param.print_limit)
322 rb_node = rb_next(rb_node);
/*
 * Print one chain folded onto a single line, root first, entries joined
 * by field_sep (default ";"), skipping PERF_CONTEXT_* marker ips.
 * NOTE(review): elided -- ret/bf/first declarations, the NULL-node
 * guard, the update of 'first', and the return are not visible here.
 */
328 static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
330 const char *sep = symbol_conf.field_sep ?: ";";
331 struct callchain_list *chain;
339 ret += __callchain__fprintf_folded(fp, node->parent);
342 list_for_each_entry(chain, &node->val, list) {
343 if (chain->ip >= PERF_CONTEXT_MAX)
345 ret += fprintf(fp, "%s%s", first ? "" : sep,
346 callchain_list__sym_name(chain,
347 bf, sizeof(bf), false));
/*
 * Folded-mode printer: one line per top-level chain -- value, a space,
 * then the ';'-joined symbol list -- up to callchain_param.print_limit
 * entries. Returns bytes written.
 */
354 static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
358 u32 entries_printed = 0;
359 struct callchain_node *chain;
360 struct rb_node *rb_node = rb_first(tree);
364 chain = rb_entry(rb_node, struct callchain_node, rb_node);
366 ret += callchain_node__fprintf_value(chain, fp, total_samples);
367 ret += fprintf(fp, " ");
368 ret += __callchain__fprintf_folded(fp, chain);
369 ret += fprintf(fp, "\n");
370 if (++entries_printed == callchain_param.print_limit)
373 rb_node = rb_next(rb_node);
/*
 * Dispatch a hist entry's sorted callchain to the printer matching
 * callchain_param.mode (graph rel/abs, flat, folded). When cumulating
 * callchains, percentages are computed against the accumulated period
 * rather than the entry's own. Returns bytes written; elided lines
 * presumably handle CHAIN_NONE and the error return -- TODO confirm.
 */
379 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
380 u64 total_samples, int left_margin,
383 u64 parent_samples = he->stat.period;
385 if (symbol_conf.cumulate_callchain)
386 parent_samples = he->stat_acc->period;
388 switch (callchain_param.mode) {
389 case CHAIN_GRAPH_REL:
390 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
391 parent_samples, left_margin);
393 case CHAIN_GRAPH_ABS:
394 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
395 parent_samples, left_margin);
398 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
401 return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
406 pr_err("Bad callchain mode\n");
/*
 * Format one hist entry into hpp->buf using the given format list:
 * each non-skipped column is rendered (colored when supported),
 * aligned, and separated by field_sep (or a space). Entries without a
 * parent are suppressed when exclude_other is set. Returns the number
 * of bytes written into the buffer.
 * NOTE(review): elided -- the 'first' flag declaration/update and some
 * closing braces are not visible in this chunk.
 */
412 int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
413 struct perf_hpp_list *hpp_list)
415 const char *sep = symbol_conf.field_sep;
416 struct perf_hpp_fmt *fmt;
417 char *start = hpp->buf;
421 if (symbol_conf.exclude_other && !he->parent)
424 perf_hpp_list__for_each_format(hpp_list, fmt) {
425 if (perf_hpp__should_skip(fmt, he->hists))
429 * If there's no field_sep, we still need
430 * to display initial ' '.
432 if (!sep || !first) {
433 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
434 advance_hpp(hpp, ret);
438 if (perf_hpp__use_color() && fmt->color)
439 ret = fmt->color(fmt, hpp, he);
441 ret = fmt->entry(fmt, hpp, he);
/* Pad/align the column output before advancing the buffer cursor. */
443 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
444 advance_hpp(hpp, ret);
447 return hpp->buf - start;
/* Convenience wrapper: format the entry with its hists' own format list. */
450 static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
452 return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
/*
 * Print one hist entry in hierarchy (--hierarchy) mode: indent by the
 * entry's depth, render the overhead columns from the first
 * hpp_list_node, pad out remaining hpp-node indentation, then append
 * the entry's own sort column(s) left-aligned, and finally the
 * callchain for leaf entries when callchains are in use. Returns the
 * number of characters printed to fp.
 * NOTE(review): elided -- the 'first' flag, buffer resets between
 * columns, and the final return are not visible in this chunk.
 */
455 static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
456 struct perf_hpp *hpp,
460 const char *sep = symbol_conf.field_sep;
461 struct perf_hpp_fmt *fmt;
462 struct perf_hpp_list_node *fmt_node;
463 char *buf = hpp->buf;
464 size_t size = hpp->size;
465 int ret, printed = 0;
468 if (symbol_conf.exclude_other && !he->parent)
471 ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
472 advance_hpp(hpp, ret);
474 /* the first hpp_list_node is for overhead columns */
475 fmt_node = list_first_entry(&hists->hpp_formats,
476 struct perf_hpp_list_node, list);
477 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
479 * If there's no field_sep, we still need
480 * to display initial ' '.
482 if (!sep || !first) {
483 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
484 advance_hpp(hpp, ret);
488 if (perf_hpp__use_color() && fmt->color)
489 ret = fmt->color(fmt, hpp, he);
491 ret = fmt->entry(fmt, hpp, he);
493 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
494 advance_hpp(hpp, ret);
/* Pad to full hierarchy width so sort columns line up across depths. */
498 ret = scnprintf(hpp->buf, hpp->size, "%*s",
499 (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
500 advance_hpp(hpp, ret);
502 printed += fprintf(fp, "%s", buf);
504 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
509 * No need to call hist_entry__snprintf_alignment() since this
510 * fmt is always the last column in the hierarchy mode.
512 if (perf_hpp__use_color() && fmt->color)
513 fmt->color(fmt, hpp, he);
515 fmt->entry(fmt, hpp, he);
518 * dynamic entries are right-aligned but we want left-aligned
519 * in the hierarchy mode
521 printed += fprintf(fp, "%s%s", sep ?: " ", skip_spaces(buf));
523 printed += putc('\n', fp);
525 if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
526 u64 total = hists__total_period(hists);
528 printed += hist_entry_callchain__fprintf(he, total, 0, fp);
/*
 * Print the entries of a block hist (--report-block): iterate the
 * contained block_hists entries, formatting each into bf and printing
 * one line per entry.
 * NOTE(review): elided -- the hpp initializer fields, the per-entry
 * lookup that rebinds 'he', and the return are not visible here;
 * presumably each iteration formats a different entry -- confirm
 * against the full source.
 */
536 static int hist_entry__block_fprintf(struct hist_entry *he,
537 char *bf, size_t size,
540 struct block_hist *bh = container_of(he, struct block_hist, he);
543 for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
544 struct perf_hpp hpp = {
551 hist_entry__snprintf(he, &hpp);
554 ret += fprintf(fp, "%s\n", bf);
/*
 * Print a single hist entry to fp, dispatching to the hierarchy or
 * block printers when those report modes are active; otherwise format
 * the entry into bf, print it, and append its callchain (against the
 * hists' total period) unless ignore_callchains is set. Returns the
 * character count printed.
 */
560 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
561 char *bf, size_t bfsz, FILE *fp,
562 bool ignore_callchains)
565 int callchain_ret = 0;
566 struct perf_hpp hpp = {
570 struct hists *hists = he->hists;
571 u64 total_period = hists->stats.total_period;
/* Clamp the working size to the actual buffer size. */
573 if (size == 0 || size > bfsz)
574 size = hpp.size = bfsz;
576 if (symbol_conf.report_hierarchy)
577 return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
579 if (symbol_conf.report_block)
580 return hist_entry__block_fprintf(he, bf, size, fp);
582 hist_entry__snprintf(he, &hpp);
584 ret = fprintf(fp, "%s\n", bf);
586 if (hist_entry__has_callchains(he) && !ignore_callchains)
587 callchain_ret = hist_entry_callchain__fprintf(he, total_period,
590 ret += callchain_ret;
/*
 * Print (indent - 2) * HIERARCHY_INDENT characters of 'line' as a
 * fixed-width field; does nothing useful when a field separator is in
 * use or the indent is below 2 (elided early return -- TODO confirm
 * the returned value in that case).
 */
595 static int print_hierarchy_indent(const char *sep, int indent,
596 const char *line, FILE *fp)
600 if (sep != NULL || indent < 2)
603 width = (indent - 2) * HIERARCHY_INDENT;
605 return fprintf(fp, "%-*.*s", width, width, line);
/*
 * Print the two header lines for hierarchy mode: the column-name line
 * (overhead columns, then the sort headers combined with " / " and "+")
 * followed by the dotted underline sized to the widest header. Returns
 * via elided lines; the final value is not visible in this chunk.
 * NOTE(review): heavily elided -- 'dots', 'depth', 'width', buffer
 * resets, and loop braces are not visible here.
 */
608 static int hists__fprintf_hierarchy_headers(struct hists *hists,
609 struct perf_hpp *hpp, FILE *fp)
611 bool first_node, first_col;
615 unsigned header_width = 0;
616 struct perf_hpp_fmt *fmt;
617 struct perf_hpp_list_node *fmt_node;
618 const char *sep = symbol_conf.field_sep;
620 indent = hists->nr_hpp_node;
622 /* preserve max indent depth for column headers */
623 print_hierarchy_indent(sep, indent, " ", fp);
625 /* the first hpp_list_node is for overhead columns */
626 fmt_node = list_first_entry(&hists->hpp_formats,
627 struct perf_hpp_list_node, list);
629 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
630 fmt->header(fmt, hpp, hists, 0, NULL);
631 fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
634 /* combine sort headers with ' / ' */
636 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
638 header_width += fprintf(fp, " / ");
642 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
643 if (perf_hpp__should_skip(fmt, hists))
647 header_width += fprintf(fp, "+");
650 fmt->header(fmt, hpp, hists, 0, NULL);
652 header_width += fprintf(fp, "%s", strim(hpp->buf));
658 /* preserve max indent depth for initial dots */
659 print_hierarchy_indent(sep, indent, dots, fp);
661 /* the first hpp_list_node is for overhead columns */
662 fmt_node = list_first_entry(&hists->hpp_formats,
663 struct perf_hpp_list_node, list);
666 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
668 fprintf(fp, "%s", sep ?: "..");
671 width = fmt->width(fmt, hpp, hists);
672 fprintf(fp, "%.*s", width, dots);
/* Track the widest combined sort-header line for the underline. */
676 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
678 width = depth * HIERARCHY_INDENT;
680 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
681 if (perf_hpp__should_skip(fmt, hists))
685 width++; /* for '+' sign between column header */
688 width += fmt->width(fmt, hpp, hists);
691 if (width > header_width)
692 header_width = width;
697 fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);
699 fprintf(fp, "\n#\n");
/*
 * Print one header line ('line' selects which of the multi-line headers)
 * for the standard (non-hierarchy) layout: each non-skipped format's
 * header, separated by field_sep or a space.
 * NOTE(review): elided -- 'span' and 'first' handling and loop braces
 * are not visible in this chunk.
 */
704 static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
707 struct perf_hpp_fmt *fmt;
708 const char *sep = symbol_conf.field_sep;
712 hists__for_each_format(hists, fmt) {
713 if (perf_hpp__should_skip(fmt, hists))
717 fprintf(fp, "%s", sep ?: " ");
721 fmt->header(fmt, hpp, hists, line, &span);
724 fprintf(fp, "%s", hpp->buf);
/*
 * Print the standard header block: nr_header_lines of column names,
 * then (unless a field separator is set -- early return of
 * nr_header_lines) a dashed underline sized per column. Returns the
 * number of header rows emitted (nr_header_lines + 2 with underline).
 * NOTE(review): elided -- the '#' prefixes, dash-printing statement,
 * and trailing blank line are not visible in this chunk.
 */
729 hists__fprintf_standard_headers(struct hists *hists,
730 struct perf_hpp *hpp,
733 struct perf_hpp_list *hpp_list = hists->hpp_list;
734 struct perf_hpp_fmt *fmt;
736 const char *sep = symbol_conf.field_sep;
740 for (line = 0; line < hpp_list->nr_header_lines; line++) {
741 /* first # is displayed one level up */
744 fprintf_line(hists, hpp, line, fp);
749 return hpp_list->nr_header_lines;
755 hists__for_each_format(hists, fmt) {
758 if (perf_hpp__should_skip(fmt, hists))
762 fprintf(fp, "%s", sep ?: " ");
766 width = fmt->width(fmt, hpp, hists);
767 for (i = 0; i < width; i++)
773 return hpp_list->nr_header_lines + 2;
/*
 * Public header printer: builds a scratch perf_hpp and dispatches to
 * the hierarchy or standard header printer. Returns the number of
 * header rows written.
 */
776 int hists__fprintf_headers(struct hists *hists, FILE *fp)
779 struct perf_hpp dummy_hpp = {
786 if (symbol_conf.report_hierarchy)
787 return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
789 return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
/*
 * Print a whole hists tree to fp: optional headers, then every entry
 * above min_pcnt (hierarchy-aware traversal), stopping at max_rows.
 * In hierarchy mode a "no entry >= x.xx%" placeholder is printed for
 * nodes whose children were all filtered out. With verbose > 1,
 * entries with no resolved map dump the thread's map groups for
 * debugging. The rem_sq_bracket symbol is freed on the way out.
 * Returns the character count printed.
 * NOTE(review): elided -- 'ret', 'line'/linesz error check, the
 * "goto out" paths, and loop/function closing braces are not visible.
 */
793 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
794 int max_cols, float min_pcnt, FILE *fp,
795 bool ignore_callchains)
799 const char *sep = symbol_conf.field_sep;
807 hists__reset_column_width(hists);
809 if (symbol_conf.col_width_list_str)
810 perf_hpp__set_user_width(symbol_conf.col_width_list_str);
813 nr_rows += hists__fprintf_headers(hists, fp);
815 if (max_rows && nr_rows >= max_rows)
/* Line buffer: sort width + separators, plus color escape overhead. */
818 linesz = hists__sort_list_width(hists) + 3 + 1;
819 linesz += perf_hpp__color_overhead();
820 line = malloc(linesz);
826 indent = hists__overhead_width(hists) + 4;
828 for (nd = rb_first_cached(&hists->entries); nd;
829 nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
830 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
836 percent = hist_entry__get_percent_limit(h);
837 if (percent < min_pcnt)
840 ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
842 if (max_rows && ++nr_rows >= max_rows)
846 * If all children are filtered out or percent-limited,
847 * display "no entry >= x.xx%" message.
849 if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
850 int depth = hists->nr_hpp_node + h->depth + 1;
852 print_hierarchy_indent(sep, depth, " ", fp);
853 fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
855 if (max_rows && ++nr_rows >= max_rows)
859 if (h->ms.map == NULL && verbose > 1) {
860 map_groups__fprintf(h->thread->mg, fp);
861 fprintf(fp, "%.10s end\n", graph_dotted_line);
867 zfree(&rem_sq_bracket);
872 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
877 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
880 name = perf_event__name(i);
881 if (!strcmp(name, "UNKNOWN"))
884 ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);