// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../util/thread.h"
#include "../util/util.h"

/* hist period print (hpp) functions */

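/*
 * Call a printf-like helper and advance hpp->buf/hpp->size past what it
 * wrote, so repeated calls append to the same buffer; evaluates to the
 * number of characters printed.
 */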
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

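/*
 * Format a single value for @he, either as a percentage of the total
 * period or as a raw number.  For grouped events, also print one column
 * per group member, zero-filling members that have no sample here.
 */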
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->core.nr_members;

		prev_idx = evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

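/*
 * Entry point used by the column callbacks: applies the user or default
 * column width; with a field separator set (script consumption), the
 * width collapses to 1 so columns are not padded.
 */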
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

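/* Accumulated ("Children") variant; prints "N/A" unless --children is in effect. */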
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

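/* Three-way comparison of two u64 fields. */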
static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

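/*
 * Gather the group members' values of two entries into freshly
 * allocated arrays indexed by group index.  The caller owns and frees
 * both arrays.
 */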
static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;
	return 0;
out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}

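/*
 * Compare two entries by the group member selected with
 * --group-sort-idx first, then break ties on the remaining members.
 */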
static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

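/*
 * Default ordering: compare the leader's value, then fall back to the
 * other group members in index order when event grouping is used.
 */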
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

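/* Ordering for the accumulated (--children) column. */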
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
		    (b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
		    !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->core.nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

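/*
 * Color printing helper: the variadic arguments are expected to be an
 * int width followed by a double percentage, matching the " %*.2f%%"
 * format strings used below.
 */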
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

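/*
 * The macros below stamp out, per column, the field accessor plus the
 * color/entry printers and the sort callback.  They are instantiated
 * by the HPP_*_FNS() calls further down.
 */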
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

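/*
 * hpp columns carry no key of their own for cmp/collapse; the actual
 * ordering is provided by the ->sort callback.
 */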
static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

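/* The global lists of output fields and sort keys, one header line by default. */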
struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked, if not it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

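/*
 * Set up the default output fields, unless the user requested a strict
 * field order.  --children adds the accumulated column and renames
 * "Overhead" to "Self".
 */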
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
}

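/*
 * Undo --children: drop the accumulated column and restore the
 * "Overhead" name of the self column.
 */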
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

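/*
 * Width of the leading hpp columns only, i.e. everything up to the
 * first sort or dynamic entry.
 */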
unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

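/* Reset a column to its default width, per fmt->idx. */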
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

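/*
 * Apply a comma-separated list of user column widths (e.g. from
 * perf report -w/--column-widths) to successive output fields.
 */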
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

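/*
 * Hierarchy mode keeps a separate hpp list per level; find or create
 * the node matching fmt->level and link a copy of fmt into it.
 */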
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

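/* Build the per-level format lists for every evsel's hists in hierarchy mode. */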
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}