1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
5 * Handle the callchains from the stream in an ad-hoc radix tree and then
6 * sort them in an rbtree.
8 * Using a radix for code path provides a fast retrieval and factorizes
9 * memory use. Also that lets us use the paths in a hierarchical graph view.
19 #include <linux/string.h>
20 #include <linux/zalloc.h>
30 #include "callchain.h"
/*
 * Default callchain parameters: absolute graph mode, callee order,
 * function-based aggregation key, percent-style values.  Shared between
 * the live callchain_param and the pristine callchain_param_default.
 */
35 #define CALLCHAIN_PARAM_DEFAULT \
36 .mode = CHAIN_GRAPH_ABS, \
38 .order = ORDER_CALLEE, \
39 .key = CCKEY_FUNCTION, \
40 .value = CCVAL_PERCENT, \
/* The global, mutable callchain configuration used by the parsers below. */
42 struct callchain_param callchain_param = {
43 CALLCHAIN_PARAM_DEFAULT
47 * Are there any events using DWARF callchains?
51 * -e cycles/call-graph=dwarf/
53 bool dwarf_callchain_users;
/* Pristine copy of the defaults, kept for resetting callchain_param. */
55 struct callchain_param callchain_param_default = {
56 CALLCHAIN_PARAM_DEFAULT
/* Per-thread cursor used while resolving a single sample's callchain. */
59 __thread struct callchain_cursor callchain_cursor;
/* Parse a --call-graph record-side option string into *param. */
61 int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
63 return parse_callchain_record(arg, param);
/*
 * Map a print-mode name onto callchain_param.mode.
 * strncmp() bounded by strlen(value) means any prefix of the keyword
 * matches ("g" selects "graph"); an empty string would match the first
 * keyword tested.
 */
66 static int parse_callchain_mode(const char *value)
68 if (!strncmp(value, "graph", strlen(value))) {
69 callchain_param.mode = CHAIN_GRAPH_ABS;
72 if (!strncmp(value, "flat", strlen(value))) {
73 callchain_param.mode = CHAIN_FLAT;
76 if (!strncmp(value, "fractal", strlen(value))) {
77 callchain_param.mode = CHAIN_GRAPH_REL;
80 if (!strncmp(value, "folded", strlen(value))) {
81 callchain_param.mode = CHAIN_FOLDED;
/*
 * Map an order name onto callchain_param.order; prefixes accepted
 * (same strncmp/strlen idiom as parse_callchain_mode).  order_set
 * records that the user chose explicitly rather than by default.
 */
87 static int parse_callchain_order(const char *value)
89 if (!strncmp(value, "caller", strlen(value))) {
90 callchain_param.order = ORDER_CALLER;
91 callchain_param.order_set = true;
94 if (!strncmp(value, "callee", strlen(value))) {
95 callchain_param.order = ORDER_CALLEE;
96 callchain_param.order_set = true;
/*
 * Map a sort-key name onto callchain_param.key (prefixes accepted).
 * "branch" is special: it does not change the key, it only enables
 * branch_callstack accounting.
 */
102 static int parse_callchain_sort_key(const char *value)
104 if (!strncmp(value, "function", strlen(value))) {
105 callchain_param.key = CCKEY_FUNCTION;
108 if (!strncmp(value, "address", strlen(value))) {
109 callchain_param.key = CCKEY_ADDRESS;
112 if (!strncmp(value, "srcline", strlen(value))) {
113 callchain_param.key = CCKEY_SRCLINE;
116 if (!strncmp(value, "branch", strlen(value))) {
117 callchain_param.branch_callstack = 1;
/* Map a value-style name (percent/period/count) onto callchain_param.value. */
123 static int parse_callchain_value(const char *value)
125 if (!strncmp(value, "percent", strlen(value))) {
126 callchain_param.value = CCVAL_PERCENT;
129 if (!strncmp(value, "period", strlen(value))) {
130 callchain_param.value = CCVAL_PERIOD;
133 if (!strncmp(value, "count", strlen(value))) {
134 callchain_param.value = CCVAL_COUNT;
/*
 * Parse a stack dump size into *_size.  The result is rounded up to a
 * multiple of sizeof(u64) and must stay within USHRT_MAX rounded down
 * to the same alignment; zero or over-large sizes are rejected.
 */
140 static int get_stack_size(const char *str, unsigned long *_size)
144 unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
/* base 0: accepts decimal, 0x hex and 0-prefixed octal */
146 size = strtoul(str, &endptr, 0);
152 size = round_up(size, sizeof(u64));
153 if (!size || size > max_size)
161 pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
/*
 * Parse the report-side --call-graph option string: a comma-separated
 * sequence of mode/order/sort-key/value keywords, optionally a record
 * option (when allow_record_opt), a minimum percent, and a print limit.
 */
167 __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
170 char *endptr, *saveptr = NULL;
171 bool minpcnt_set = false;
172 bool record_opt_set = false;
173 bool try_stack_size = false;
175 callchain_param.enabled = true;
176 symbol_conf.use_callchain = true;
181 while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
/* "none" disables callchains entirely (prefix match on the token) */
182 if (!strncmp(tok, "none", strlen(tok))) {
183 callchain_param.mode = CHAIN_NONE;
184 callchain_param.enabled = false;
185 symbol_conf.use_callchain = false;
/* try each keyword class in turn; 0 means the token was consumed */
189 if (!parse_callchain_mode(tok) ||
190 !parse_callchain_order(tok) ||
191 !parse_callchain_sort_key(tok) ||
192 !parse_callchain_value(tok)) {
193 /* parsing ok - move on to the next */
194 try_stack_size = false;
196 } else if (allow_record_opt && !record_opt_set) {
197 if (parse_callchain_record(tok, &callchain_param))
200 /* assume that number followed by 'dwarf' is stack size */
201 if (callchain_param.record_mode == CALLCHAIN_DWARF)
202 try_stack_size = true;
204 record_opt_set = true;
/* numeric tokens: stack size (after "dwarf"), then min percent, then print limit */
209 if (try_stack_size) {
210 unsigned long size = 0;
212 if (get_stack_size(tok, &size) < 0)
214 callchain_param.dump_size = size;
215 try_stack_size = false;
216 } else if (!minpcnt_set) {
217 /* try to get the min percent */
218 callchain_param.min_percent = strtod(tok, &endptr);
223 /* try print limit at last */
224 callchain_param.print_limit = strtoul(tok, &endptr, 0);
232 if (callchain_register_param(&callchain_param) < 0) {
233 pr_err("Can't register callchain params\n");
/* perf-report flavor: record-side options are not accepted. */
239 int parse_callchain_report_opt(const char *arg)
241 return __parse_callchain_report_opt(arg, false);
/* perf-top flavor: record-side options (fp/dwarf/lbr) are also accepted. */
244 int parse_callchain_top_opt(const char *arg)
246 return __parse_callchain_report_opt(arg, true);
/*
 * Parse the record-mode part of --call-graph: "fp", "dwarf[,size]" or
 * "lbr".  Note sizeof("fp") includes the NUL terminator, so these
 * strncmp() calls behave as exact matches, unlike the prefix matching
 * used by the report-side parsers above.
 */
249 int parse_callchain_record(const char *arg, struct callchain_param *param)
251 char *tok, *name, *saveptr = NULL;
255 /* We need buffer that we know we can write to. */
256 buf = malloc(strlen(arg) + 1);
262 tok = strtok_r((char *)buf, ",", &saveptr);
263 name = tok ? : (char *)buf;
266 /* Framepointer style */
267 if (!strncmp(name, "fp", sizeof("fp"))) {
/* "fp" takes no further comma-separated arguments */
268 if (!strtok_r(NULL, ",", &saveptr)) {
269 param->record_mode = CALLCHAIN_FP;
272 pr_err("callchain: No more arguments "
273 "needed for --call-graph fp\n");
277 } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
278 const unsigned long default_stack_dump_size = 8192;
281 param->record_mode = CALLCHAIN_DWARF;
282 param->dump_size = default_stack_dump_size;
283 dwarf_callchain_users = true;
/* optional second token overrides the default stack dump size */
285 tok = strtok_r(NULL, ",", &saveptr);
287 unsigned long size = 0;
289 ret = get_stack_size(tok, &size);
290 param->dump_size = size;
292 } else if (!strncmp(name, "lbr", sizeof("lbr"))) {
293 if (!strtok_r(NULL, ",", &saveptr)) {
294 param->record_mode = CALLCHAIN_LBR;
297 pr_err("callchain: No more arguments "
298 "needed for --call-graph lbr\n");
301 pr_err("callchain: Unknown --call-graph option "
/*
 * Handle "call-graph.*" keys from the perf config file, dispatching to
 * the same parsers used for command-line options.
 */
312 int perf_callchain_config(const char *var, const char *value)
316 if (!strstarts(var, "call-graph."))
/* skip the "call-graph." prefix (sizeof includes the NUL, hence -1) */
318 var += sizeof("call-graph.") - 1;
320 if (!strcmp(var, "record-mode"))
321 return parse_callchain_record_opt(value, &callchain_param);
322 if (!strcmp(var, "dump-size")) {
323 unsigned long size = 0;
326 ret = get_stack_size(value, &size);
327 callchain_param.dump_size = size;
331 if (!strcmp(var, "print-type")){
333 ret = parse_callchain_mode(value);
335 pr_err("Invalid callchain mode: %s\n", value);
338 if (!strcmp(var, "order")){
340 ret = parse_callchain_order(value);
342 pr_err("Invalid callchain order: %s\n", value);
345 if (!strcmp(var, "sort-key")){
347 ret = parse_callchain_sort_key(value);
349 pr_err("Invalid callchain sort key: %s\n", value);
352 if (!strcmp(var, "threshold")) {
/* endptr == value means no digits were consumed -> invalid input */
353 callchain_param.min_percent = strtod(value, &endptr);
354 if (value == endptr) {
355 pr_err("Invalid callchain threshold: %s\n", value);
359 if (!strcmp(var, "print-limit")) {
/*
 * NOTE(review): print-limit is parsed with strtod() although the
 * command-line path above uses strtoul(); if print_limit is an
 * integer field, strtoul would be the natural parser - confirm.
 */
360 callchain_param.print_limit = strtod(value, &endptr);
361 if (value == endptr) {
362 pr_err("Invalid callchain print limit: %s\n", value);
/*
 * Insert a callchain node into an output rbtree, ordered descending by
 * self hits (flat/folded modes) or cumulative hits (graph modes).
 */
371 rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
372 enum chain_mode mode)
374 struct rb_node **p = &root->rb_node;
375 struct rb_node *parent = NULL;
376 struct callchain_node *rnode;
377 u64 chain_cumul = callchain_cumul_hits(chain);
383 rnode = rb_entry(parent, struct callchain_node, rb_node);
384 rnode_cumul = callchain_cumul_hits(rnode);
/* flat/folded: order by self hits */
389 if (rnode->hit < chain->hit)
394 case CHAIN_GRAPH_ABS: /* Falldown */
395 case CHAIN_GRAPH_REL:
/* graph modes: order by cumulative (self + children) hits */
396 if (rnode_cumul < chain_cumul)
407 rb_link_node(&chain->rb_node, parent, p);
408 rb_insert_color(&chain->rb_node, root);
/*
 * Recursively flatten the input tree (rb_root_in) into a single output
 * rbtree, keeping only nodes with self hits >= min_hit.
 */
412 __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
416 struct callchain_node *child;
418 n = rb_first(&node->rb_root_in);
420 child = rb_entry(n, struct callchain_node, rb_node_in);
423 __sort_chain_flat(rb_root, child, min_hit);
426 if (node->hit && node->hit >= min_hit)
427 rb_insert_callchain(rb_root, node, CHAIN_FLAT);
431 * Once we get every callchains from the stream, we can now
/* Entry point for CHAIN_FLAT/CHAIN_FOLDED sorting (see callchain_register_param). */
435 sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
436 u64 min_hit, struct callchain_param *param __maybe_unused)
439 __sort_chain_flat(rb_root, &root->node, min_hit);
/*
 * Rebuild each node's output rbtree from its input tree, keeping only
 * children whose cumulative hits reach the absolute min_hit threshold.
 */
442 static void __sort_chain_graph_abs(struct callchain_node *node,
446 struct callchain_node *child;
448 node->rb_root = RB_ROOT;
449 n = rb_first(&node->rb_root_in);
452 child = rb_entry(n, struct callchain_node, rb_node_in);
455 __sort_chain_graph_abs(child, min_hit);
456 if (callchain_cumul_hits(child) >= min_hit)
457 rb_insert_callchain(&node->rb_root, child,
/* Entry point for CHAIN_GRAPH_ABS sorting. */
463 sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
464 u64 min_hit, struct callchain_param *param __maybe_unused)
466 __sort_chain_graph_abs(&chain_root->node, min_hit);
/* expose the root node's sorted children as the caller's tree */
467 rb_root->rb_node = chain_root->node.rb_root.rb_node;
/*
 * Like __sort_chain_graph_abs, but the threshold is relative: at each
 * level, min_hit is min_percent of this node's children_hit.
 */
470 static void __sort_chain_graph_rel(struct callchain_node *node,
474 struct callchain_node *child;
477 node->rb_root = RB_ROOT;
478 min_hit = ceil(node->children_hit * min_percent);
480 n = rb_first(&node->rb_root_in);
482 child = rb_entry(n, struct callchain_node, rb_node_in);
485 __sort_chain_graph_rel(child, min_percent);
486 if (callchain_cumul_hits(child) >= min_hit)
487 rb_insert_callchain(&node->rb_root, child,
/* Entry point for CHAIN_GRAPH_REL sorting; min_percent is given in percent. */
493 sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
494 u64 min_hit __maybe_unused, struct callchain_param *param)
496 __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
497 rb_root->rb_node = chain_root->node.rb_root.rb_node;
/* Select the sort callback matching param->mode. */
500 int callchain_register_param(struct callchain_param *param)
502 switch (param->mode) {
503 case CHAIN_GRAPH_ABS:
504 param->sort = sort_chain_graph_abs;
506 case CHAIN_GRAPH_REL:
507 param->sort = sort_chain_graph_rel;
/* flat and folded modes share the flat sorter */
511 param->sort = sort_chain_flat;
521 * Create a child for a parent. If inherit_children, then the new child
522 * will become the new parent of its parent's children
524 static struct callchain_node *
525 create_child(struct callchain_node *parent, bool inherit_children)
527 struct callchain_node *new;
529 new = zalloc(sizeof(*new));
531 perror("not enough memory to create child for code path tree");
534 new->parent = parent;
535 INIT_LIST_HEAD(&new->val);
536 INIT_LIST_HEAD(&new->parent_val);
538 if (inherit_children) {
540 struct callchain_node *child;
/* steal the parent's whole input tree, then re-parent every child */
542 new->rb_root_in = parent->rb_root_in;
543 parent->rb_root_in = RB_ROOT;
545 n = rb_first(&new->rb_root_in);
547 child = rb_entry(n, struct callchain_node, rb_node_in);
552 /* make it the first child */
553 rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
554 rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
562 * Fill the node with callchain values
565 fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
567 struct callchain_cursor_node *cursor_node;
/* the node takes everything from the cursor's position to its end */
569 node->val_nr = cursor->nr - cursor->pos;
571 pr_warning("Warning: empty node in callchain tree\n");
573 cursor_node = callchain_cursor_current(cursor);
575 while (cursor_node) {
576 struct callchain_list *call;
578 call = zalloc(sizeof(*call));
580 perror("not enough memory for the code path tree");
583 call->ip = cursor_node->ip;
584 call->ms.sym = cursor_node->sym;
/* take a reference on the map; released via map__zput on teardown */
585 call->ms.map = map__get(cursor_node->map);
586 call->srcline = cursor_node->srcline;
588 if (cursor_node->branch) {
589 call->branch_count = 1;
591 if (cursor_node->branch_from) {
593 * branch_from is set with value somewhere else
594 * to imply it's "to" of a branch.
596 call->brtype_stat.branch_to = true;
598 if (cursor_node->branch_flags.predicted)
599 call->predicted_count = 1;
601 if (cursor_node->branch_flags.abort)
602 call->abort_count = 1;
604 branch_type_count(&call->brtype_stat,
605 &cursor_node->branch_flags,
606 cursor_node->branch_from,
610 * It's "from" of a branch
612 call->brtype_stat.branch_to = false;
614 cursor_node->branch_flags.cycles;
615 call->iter_count = cursor_node->nr_loop_iter;
616 call->iter_cycles = cursor_node->iter_cycles;
620 list_add_tail(&call->list, &node->val);
622 callchain_cursor_advance(cursor);
623 cursor_node = callchain_cursor_current(cursor);
/*
 * Create a new leaf child of @parent and fill it with the remaining
 * cursor entries.  On fill failure the partially-built value list is
 * torn down (dropping map references) before bailing out.
 */
628 static struct callchain_node *
629 add_child(struct callchain_node *parent,
630 struct callchain_cursor *cursor,
633 struct callchain_node *new;
635 new = create_child(parent, false);
639 if (fill_node(new, cursor) < 0) {
640 struct callchain_list *call, *tmp;
642 list_for_each_entry_safe(call, tmp, &new->val, list) {
643 list_del_init(&call->list);
644 map__zput(call->ms.map);
/* a fresh leaf has no children yet */
651 new->children_hit = 0;
653 new->children_count = 0;
/*
 * Three-way compare of two optional strings; NULL sorts relative to
 * non-NULL, and two NULLs are deliberately not MATCH_EQ (the caller
 * treats that as "no string key available, fall back").
 */
665 static enum match_result match_chain_strings(const char *left,
668 enum match_result ret = MATCH_EQ;
672 cmp = strcmp(left, right);
673 else if (!left && right)
675 else if (left && !right)
681 ret = cmp < 0 ? MATCH_LT : MATCH_GT;
687 * We need to always use relative addresses because we're aggregating
688 * callchains from multiple threads, i.e. different address spaces, so
689 * comparing absolute addresses make no sense as a symbol in a DSO may end up
690 * in a different address when used in a different binary or even the same
691 * binary but with some sort of address randomization technique, thus we need
692 * to compare just relative addresses. -acme
694 static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
695 struct map *right_map, u64 right_ip)
697 struct dso *left_dso = left_map ? left_map->dso : NULL;
698 struct dso *right_dso = right_map ? right_map->dso : NULL;
/* order by DSO identity first, then by (relative) ip within the DSO */
700 if (left_dso != right_dso)
701 return left_dso < right_dso ? MATCH_LT : MATCH_GT;
703 if (left_ip != right_ip)
704 return left_ip < right_ip ? MATCH_LT : MATCH_GT;
/*
 * Compare a cursor entry against a stored callchain entry according to
 * callchain_param.key, and on an exact match fold the cursor entry's
 * branch statistics into the stored entry.
 */
709 static enum match_result match_chain(struct callchain_cursor_node *node,
710 struct callchain_list *cnode)
712 enum match_result match = MATCH_ERROR;
714 switch (callchain_param.key) {
716 match = match_chain_strings(cnode->srcline, node->srcline);
717 if (match != MATCH_ERROR)
719 /* otherwise fall-back to symbol-based comparison below */
722 if (node->sym && cnode->ms.sym) {
724 * Compare inlined frames based on their symbol name
725 * because different inlined frames will have the same
726 * symbol start. Otherwise do a faster comparison based
727 * on the symbol start address.
729 if (cnode->ms.sym->inlined || node->sym->inlined) {
730 match = match_chain_strings(cnode->ms.sym->name,
732 if (match != MATCH_ERROR)
735 match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
736 node->map, node->sym->start);
740 /* otherwise fall-back to IP-based comparison below */
744 match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
/* on a match, accumulate branch statistics into the existing entry */
748 if (match == MATCH_EQ && node->branch) {
749 cnode->branch_count++;
751 if (node->branch_from) {
753 * It's "to" of a branch
755 cnode->brtype_stat.branch_to = true;
757 if (node->branch_flags.predicted)
758 cnode->predicted_count++;
760 if (node->branch_flags.abort)
761 cnode->abort_count++;
763 branch_type_count(&cnode->brtype_stat,
769 * It's "from" of a branch
771 cnode->brtype_stat.branch_to = false;
772 cnode->cycles_count += node->branch_flags.cycles;
773 cnode->iter_count += node->nr_loop_iter;
774 cnode->iter_cycles += node->iter_cycles;
783 * Split the parent in two parts (a new child is created) and
784 * give a part of its callchain to the created child.
785 * Then create another child to host the given callchain of new branch
788 split_add_child(struct callchain_node *parent,
789 struct callchain_cursor *cursor,
790 struct callchain_list *to_split,
791 u64 idx_parents, u64 idx_local, u64 period)
793 struct callchain_node *new;
794 struct list_head *old_tail;
795 unsigned int idx_total = idx_parents + idx_local;
/* inherit_children=true: the new child takes over the parent's children */
798 new = create_child(parent, true);
802 /* split the callchain and move a part to the new child */
/* manual list surgery: move [to_split .. old tail] onto new->val */
803 old_tail = parent->val.prev;
804 list_del_range(&to_split->list, old_tail);
805 new->val.next = &to_split->list;
806 new->val.prev = old_tail;
807 to_split->list.prev = &new->val;
808 old_tail->next = &new->val;
/* the split-off tail carries the parent's hit/count totals */
811 new->hit = parent->hit;
812 new->children_hit = parent->children_hit;
813 parent->children_hit = callchain_cumul_hits(new);
814 new->val_nr = parent->val_nr - idx_local;
815 parent->val_nr = idx_local;
816 new->count = parent->count;
817 new->children_count = parent->children_count;
818 parent->children_count = callchain_cumul_counts(new);
820 /* create a new child for the new branch if any */
821 if (idx_total < cursor->nr) {
822 struct callchain_node *first;
823 struct callchain_list *cnode;
824 struct callchain_cursor_node *node;
825 struct rb_node *p, **pp;
828 parent->children_hit += period;
830 parent->children_count += 1;
832 node = callchain_cursor_current(cursor);
833 new = add_child(parent, cursor, period);
838 * This is second child since we moved parent's children
839 * to new (first) child above.
841 p = parent->rb_root_in.rb_node;
842 first = rb_entry(p, struct callchain_node, rb_node_in);
843 cnode = list_first_entry(&first->val, struct callchain_list,
/* place the new branch left or right of the first child */
846 if (match_chain(node, cnode) == MATCH_LT)
851 rb_link_node(&new->rb_node_in, p, pp);
852 rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
/* no remaining branch: the whole period lands on the parent itself */
854 parent->hit = period;
/* forward declaration: append_chain and append_chain_children recurse mutually */
860 static enum match_result
861 append_chain(struct callchain_node *root,
862 struct callchain_cursor *cursor,
/*
 * Descend into root's children looking for one whose first entry
 * matches the current cursor entry; create a new child if none does.
 * On success, root's cumulative child counters are bumped by period.
 */
866 append_chain_children(struct callchain_node *root,
867 struct callchain_cursor *cursor,
870 struct callchain_node *rnode;
871 struct callchain_cursor_node *node;
872 struct rb_node **p = &root->rb_root_in.rb_node;
873 struct rb_node *parent = NULL;
875 node = callchain_cursor_current(cursor);
879 /* lookup in childrens */
881 enum match_result ret;
884 rnode = rb_entry(parent, struct callchain_node, rb_node_in);
886 /* If at least first entry matches, rely to children */
887 ret = append_chain(rnode, cursor, period);
889 goto inc_children_hit;
890 if (ret == MATCH_ERROR)
/* use the comparison result to pick the rbtree descent direction */
894 p = &parent->rb_left;
896 p = &parent->rb_right;
898 /* nothing in children, add to the current node */
899 rnode = add_child(root, cursor, period);
903 rb_link_node(&rnode->rb_node_in, parent, p);
904 rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);
907 root->children_hit += period;
908 root->children_count++;
/*
 * Try to merge the cursor's remaining entries into @root: walk root's
 * value list against the cursor, then split, terminate, or recurse
 * into the children depending on how much matched.
 */
912 static enum match_result
913 append_chain(struct callchain_node *root,
914 struct callchain_cursor *cursor,
917 struct callchain_list *cnode;
918 u64 start = cursor->pos;
921 enum match_result cmp = MATCH_ERROR;
924 * Lookup in the current node
925 * If we have a symbol, then compare the start to match
926 * anywhere inside a function, unless function
929 list_for_each_entry(cnode, &root->val, list) {
930 struct callchain_cursor_node *node;
932 node = callchain_cursor_current(cursor);
936 cmp = match_chain(node, cnode);
942 callchain_cursor_advance(cursor);
945 /* no match at all: relay the result to the parent */
947 WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
/* number of entries of this node that matched the cursor */
951 matches = cursor->pos - start;
953 /* we match only a part of the node. Split it and add the new chain */
954 if (matches < root->val_nr) {
955 if (split_add_child(root, cursor, cnode, start, matches,
962 /* we match 100% of the path, increment the hit */
963 if (matches == root->val_nr && cursor->pos == cursor->nr) {
969 /* We match the node and still have a part remaining */
970 if (append_chain_children(root, cursor, period) < 0)
/*
 * Public entry point: merge the committed cursor contents into the
 * radix tree rooted at @root, crediting @period, and track max depth.
 */
976 int callchain_append(struct callchain_root *root,
977 struct callchain_cursor *cursor,
983 callchain_cursor_commit(cursor);
985 if (append_chain_children(&root->node, cursor, period) < 0)
988 if (cursor->nr > root->max_depth)
989 root->max_depth = cursor->nr;
/*
 * Recursively move one src subtree into dst: replay src's value list
 * through the cursor (consuming it — entries are deleted and map refs
 * dropped), append into dst, then recurse into src's children.  The
 * cursor position is restored before returning so sibling branches
 * replay from the same point.
 */
995 merge_chain_branch(struct callchain_cursor *cursor,
996 struct callchain_node *dst, struct callchain_node *src)
998 struct callchain_cursor_node **old_last = cursor->last;
999 struct callchain_node *child;
1000 struct callchain_list *list, *next_list;
1002 int old_pos = cursor->nr;
1005 list_for_each_entry_safe(list, next_list, &src->val, list) {
1006 callchain_cursor_append(cursor, list->ip,
1007 list->ms.map, list->ms.sym,
1008 false, NULL, 0, 0, 0, list->srcline);
1009 list_del_init(&list->list);
1010 map__zput(list->ms.map);
1015 callchain_cursor_commit(cursor);
1016 if (append_chain_children(dst, cursor, src->hit) < 0)
1020 n = rb_first(&src->rb_root_in);
1022 child = container_of(n, struct callchain_node, rb_node_in);
1024 rb_erase(&child->rb_node_in, &src->rb_root_in);
1026 err = merge_chain_branch(cursor, dst, child);
/* rewind the cursor to where this branch started */
1033 cursor->nr = old_pos;
1034 cursor->last = old_last;
/* Merge the whole src tree into dst (src is consumed in the process). */
1039 int callchain_merge(struct callchain_cursor *cursor,
1040 struct callchain_root *dst, struct callchain_root *src)
1042 return merge_chain_branch(cursor, &dst->node, &src->node);
/*
 * Append one resolved frame to the cursor, reusing a cached node from
 * a previous reset when available (see callchain_cursor_reset).
 */
1045 int callchain_cursor_append(struct callchain_cursor *cursor,
1046 u64 ip, struct map *map, struct symbol *sym,
1047 bool branch, struct branch_flags *flags,
1048 int nr_loop_iter, u64 iter_cycles, u64 branch_from,
1049 const char *srcline)
1051 struct callchain_cursor_node *node = *cursor->last;
1054 node = calloc(1, sizeof(*node));
1058 *cursor->last = node;
/* drop the reference held by a recycled node before taking a new one */
1062 map__zput(node->map);
1063 node->map = map__get(map);
1065 node->branch = branch;
1066 node->nr_loop_iter = nr_loop_iter;
1067 node->iter_cycles = iter_cycles;
1068 node->srcline = srcline;
1071 memcpy(&node->branch_flags, flags,
1072 sizeof(struct branch_flags));
1074 node->branch_from = branch_from;
1077 cursor->last = &node->next;
/*
 * Resolve a sample's raw callchain into the cursor, but only when some
 * consumer (callchains, cumulation, parent filtering, or branch-flag
 * counting) actually needs it.
 */
1082 int sample__resolve_callchain(struct perf_sample *sample,
1083 struct callchain_cursor *cursor, struct symbol **parent,
1084 struct evsel *evsel, struct addr_location *al,
1087 if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
1090 if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
1091 perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
1092 return thread__resolve_callchain(al->thread, cursor, evsel, sample,
1093 parent, al, max_stack);
/* Append the current per-thread cursor contents to a hist entry's callchain. */
1098 int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
1100 if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
1101 !symbol_conf.show_branchflag_count)
1103 return callchain_append(he->callchain, &callchain_cursor, sample->period);
/*
 * Populate an addr_location from a cursor node and derive its cpumode
 * (kernel vs user, host vs guest) from the node's map and machine.
 */
1106 int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
1107 bool hide_unresolved)
1109 al->map = node->map;
1110 al->sym = node->sym;
1111 al->srcline = node->srcline;
1112 al->addr = node->ip;
1114 if (al->sym == NULL) {
1115 if (hide_unresolved)
1117 if (al->map == NULL)
/* map belongs to the machine's kernel maps -> kernel cpumode */
1121 if (al->map->groups == &al->machine->kmaps) {
1122 if (machine__is_host(al->machine)) {
1123 al->cpumode = PERF_RECORD_MISC_KERNEL;
1126 al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
1130 if (machine__is_host(al->machine)) {
1131 al->cpumode = PERF_RECORD_MISC_USER;
1133 } else if (perf_guest) {
1134 al->cpumode = PERF_RECORD_MISC_GUEST_USER;
1137 al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
/*
 * Format one callchain entry into bf: symbol name (plus srcline and an
 * "(inlined)" tag when applicable) or a raw address, optionally
 * followed by the DSO short name.
 */
1146 char *callchain_list__sym_name(struct callchain_list *cl,
1147 char *bf, size_t bfsize, bool show_dso)
1149 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
1150 bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
1154 const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
1156 if (show_srcline && cl->srcline)
1157 printed = scnprintf(bf, bfsize, "%s %s%s",
1158 cl->ms.sym->name, cl->srcline,
1161 printed = scnprintf(bf, bfsize, "%s%s",
1162 cl->ms.sym->name, inlined);
/* no symbol: print the raw ip */
1164 printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
1167 scnprintf(bf + printed, bfsize - printed, " %s",
1169 cl->ms.map->dso->short_name :
/*
 * Render a node's value into bf according to callchain_param.value:
 * raw period, raw count, or percent of @total.  Folded mode uses the
 * node's own hit/count rather than the cumulated ones.
 */
1175 char *callchain_node__scnprintf_value(struct callchain_node *node,
1176 char *bf, size_t bfsize, u64 total)
1178 double percent = 0.0;
1179 u64 period = callchain_cumul_hits(node);
1180 unsigned count = callchain_cumul_counts(node);
1182 if (callchain_param.mode == CHAIN_FOLDED) {
1184 count = node->count;
1187 switch (callchain_param.value) {
1189 scnprintf(bf, bfsize, "%"PRIu64, period);
1192 scnprintf(bf, bfsize, "%u", count);
1197 percent = period * 100.0 / total;
1198 scnprintf(bf, bfsize, "%.2f%%", percent);
/*
 * FILE* twin of callchain_node__scnprintf_value; percent output goes
 * through percent_color_fprintf for threshold-based coloring.
 */
1204 int callchain_node__fprintf_value(struct callchain_node *node,
1205 FILE *fp, u64 total)
1207 double percent = 0.0;
1208 u64 period = callchain_cumul_hits(node);
1209 unsigned count = callchain_cumul_counts(node);
1211 if (callchain_param.mode == CHAIN_FOLDED) {
1213 count = node->count;
1216 switch (callchain_param.value) {
1218 return fprintf(fp, "%"PRIu64, period);
1220 return fprintf(fp, "%u", count);
1224 percent = period * 100.0 / total;
1225 return percent_color_fprintf(fp, "%.2f%%", percent);
/*
 * Sum the branch counters of every entry in @node's value list into
 * the caller-supplied accumulators (each output pointer is optional).
 */
1230 static void callchain_counts_value(struct callchain_node *node,
1231 u64 *branch_count, u64 *predicted_count,
1232 u64 *abort_count, u64 *cycles_count)
1234 struct callchain_list *clist;
1236 list_for_each_entry(clist, &node->val, list) {
1238 *branch_count += clist->branch_count;
1240 if (predicted_count)
1241 *predicted_count += clist->predicted_count;
1244 *abort_count += clist->abort_count;
1247 *cycles_count += clist->cycles_count;
/*
 * Recursively accumulate branch counters over the subtree rooted at
 * @node (children first, then each child's own value list).
 */
1251 static int callchain_node_branch_counts_cumul(struct callchain_node *node,
1253 u64 *predicted_count,
1257 struct callchain_node *child;
1260 n = rb_first(&node->rb_root_in);
1262 child = rb_entry(n, struct callchain_node, rb_node_in);
1265 callchain_node_branch_counts_cumul(child, branch_count,
1270 callchain_counts_value(child, branch_count,
1271 predicted_count, abort_count,
/*
 * Public wrapper: zero the accumulators, then cumulate branch counters
 * over the whole tree under @root.
 */
1278 int callchain_branch_counts(struct callchain_root *root,
1279 u64 *branch_count, u64 *predicted_count,
1280 u64 *abort_count, u64 *cycles_count)
1285 if (predicted_count)
1286 *predicted_count = 0;
1294 return callchain_node_branch_counts_cumul(&root->node,
1301 static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
1305 printed = scnprintf(bf, bfsize, "%s%s:%" PRId64 "", (idx) ? " " : " (", str, value);
/*
 * Append "str:NN.N%" to bf (same idx/group convention as
 * count_pri64_printf), skipping values below @threshold.
 */
1310 static int count_float_printf(int idx, const char *str, float value,
1311 char *bf, int bfsize, float threshold)
1315 if (threshold != 0.0 && value < threshold)
1318 printed = scnprintf(bf, bfsize, "%s%s:%.1f%%", (idx) ? " " : " (", str, value);
/*
 * Format the "to"-side branch annotation: branch types, then predicted
 * and abort percentages, closing the parenthesis opened by the first
 * counter printed.
 */
1323 static int branch_to_str(char *bf, int bfsize,
1324 u64 branch_count, u64 predicted_count,
1326 struct branch_type_stat *brtype_stat)
1330 printed = branch_type_str(brtype_stat, bf, bfsize);
/* only show "predicted" when some branches were mispredicted */
1334 if (predicted_count < branch_count) {
1335 printed += count_float_printf(i++, "predicted",
1336 predicted_count * 100.0 / branch_count,
1337 bf + printed, bfsize - printed, 0.0);
/* abort percentage is suppressed below the 0.1% threshold */
1341 printed += count_float_printf(i++, "abort",
1342 abort_count * 100.0 / branch_count,
1343 bf + printed, bfsize - printed, 0.1);
1347 printed += scnprintf(bf + printed, bfsize - printed, ")");
/*
 * Format the "from"-side branch annotation: average cycles per branch,
 * loop iteration count, and average cycles per iteration.
 */
1352 static int branch_from_str(char *bf, int bfsize,
1354 u64 cycles_count, u64 iter_count,
1355 u64 iter_cycles, u64 from_count)
1357 int printed = 0, i = 0;
/* average cycles per branch */
1360 cycles = cycles_count / branch_count;
1362 printed += count_pri64_printf(i++, "cycles",
1364 bf + printed, bfsize - printed);
1367 if (iter_count && from_count) {
/* average iterations per originating branch */
1368 v = iter_count / from_count;
1370 printed += count_pri64_printf(i++, "iter",
1371 v, bf + printed, bfsize - printed);
1373 printed += count_pri64_printf(i++, "avg_cycles",
1374 iter_cycles / iter_count,
1375 bf + printed, bfsize - printed);
1380 printed += scnprintf(bf + printed, bfsize - printed, ")");
/*
 * Build the branch-counter annotation string, dispatching to the
 * "to" or "from" formatter depending on which side of the branch this
 * entry represents; "(calltrace)" when there were no branches at all.
 */
1385 static int counts_str_build(char *bf, int bfsize,
1386 u64 branch_count, u64 predicted_count,
1387 u64 abort_count, u64 cycles_count,
1388 u64 iter_count, u64 iter_cycles,
1390 struct branch_type_stat *brtype_stat)
1394 if (branch_count == 0)
1395 return scnprintf(bf, bfsize, " (calltrace)");
1397 if (brtype_stat->branch_to) {
1398 printed = branch_to_str(bf, bfsize, branch_count,
1399 predicted_count, abort_count, brtype_stat);
1401 printed = branch_from_str(bf, bfsize, branch_count,
1402 cycles_count, iter_count, iter_cycles,
/*
 * Emit the branch-counter annotation either to a FILE* (when fp is
 * set) or into the caller's buffer.
 */
1412 static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1413 u64 branch_count, u64 predicted_count,
1414 u64 abort_count, u64 cycles_count,
1415 u64 iter_count, u64 iter_cycles,
1417 struct branch_type_stat *brtype_stat)
1421 counts_str_build(str, sizeof(str), branch_count,
1422 predicted_count, abort_count, cycles_count,
1423 iter_count, iter_cycles, from_count, brtype_stat);
1426 return fprintf(fp, "%s", str);
1428 return scnprintf(bf, bfsize, "%s", str);
/* Print one callchain entry's own branch counters (non-cumulated). */
1431 int callchain_list_counts__printf_value(struct callchain_list *clist,
1432 FILE *fp, char *bf, int bfsize)
1434 u64 branch_count, predicted_count;
1435 u64 abort_count, cycles_count;
1436 u64 iter_count, iter_cycles;
1439 branch_count = clist->branch_count;
1440 predicted_count = clist->predicted_count;
1441 abort_count = clist->abort_count;
1442 cycles_count = clist->cycles_count;
1443 iter_count = clist->iter_count;
1444 iter_cycles = clist->iter_cycles;
1445 from_count = clist->from_count;
1447 return callchain_counts_printf(fp, bf, bfsize, branch_count,
1448 predicted_count, abort_count,
1449 cycles_count, iter_count, iter_cycles,
1450 from_count, &clist->brtype_stat);
/*
 * Recursively free a node's parent_val and val lists (dropping map
 * references) and all of its children in the input tree.
 */
1453 static void free_callchain_node(struct callchain_node *node)
1455 struct callchain_list *list, *tmp;
1456 struct callchain_node *child;
1459 list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
1460 list_del_init(&list->list);
1461 map__zput(list->ms.map);
1465 list_for_each_entry_safe(list, tmp, &node->val, list) {
1466 list_del_init(&list->list);
1467 map__zput(list->ms.map);
1471 n = rb_first(&node->rb_root_in);
1473 child = container_of(n, struct callchain_node, rb_node_in);
1475 rb_erase(&child->rb_node_in, &node->rb_root_in);
1477 free_callchain_node(child);
/* Free an entire callchain tree; no-op unless callchains are in use. */
1482 void free_callchain(struct callchain_root *root)
1484 if (!symbol_conf.use_callchain)
1487 free_callchain_node(&root->node);
/*
 * Exponentially decay every hit count in the subtree (multiply by 7/8,
 * as perf-top does for stale samples) and recompute children_hit from
 * the decayed children.  Returns this subtree's new cumulative hits.
 */
1490 static u64 decay_callchain_node(struct callchain_node *node)
1492 struct callchain_node *child;
1496 n = rb_first(&node->rb_root_in);
1498 child = container_of(n, struct callchain_node, rb_node_in);
1500 child_hits += decay_callchain_node(child);
1504 node->hit = (node->hit * 7) / 8;
1505 node->children_hit = child_hits;
/* Decay a whole callchain tree; no-op unless callchains are in use. */
1510 void decay_callchain(struct callchain_root *root)
1512 if (!symbol_conf.use_callchain)
1515 decay_callchain_node(&root->node);
/*
 * Build node->parent_val: copies of every ancestor's entries, ordered
 * from the root down, so the node can display its full path.  Also
 * sets has_children on the first entry.  On allocation failure the
 * temporary list is torn down and -ENOMEM semantics apply.
 */
1518 int callchain_node__make_parent_list(struct callchain_node *node)
1520 struct callchain_node *parent = node->parent;
1521 struct callchain_list *chain, *new;
/* walk ancestors, duplicating their entries onto a temporary list */
1525 list_for_each_entry_reverse(chain, &parent->val, list) {
1526 new = malloc(sizeof(*new));
1530 new->has_children = false;
/* the copy shares the map: take an extra reference */
1531 map__get(new->ms.map);
1532 list_add_tail(&new->list, &head);
1534 parent = parent->parent;
/* reverse the temporary list into root-first order on parent_val */
1537 list_for_each_entry_safe_reverse(chain, new, &head, list)
1538 list_move_tail(&chain->list, &node->parent_val);
1540 if (!list_empty(&node->parent_val)) {
1541 chain = list_first_entry(&node->parent_val, struct callchain_list, list);
1542 chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);
1544 chain = list_first_entry(&node->val, struct callchain_list, list);
1545 chain->has_children = false;
/* error path: free the partially-built list and drop map references */
1550 list_for_each_entry_safe(chain, new, &head, list) {
1551 list_del_init(&chain->list);
1552 map__zput(chain->ms.map);
/*
 * Deep-copy src's committed entries into dst (dst is reset first);
 * stops early and returns non-zero if an append fails.
 */
1558 int callchain_cursor__copy(struct callchain_cursor *dst,
1559 struct callchain_cursor *src)
1563 callchain_cursor_reset(dst);
1564 callchain_cursor_commit(src);
1567 struct callchain_cursor_node *node;
1569 node = callchain_cursor_current(src);
1573 rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
1574 node->branch, &node->branch_flags,
1577 node->branch_from, node->srcline);
1581 callchain_cursor_advance(src);
1588 * Initialize a cursor before adding entries inside, but keep
1589 * the previously allocated entries as a cache.
1591 void callchain_cursor_reset(struct callchain_cursor *cursor)
1593 struct callchain_cursor_node *node;
1596 cursor->last = &cursor->first;
/* drop map references held by cached nodes; the nodes themselves are reused */
1598 for (node = cursor->first; node != NULL; node = node->next)
1599 map__zput(node->map);