tools/perf/util/hist.c
// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

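/*
 * Grow the per-column maximum widths (hists->col_len[]) so that they fit a
 * single entry: symbol, dso, comm, branch, memory and srcline columns are
 * all considered.  Callers run this over every non-filtered entry so the
 * output columns end up wide enough for the longest value.
 */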
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        if (h->block_info)
                return;
        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose > 0)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 8);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.ms.sym) {
                        symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.ms.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.ms.sym) {
                        symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.ms.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }

                if (h->branch_info->srcline_from)
                        hists__new_col_len(hists, HISTC_SRCLINE_FROM,
                                        strlen(h->branch_info->srcline_from));
                if (h->branch_info->srcline_to)
                        hists__new_col_len(hists, HISTC_SRCLINE_TO,
                                        strlen(h->branch_info->srcline_to));
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.ms.sym) {
                        symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.ms.sym) {
                        symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.ms.map) {
                        symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }

                hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
                                   unresolved_col_width + 4 + 2);

                hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
                                   unresolved_col_width + 4 + 2);

        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CGROUP, 6);
        hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
        if (symbol_conf.nanosecs)
                hists__new_col_len(hists, HISTC_TIME, 16);
        else
                hists__new_col_len(hists, HISTC_TIME, 12);

        if (h->srcline) {
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
                hists__new_col_len(hists, HISTC_SRCLINE, len);
        }

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

        if (h->cgroup) {
                const char *cgrp_name = "unknown";
                struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
                                                   h->cgroup);
                if (cgrp != NULL)
                        cgrp_name = cgrp->name;

                hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
        }
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static long hist_time(unsigned long htime)
{
        unsigned long time_quantum = symbol_conf.time_quantum;
        if (time_quantum)
                return (htime / time_quantum) * time_quantum;
        return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{

        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

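/*
 * Exponential decay used by live modes (e.g. 'perf top'): each pass scales
 * the period and event count by 7/8 so that stale entries fade out and can
 * eventually be deleted once their period reaches zero.
 */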
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first_cached(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root_cached *root_in;
        struct rb_root_cached *root_out;

        if (he->parent_he) {
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                if (hists__has(hists, need_collapse))
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase_cached(&he->rb_node_in, root_in);
        rb_erase_cached(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int i = 0;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (i == idx)
                        return n;

                next = rb_next(&n->rb_node);
                i++;
        }

        return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
                            bool sample_self,
                            size_t callchain_size)
{
        *he = *template;
        he->callchain_size = callchain_size;

        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
                if (he->stat_acc == NULL)
                        return -ENOMEM;
                memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                if (!sample_self)
                        memset(&he->stat, 0, sizeof(he->stat));
        }

        map__get(he->ms.map);

        if (he->branch_info) {
                /*
                 * This branch info is (a part of) an array allocated by
                 * sample__resolve_bstack() and will be freed after adding
                 * new entries, so we need to save a copy.
                 */
                he->branch_info = malloc(sizeof(*he->branch_info));
                if (he->branch_info == NULL)
                        goto err;

                memcpy(he->branch_info, template->branch_info,
                       sizeof(*he->branch_info));

                map__get(he->branch_info->from.ms.map);
                map__get(he->branch_info->to.ms.map);
        }

        if (he->mem_info) {
                map__get(he->mem_info->iaddr.ms.map);
                map__get(he->mem_info->daddr.ms.map);
        }

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_init(he->callchain);

        if (he->raw_data) {
                he->raw_data = memdup(he->raw_data, he->raw_size);
                if (he->raw_data == NULL)
                        goto err_infos;
        }

        if (he->srcline) {
                he->srcline = strdup(he->srcline);
                if (he->srcline == NULL)
                        goto err_rawdata;
        }

        if (symbol_conf.res_sample) {
                he->res_samples = calloc(sizeof(struct res_sample),
                                        symbol_conf.res_sample);
                if (!he->res_samples)
                        goto err_srcline;
        }

        INIT_LIST_HEAD(&he->pairs.node);
        thread__get(he->thread);
        he->hroot_in  = RB_ROOT_CACHED;
        he->hroot_out = RB_ROOT_CACHED;

        if (!symbol_conf.report_hierarchy)
                he->leaf = true;

        return 0;

err_srcline:
        zfree(&he->srcline);

err_rawdata:
        zfree(&he->raw_data);

err_infos:
        if (he->branch_info) {
                map__put(he->branch_info->from.ms.map);
                map__put(he->branch_info->to.ms.map);
                zfree(&he->branch_info);
        }
        if (he->mem_info) {
                map__put(he->mem_info->iaddr.ms.map);
                map__put(he->mem_info->daddr.ms.map);
        }
err:
        map__zput(he->ms.map);
        zfree(&he->stat_acc);
        return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
        return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
        free(ptr);
}

static struct hist_entry_ops default_ops = {
        .new    = hist_entry__zalloc,
        .free   = hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        struct hist_entry_ops *ops = template->ops;
        size_t callchain_size = 0;
        struct hist_entry *he;
        int err = 0;

        if (!ops)
                ops = template->ops = &default_ops;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = ops->new(callchain_size);
        if (he) {
                err = hist_entry__init(he, template, sample_self, callchain_size);
                if (err) {
                        ops->free(he);
                        he = NULL;
                }
        }

        return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
        if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
                return;

        he->hists->callchain_period += period;
        if (!he->filtered)
                he->hists->callchain_non_filtered_period += period;
}

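/*
 * Look up 'entry' in the hists->entries_in tree using hist_entry__cmp().
 * If an equal entry already exists, its stats are aggregated and the
 * template's mem/block info is dropped; otherwise a new entry is allocated
 * with hist_entry__new() and linked into the tree.
 */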
static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;
        bool leftmost = true;

        p = &hists->entries_in->rb_root.rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        mem_info__zput(entry->mem_info);

                        block_info__zput(entry->block_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

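/*
 * Return a pseudo-random index in [0, high).  Values below 'thresh' are
 * rejected before taking the modulo, so the low indexes are not favoured
 * (rejection sampling to counter modulo bias).
 */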
static unsigned random_max(unsigned high)
{
        unsigned thresh = -high % high;
        for (;;) {
                unsigned r = random();
                if (r >= thresh)
                        return r % high;
        }
}

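/*
 * Remember up to symbol_conf.res_sample representative samples (time, cpu,
 * tid) per entry.  Once the array is full, a randomly chosen slot is
 * overwritten, so both old and new samples stay reachable.
 */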
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
        struct res_sample *r;
        int j;

        if (he->num_res < symbol_conf.res_sample) {
                j = he->num_res++;
        } else {
                j = random_max(symbol_conf.res_sample);
        }
        r = &he->res_samples[j];
        r->time = sample->time;
        r->cpu = sample->cpu;
        r->tid = sample->tid;
}

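/*
 * Build an on-stack template hist_entry from the resolved address location
 * and the sample, then find or create the real entry via
 * hists__findnew_entry().  Reservoir samples are attached when
 * symbol_conf.res_sample is set.
 */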
static struct hist_entry*
__hists__add_entry(struct hists *hists,
                   struct addr_location *al,
                   struct symbol *sym_parent,
                   struct branch_info *bi,
                   struct mem_info *mi,
                   struct block_info *block_info,
                   struct perf_sample *sample,
                   bool sample_self,
                   struct hist_entry_ops *ops)
{
        struct namespaces *ns = thread__namespaces(al->thread);
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .cgroup_id = {
                        .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
                        .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
                },
                .cgroup = sample->cgroup,
                .ms = {
                        .maps   = al->maps,
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .srcline = (char *) al->srcline,
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight = sample->weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .block_info = block_info,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
                .time = hist_time(sample->time),
        }, *he = hists__findnew_entry(hists, &entry, al, sample_self);

        if (!hists->has_callchains && he && he->callchain_size != 0)
                hists->has_callchains = true;
        if (he && symbol_conf.res_sample)
                hists__res_sample(he, sample);
        return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
                                    struct addr_location *al,
                                    struct symbol *sym_parent,
                                    struct branch_info *bi,
                                    struct mem_info *mi,
                                    struct perf_sample *sample,
                                    bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                                  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
                                        struct hist_entry_ops *ops,
                                        struct addr_location *al,
                                        struct symbol *sym_parent,
                                        struct branch_info *bi,
                                        struct mem_info *mi,
                                        struct perf_sample *sample,
                                        bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                                  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
                                          struct addr_location *al,
                                          struct block_info *block_info)
{
        struct hist_entry entry = {
                .block_info = block_info,
                .hists = hists,
                .ms = {
                        .maps = al->maps,
                        .map = al->map,
                        .sym = al->sym,
                },
        }, *he = hists__findnew_entry(hists, &entry, al, false);

        return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort(), which is solely
         * based on periods.  We want sorting to be done on
         * nr_events * weight, and this is indirectly achieved by
         * passing period=weight here and in he_stat__add_period().
         */
        sample->period = cost;

        he = hists__add_entry(hists, al, iter->parent, NULL, mi,
                              sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

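/*
 * Branch mode: sample__resolve_bstack() resolves the whole branch stack,
 * then each iteration retargets 'al' to the next branch's 'to' address and
 * adds one entry per branch with a pseudo period of 1.
 */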
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
                             struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->maps = bi[i].to.ms.maps;
        al->map = bi[i].to.ms.map;
        al->sym = bi[i].to.ms.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

        he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

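/*
 * Cumulative ("children") mode: besides the entry for the sampled location,
 * one entry is added for every caller in the callchain.  The he_cache array
 * below remembers what was already added for this sample so cycles and
 * recursion are only accounted once.
 */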
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursion so that they're
         * accumulated only once, preventing entries from exceeding
         * 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
                           struct hist_entry *right)
{
        struct symbol *sym_l = left->ms.sym;
        struct symbol *sym_r = right->ms.sym;

        if (!sym_l && !sym_r)
                return left->ip != right->ip;

        return !!_sort__sym_cmp(sym_l, sym_r);
}


static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .maps = al->maps,
                        .map = al->map,
                        .sym = al->sym,
                },
                .srcline = (char *) al->srcline,
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;
        bool fast = hists__has(he_tmp.hists, sym);

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                /*
                 * For most cases, there are no duplicate entries in callchain.
                 * The symbols are usually different. Do a quick check for
                 * symbols first.
                 */
                if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
                        continue;

                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

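/*
 * Drive one sample through the chosen iterator ops: resolve the callchain,
 * prepare_entry(), add_single_entry(), then loop next_entry() /
 * add_next_entry() until exhausted and finally finish_entry().
 * add_entry_cb, when set, is invoked after every added entry.
 */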
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;
        struct map *alm = NULL;

        if (al)
                alm = map__get(al->map);

        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err) {
                map__put(alm);
                return err;
        }

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        map__put(alm);

        return err;
}

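/*
 * hist_entry__cmp() compares two entries with the configured sort keys
 * (fmt->cmp) and is used when inserting into the input tree, while
 * hist_entry__collapse() below uses fmt->collapse and decides which entries
 * get merged while collapsing.
 */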
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
        struct hist_entry_ops *ops = he->ops;

        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.ms.map);
                map__zput(he->branch_info->to.ms.map);
                free_srcline(he->branch_info->srcline_from);
                free_srcline(he->branch_info->srcline_to);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.ms.map);
                map__zput(he->mem_info->daddr.ms.map);
                mem_info__zput(he->mem_info);
        }

        if (he->block_info)
                block_info__zput(he->block_info);

        zfree(&he->res_samples);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                zfree(&he->srcfile);
        free_callchain(he->callchain);
        zfree(&he->trace_output);
        zfree(&he->raw_data);
        ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                                   struct perf_hpp_fmt *fmt, int printed)
{
        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
                const int width = fmt->width(fmt, hpp, he->hists);
                if (printed < width) {
                        advance_hpp(hpp, printed);
                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
                }
        }

        return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
                                       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
        return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                                                enum hist_filter type,
                                                fmt_chk_fn check)
{
        struct perf_hpp_fmt *fmt;
        bool type_match = false;
        struct hist_entry *parent = he->parent_he;

        switch (type) {
        case HIST_FILTER__THREAD:
                if (symbol_conf.comm_list == NULL &&
                    symbol_conf.pid_list == NULL &&
                    symbol_conf.tid_list == NULL)
                        return;
                break;
        case HIST_FILTER__DSO:
                if (symbol_conf.dso_list == NULL)
                        return;
                break;
        case HIST_FILTER__SYMBOL:
                if (symbol_conf.sym_list == NULL)
                        return;
                break;
        case HIST_FILTER__PARENT:
        case HIST_FILTER__GUEST:
        case HIST_FILTER__HOST:
        case HIST_FILTER__SOCKET:
        case HIST_FILTER__C2C:
        default:
                return;
        }

        /* if it's filtered by own fmt, it has to have filter bits */
        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
                if (check(fmt)) {
                        type_match = true;
                        break;
                }
        }

        if (type_match) {
                /*
                 * If the filter is for current level entry, propagate
                 * filter marker to parents.  The marker bit was
                 * already set by default so it only needs to clear
                 * non-filtered entries.
                 */
                if (!(he->filtered & (1 << type))) {
                        while (parent) {
                                parent->filtered &= ~(1 << type);
                                parent = parent->parent_he;
                        }
                }
        } else {
                /*
                 * If the current entry doesn't have matching formats, set
                 * the filter marker for upper level entries.  It will be
                 * cleared if its lower level entries are not filtered.
                 *
                 * For lower-level entries, it inherits the parent's
                 * filter bit so that lower level entries of a
                 * non-filtered entry won't set the filter marker.
                 */
                if (parent == NULL)
                        he->filtered |= (1 << type);
                else
                        he->filtered |= (parent->filtered & (1 << type));
        }
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
        hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
                                            check_thread_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
                                            perf_hpp__is_dso_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
                                            perf_hpp__is_sym_entry);

        hists__apply_filters(he->hists, he);
}

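/*
 * Hierarchy mode: insert a copy of 'he' at one level of the hierarchy,
 * keyed by the sort formats in 'hpp_list'.  Entries that compare equal are
 * merged; new entries remember their format list and parent so the
 * per-level filters can be applied.
 */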
1444 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1445                                                  struct rb_root_cached *root,
1446                                                  struct hist_entry *he,
1447                                                  struct hist_entry *parent_he,
1448                                                  struct perf_hpp_list *hpp_list)
1449 {
1450         struct rb_node **p = &root->rb_root.rb_node;
1451         struct rb_node *parent = NULL;
1452         struct hist_entry *iter, *new;
1453         struct perf_hpp_fmt *fmt;
1454         int64_t cmp;
1455         bool leftmost = true;
1456
1457         while (*p != NULL) {
1458                 parent = *p;
1459                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1460
1461                 cmp = 0;
1462                 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1463                         cmp = fmt->collapse(fmt, iter, he);
1464                         if (cmp)
1465                                 break;
1466                 }
1467
1468                 if (!cmp) {
1469                         he_stat__add_stat(&iter->stat, &he->stat);
1470                         return iter;
1471                 }
1472
1473                 if (cmp < 0)
1474                         p = &parent->rb_left;
1475                 else {
1476                         p = &parent->rb_right;
1477                         leftmost = false;
1478                 }
1479         }
1480
1481         new = hist_entry__new(he, true);
1482         if (new == NULL)
1483                 return NULL;
1484
1485         hists->nr_entries++;
1486
1487         /* save related format list for output */
1488         new->hpp_list = hpp_list;
1489         new->parent_he = parent_he;
1490
1491         hist_entry__apply_hierarchy_filters(new);
1492
1493         /* some fields are now passed to 'new' */
1494         perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1495                 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1496                         he->trace_output = NULL;
1497                 else
1498                         new->trace_output = NULL;
1499
1500                 if (perf_hpp__is_srcline_entry(fmt))
1501                         he->srcline = NULL;
1502                 else
1503                         new->srcline = NULL;
1504
1505                 if (perf_hpp__is_srcfile_entry(fmt))
1506                         he->srcfile = NULL;
1507                 else
1508                         new->srcfile = NULL;
1509         }
1510
1511         rb_link_node(&new->rb_node_in, parent, p);
1512         rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1513         return new;
1514 }
1515
1516 static int hists__hierarchy_insert_entry(struct hists *hists,
1517                                          struct rb_root_cached *root,
1518                                          struct hist_entry *he)
1519 {
1520         struct perf_hpp_list_node *node;
1521         struct hist_entry *new_he = NULL;
1522         struct hist_entry *parent = NULL;
1523         int depth = 0;
1524         int ret = 0;
1525
1526         list_for_each_entry(node, &hists->hpp_formats, list) {
1527                 /* skip period (overhead) and elided columns */
1528                 if (node->level == 0 || node->skip)
1529                         continue;
1530
1531                 /* insert copy of 'he' for each fmt into the hierarchy */
1532                 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1533                 if (new_he == NULL) {
1534                         ret = -1;
1535                         break;
1536                 }
1537
1538                 root = &new_he->hroot_in;
1539                 new_he->depth = depth++;
1540                 parent = new_he;
1541         }
1542
1543         if (new_he) {
1544                 new_he->leaf = true;
1545
1546                 if (hist_entry__has_callchains(new_he) &&
1547                     symbol_conf.use_callchain) {
1548                         callchain_cursor_reset(&callchain_cursor);
1549                         if (callchain_merge(&callchain_cursor,
1550                                             new_he->callchain,
1551                                             he->callchain) < 0)
1552                                 ret = -1;
1553                 }
1554         }
1555
1556         /* 'he' is no longer used */
1557         hist_entry__delete(he);
1558
1559         /* return 0 (or -1) since it already applied filters */
1560         return ret;
1561 }
1562
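/*
 * Merge 'he' into the collapsed tree: returns 1 if it was linked in as a new
 * entry, 0 if it was merged into an existing one (and freed), or -1 on error.
 * In hierarchy mode the work is delegated to hists__hierarchy_insert_entry().
 */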
1563 static int hists__collapse_insert_entry(struct hists *hists,
1564                                         struct rb_root_cached *root,
1565                                         struct hist_entry *he)
1566 {
1567         struct rb_node **p = &root->rb_root.rb_node;
1568         struct rb_node *parent = NULL;
1569         struct hist_entry *iter;
1570         int64_t cmp;
1571         bool leftmost = true;
1572
1573         if (symbol_conf.report_hierarchy)
1574                 return hists__hierarchy_insert_entry(hists, root, he);
1575
1576         while (*p != NULL) {
1577                 parent = *p;
1578                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1579
1580                 cmp = hist_entry__collapse(iter, he);
1581
1582                 if (!cmp) {
1583                         int ret = 0;
1584
1585                         he_stat__add_stat(&iter->stat, &he->stat);
1586                         if (symbol_conf.cumulate_callchain)
1587                                 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1588
1589                         if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1590                                 callchain_cursor_reset(&callchain_cursor);
1591                                 if (callchain_merge(&callchain_cursor,
1592                                                     iter->callchain,
1593                                                     he->callchain) < 0)
1594                                         ret = -1;
1595                         }
1596                         hist_entry__delete(he);
1597                         return ret;
1598                 }
1599
1600                 if (cmp < 0)
1601                         p = &(*p)->rb_left;
1602                 else {
1603                         p = &(*p)->rb_right;
1604                         leftmost = false;
1605                 }
1606         }
1607         hists->nr_entries++;
1608
1609         rb_link_node(&he->rb_node_in, parent, p);
1610         rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1611         return 1;
1612 }
1613
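/*
 * The two trees in entries_in_array double-buffer incoming entries: rotate
 * them so that new entries keep landing in one tree while the returned one
 * is being collapsed.
 */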
1614 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1615 {
1616         struct rb_root_cached *root;
1617
1618         pthread_mutex_lock(&hists->lock);
1619
1620         root = hists->entries_in;
1621         if (++hists->entries_in > &hists->entries_in_array[1])
1622                 hists->entries_in = &hists->entries_in_array[0];
1623
1624         pthread_mutex_unlock(&hists->lock);
1625
1626         return root;
1627 }
1628
1629 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1630 {
1631         hists__filter_entry_by_dso(hists, he);
1632         hists__filter_entry_by_thread(hists, he);
1633         hists__filter_entry_by_symbol(hists, he);
1634         hists__filter_entry_by_socket(hists, he);
1635 }
1636
1637 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1638 {
1639         struct rb_root_cached *root;
1640         struct rb_node *next;
1641         struct hist_entry *n;
1642         int ret;
1643
1644         if (!hists__has(hists, need_collapse))
1645                 return 0;
1646
1647         hists->nr_entries = 0;
1648
1649         root = hists__get_rotate_entries_in(hists);
1650
1651         next = rb_first_cached(root);
1652
1653         while (next) {
1654                 if (session_done())
1655                         break;
1656                 n = rb_entry(next, struct hist_entry, rb_node_in);
1657                 next = rb_next(&n->rb_node_in);
1658
1659                 rb_erase_cached(&n->rb_node_in, root);
1660                 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1661                 if (ret < 0)
1662                         return -1;
1663
1664                 if (ret) {
1665                         /*
1666                          * If it wasn't combined with one of the entries already
1667                          * collapsed, we need to apply the filters that may have
1668                          * been set by, say, the hist_browser.
1669                          */
1670                         hists__apply_filters(hists, n);
1671                 }
1672                 if (prog)
1673                         ui_progress__update(prog, 1);
1674         }
1675         return 0;
1676 }
1677
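/*
 * Compare two entries according to the output sort keys; the first non-zero
 * comparison decides.
 */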
1678 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1679 {
1680         struct hists *hists = a->hists;
1681         struct perf_hpp_fmt *fmt;
1682         int64_t cmp = 0;
1683
1684         hists__for_each_sort_list(hists, fmt) {
1685                 if (perf_hpp__should_skip(fmt, a->hists))
1686                         continue;
1687
1688                 cmp = fmt->sort(fmt, a, b);
1689                 if (cmp)
1690                         break;
1691         }
1692
1693         return cmp;
1694 }
1695
1696 static void hists__reset_filter_stats(struct hists *hists)
1697 {
1698         hists->nr_non_filtered_entries = 0;
1699         hists->stats.total_non_filtered_period = 0;
1700 }
1701
1702 void hists__reset_stats(struct hists *hists)
1703 {
1704         hists->nr_entries = 0;
1705         hists->stats.total_period = 0;
1706
1707         hists__reset_filter_stats(hists);
1708 }
1709
1710 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1711 {
1712         hists->nr_non_filtered_entries++;
1713         hists->stats.total_non_filtered_period += h->stat.period;
1714 }
1715
1716 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1717 {
1718         if (!h->filtered)
1719                 hists__inc_filter_stats(hists, h);
1720
1721         hists->nr_entries++;
1722         hists->stats.total_period += h->stat.period;
1723 }
1724
1725 static void hierarchy_recalc_total_periods(struct hists *hists)
1726 {
1727         struct rb_node *node;
1728         struct hist_entry *he;
1729
1730         node = rb_first_cached(&hists->entries);
1731
1732         hists->stats.total_period = 0;
1733         hists->stats.total_non_filtered_period = 0;
1734
1735         /*
1736          * recalculate the total period using top-level entries only:
1737          * lower level entries only see non-filtered entries, while
1738          * upper level entries hold the sum of both.
1739          */
1740         while (node) {
1741                 he = rb_entry(node, struct hist_entry, rb_node);
1742                 node = rb_next(node);
1743
1744                 hists->stats.total_period += he->stat.period;
1745                 if (!he->filtered)
1746                         hists->stats.total_non_filtered_period += he->stat.period;
1747         }
1748 }
1749
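/*
 * Link 'he' into the per-level output tree in output sort order and let
 * dynamic entries update their column width.
 */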
1750 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1751                                           struct hist_entry *he)
1752 {
1753         struct rb_node **p = &root->rb_root.rb_node;
1754         struct rb_node *parent = NULL;
1755         struct hist_entry *iter;
1756         struct perf_hpp_fmt *fmt;
1757         bool leftmost = true;
1758
1759         while (*p != NULL) {
1760                 parent = *p;
1761                 iter = rb_entry(parent, struct hist_entry, rb_node);
1762
1763                 if (hist_entry__sort(he, iter) > 0)
1764                         p = &parent->rb_left;
1765                 else {
1766                         p = &parent->rb_right;
1767                         leftmost = false;
1768                 }
1769         }
1770
1771         rb_link_node(&he->rb_node, parent, p);
1772         rb_insert_color_cached(&he->rb_node, root, leftmost);
1773
1774         /* update column width of dynamic entry */
1775         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1776                 if (perf_hpp__is_dynamic_entry(fmt))
1777                         fmt->sort(fmt, he, NULL);
1778         }
1779 }
1780
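/*
 * Recursively rebuild the hierarchy output trees, level by level, from the
 * collapsed trees, updating entry counts and column widths and sorting the
 * callchains of leaf entries.
 */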
1781 static void hists__hierarchy_output_resort(struct hists *hists,
1782                                            struct ui_progress *prog,
1783                                            struct rb_root_cached *root_in,
1784                                            struct rb_root_cached *root_out,
1785                                            u64 min_callchain_hits,
1786                                            bool use_callchain)
1787 {
1788         struct rb_node *node;
1789         struct hist_entry *he;
1790
1791         *root_out = RB_ROOT_CACHED;
1792         node = rb_first_cached(root_in);
1793
1794         while (node) {
1795                 he = rb_entry(node, struct hist_entry, rb_node_in);
1796                 node = rb_next(node);
1797
1798                 hierarchy_insert_output_entry(root_out, he);
1799
1800                 if (prog)
1801                         ui_progress__update(prog, 1);
1802
1803                 hists->nr_entries++;
1804                 if (!he->filtered) {
1805                         hists->nr_non_filtered_entries++;
1806                         hists__calc_col_len(hists, he);
1807                 }
1808
1809                 if (!he->leaf) {
1810                         hists__hierarchy_output_resort(hists, prog,
1811                                                        &he->hroot_in,
1812                                                        &he->hroot_out,
1813                                                        min_callchain_hits,
1814                                                        use_callchain);
1815                         continue;
1816                 }
1817
1818                 if (!use_callchain)
1819                         continue;
1820
1821                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1822                         u64 total = he->stat.period;
1823
1824                         if (symbol_conf.cumulate_callchain)
1825                                 total = he->stat_acc->period;
1826
1827                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1828                 }
1829
1830                 callchain_param.sort(&he->sorted_chain, he->callchain,
1831                                      min_callchain_hits, &callchain_param);
1832         }
1833 }
1834
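/*
 * Sort the callchain of 'he' (if used) and link the entry into the flat
 * output tree in output sort order, updating dynamic column widths.
 */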
1835 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1836                                          struct hist_entry *he,
1837                                          u64 min_callchain_hits,
1838                                          bool use_callchain)
1839 {
1840         struct rb_node **p = &entries->rb_root.rb_node;
1841         struct rb_node *parent = NULL;
1842         struct hist_entry *iter;
1843         struct perf_hpp_fmt *fmt;
1844         bool leftmost = true;
1845
1846         if (use_callchain) {
1847                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1848                         u64 total = he->stat.period;
1849
1850                         if (symbol_conf.cumulate_callchain)
1851                                 total = he->stat_acc->period;
1852
1853                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1854                 }
1855                 callchain_param.sort(&he->sorted_chain, he->callchain,
1856                                       min_callchain_hits, &callchain_param);
1857         }
1858
1859         while (*p != NULL) {
1860                 parent = *p;
1861                 iter = rb_entry(parent, struct hist_entry, rb_node);
1862
1863                 if (hist_entry__sort(he, iter) > 0)
1864                         p = &(*p)->rb_left;
1865                 else {
1866                         p = &(*p)->rb_right;
1867                         leftmost = false;
1868                 }
1869         }
1870
1871         rb_link_node(&he->rb_node, parent, p);
1872         rb_insert_color_cached(&he->rb_node, entries, leftmost);
1873
1874         perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1875                 if (perf_hpp__is_dynamic_entry(fmt) &&
1876                     perf_hpp__defined_dynamic_entry(fmt, he->hists))
1877                         fmt->sort(fmt, he, NULL);  /* update column width */
1878         }
1879 }
1880
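/*
 * Rebuild hists->entries in output sort order from the collapsed (or
 * incoming) tree after resetting the stats and column widths.  Entries for
 * which the optional callback returns true are skipped.
 */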
1881 static void output_resort(struct hists *hists, struct ui_progress *prog,
1882                           bool use_callchain, hists__resort_cb_t cb,
1883                           void *cb_arg)
1884 {
1885         struct rb_root_cached *root;
1886         struct rb_node *next;
1887         struct hist_entry *n;
1888         u64 callchain_total;
1889         u64 min_callchain_hits;
1890
1891         callchain_total = hists->callchain_period;
1892         if (symbol_conf.filter_relative)
1893                 callchain_total = hists->callchain_non_filtered_period;
1894
1895         min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1896
1897         hists__reset_stats(hists);
1898         hists__reset_col_len(hists);
1899
1900         if (symbol_conf.report_hierarchy) {
1901                 hists__hierarchy_output_resort(hists, prog,
1902                                                &hists->entries_collapsed,
1903                                                &hists->entries,
1904                                                min_callchain_hits,
1905                                                use_callchain);
1906                 hierarchy_recalc_total_periods(hists);
1907                 return;
1908         }
1909
1910         if (hists__has(hists, need_collapse))
1911                 root = &hists->entries_collapsed;
1912         else
1913                 root = hists->entries_in;
1914
1915         next = rb_first_cached(root);
1916         hists->entries = RB_ROOT_CACHED;
1917
1918         while (next) {
1919                 n = rb_entry(next, struct hist_entry, rb_node_in);
1920                 next = rb_next(&n->rb_node_in);
1921
1922                 if (cb && cb(n, cb_arg))
1923                         continue;
1924
1925                 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1926                 hists__inc_stats(hists, n);
1927
1928                 if (!n->filtered)
1929                         hists__calc_col_len(hists, n);
1930
1931                 if (prog)
1932                         ui_progress__update(prog, 1);
1933         }
1934 }
1935
1936 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1937                              hists__resort_cb_t cb, void *cb_arg)
1938 {
1939         bool use_callchain;
1940
1941         if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1942                 use_callchain = evsel__has_callchain(evsel);
1943         else
1944                 use_callchain = symbol_conf.use_callchain;
1945
1946         use_callchain |= symbol_conf.show_branchflag_count;
1947
1948         output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1949 }
1950
1951 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1952 {
1953         return evsel__output_resort_cb(evsel, prog, NULL, NULL);
1954 }
1955
1956 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1957 {
1958         output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1959 }
1960
1961 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1962                              hists__resort_cb_t cb)
1963 {
1964         output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1965 }
1966
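/*
 * Helpers to walk the hierarchy output trees as a flattened list, honoring
 * the folded/unfolded state unless a child or sibling move is forced.
 */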
1967 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1968 {
1969         if (he->leaf || hmd == HMD_FORCE_SIBLING)
1970                 return false;
1971
1972         if (he->unfolded || hmd == HMD_FORCE_CHILD)
1973                 return true;
1974
1975         return false;
1976 }
1977
1978 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1979 {
1980         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1981
1982         while (can_goto_child(he, HMD_NORMAL)) {
1983                 node = rb_last(&he->hroot_out.rb_root);
1984                 he = rb_entry(node, struct hist_entry, rb_node);
1985         }
1986         return node;
1987 }
1988
1989 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1990 {
1991         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1992
1993         if (can_goto_child(he, hmd))
1994                 node = rb_first_cached(&he->hroot_out);
1995         else
1996                 node = rb_next(node);
1997
1998         while (node == NULL) {
1999                 he = he->parent_he;
2000                 if (he == NULL)
2001                         break;
2002
2003                 node = rb_next(&he->rb_node);
2004         }
2005         return node;
2006 }
2007
2008 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2009 {
2010         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2011
2012         node = rb_prev(node);
2013         if (node)
2014                 return rb_hierarchy_last(node);
2015
2016         he = he->parent_he;
2017         if (he == NULL)
2018                 return NULL;
2019
2020         return &he->rb_node;
2021 }
2022
2023 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
2024 {
2025         struct rb_node *node;
2026         struct hist_entry *child;
2027         float percent;
2028
2029         if (he->leaf)
2030                 return false;
2031
2032         node = rb_first_cached(&he->hroot_out);
2033         child = rb_entry(node, struct hist_entry, rb_node);
2034
2035         while (node && child->filtered) {
2036                 node = rb_next(node);
2037                 child = rb_entry(node, struct hist_entry, rb_node);
2038         }
2039
2040         if (node)
2041                 percent = hist_entry__get_percent_limit(child);
2042         else
2043                 percent = 0;
2044
2045         return node && percent >= limit;
2046 }
2047
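/*
 * Clear one filter bit on 'h' (propagating stats to its hierarchy parents)
 * and, once the entry is completely unfiltered, re-account it in the
 * non-filtered stats and column widths.
 */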
2048 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2049                                        enum hist_filter filter)
2050 {
2051         h->filtered &= ~(1 << filter);
2052
2053         if (symbol_conf.report_hierarchy) {
2054                 struct hist_entry *parent = h->parent_he;
2055
2056                 while (parent) {
2057                         he_stat__add_stat(&parent->stat, &h->stat);
2058
2059                         parent->filtered &= ~(1 << filter);
2060
2061                         if (parent->filtered)
2062                                 goto next;
2063
2064                         /* force fold unfiltered entry for simplicity */
2065                         parent->unfolded = false;
2066                         parent->has_no_entry = false;
2067                         parent->row_offset = 0;
2068                         parent->nr_rows = 0;
2069 next:
2070                         parent = parent->parent_he;
2071                 }
2072         }
2073
2074         if (h->filtered)
2075                 return;
2076
2077         /* force fold unfiltered entry for simplicity */
2078         h->unfolded = false;
2079         h->has_no_entry = false;
2080         h->row_offset = 0;
2081         h->nr_rows = 0;
2082
2083         hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2084
2085         hists__inc_filter_stats(hists, h);
2086         hists__calc_col_len(hists, h);
2087 }
2088
2089
2090 static bool hists__filter_entry_by_dso(struct hists *hists,
2091                                        struct hist_entry *he)
2092 {
2093         if (hists->dso_filter != NULL &&
2094             (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
2095                 he->filtered |= (1 << HIST_FILTER__DSO);
2096                 return true;
2097         }
2098
2099         return false;
2100 }
2101
2102 static bool hists__filter_entry_by_thread(struct hists *hists,
2103                                           struct hist_entry *he)
2104 {
2105         if (hists->thread_filter != NULL &&
2106             he->thread != hists->thread_filter) {
2107                 he->filtered |= (1 << HIST_FILTER__THREAD);
2108                 return true;
2109         }
2110
2111         return false;
2112 }
2113
2114 static bool hists__filter_entry_by_symbol(struct hists *hists,
2115                                           struct hist_entry *he)
2116 {
2117         if (hists->symbol_filter_str != NULL &&
2118             (!he->ms.sym || strstr(he->ms.sym->name,
2119                                    hists->symbol_filter_str) == NULL)) {
2120                 he->filtered |= (1 << HIST_FILTER__SYMBOL);
2121                 return true;
2122         }
2123
2124         return false;
2125 }
2126
2127 static bool hists__filter_entry_by_socket(struct hists *hists,
2128                                           struct hist_entry *he)
2129 {
2130         if ((hists->socket_filter > -1) &&
2131             (he->socket != hists->socket_filter)) {
2132                 he->filtered |= (1 << HIST_FILTER__SOCKET);
2133                 return true;
2134         }
2135
2136         return false;
2137 }
2138
2139 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2140
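/*
 * Re-evaluate one filter over the flat output tree after resetting the
 * non-filtered stats and column widths; entries that pass get the
 * corresponding filter bit cleared.
 */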
2141 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2142 {
2143         struct rb_node *nd;
2144
2145         hists->stats.nr_non_filtered_samples = 0;
2146
2147         hists__reset_filter_stats(hists);
2148         hists__reset_col_len(hists);
2149
2150         for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2151                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2152
2153                 if (filter(hists, h))
2154                         continue;
2155
2156                 hists__remove_entry_filter(hists, h, type);
2157         }
2158 }
2159
2160 static void resort_filtered_entry(struct rb_root_cached *root,
2161                                   struct hist_entry *he)
2162 {
2163         struct rb_node **p = &root->rb_root.rb_node;
2164         struct rb_node *parent = NULL;
2165         struct hist_entry *iter;
2166         struct rb_root_cached new_root = RB_ROOT_CACHED;
2167         struct rb_node *nd;
2168         bool leftmost = true;
2169
2170         while (*p != NULL) {
2171                 parent = *p;
2172                 iter = rb_entry(parent, struct hist_entry, rb_node);
2173
2174                 if (hist_entry__sort(he, iter) > 0)
2175                         p = &(*p)->rb_left;
2176                 else {
2177                         p = &(*p)->rb_right;
2178                         leftmost = false;
2179                 }
2180         }
2181
2182         rb_link_node(&he->rb_node, parent, p);
2183         rb_insert_color_cached(&he->rb_node, root, leftmost);
2184
2185         if (he->leaf || he->filtered)
2186                 return;
2187
2188         nd = rb_first_cached(&he->hroot_out);
2189         while (nd) {
2190                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2191
2192                 nd = rb_next(nd);
2193                 rb_erase_cached(&h->rb_node, &he->hroot_out);
2194
2195                 resort_filtered_entry(&new_root, h);
2196         }
2197
2198         he->hroot_out = new_root;
2199 }
2200
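/*
 * Re-evaluate a filter over the hierarchy: hist_entry__filter() decides per
 * entry whether to descend into children, skip the subtree or keep it.  The
 * totals are then recalculated and the output trees re-sorted since periods
 * may have changed.
 */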
2201 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2202 {
2203         struct rb_node *nd;
2204         struct rb_root_cached new_root = RB_ROOT_CACHED;
2205
2206         hists->stats.nr_non_filtered_samples = 0;
2207
2208         hists__reset_filter_stats(hists);
2209         hists__reset_col_len(hists);
2210
2211         nd = rb_first_cached(&hists->entries);
2212         while (nd) {
2213                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2214                 int ret;
2215
2216                 ret = hist_entry__filter(h, type, arg);
2217
2218                 /*
2219                  * case 1. non-matching type
2220                  * zero out the period, set filter marker and move to child
2221                  */
2222                 if (ret < 0) {
2223                         memset(&h->stat, 0, sizeof(h->stat));
2224                         h->filtered |= (1 << type);
2225
2226                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2227                 }
2228                 /*
2229                  * case 2. matched type (filter out)
2230                  * set filter marker and move to next
2231                  */
2232                 else if (ret == 1) {
2233                         h->filtered |= (1 << type);
2234
2235                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2236                 }
2237                 /*
2238                  * case 3. ok (not filtered)
2239                  * add period to hists and parents, erase the filter marker
2240                  * and move to next sibling
2241                  */
2242                 else {
2243                         hists__remove_entry_filter(hists, h, type);
2244
2245                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2246                 }
2247         }
2248
2249         hierarchy_recalc_total_periods(hists);
2250
2251         /*
2252          * resort output after applying a new filter since a filter in a lower
2253          * hierarchy level can change periods in an upper level.
2254          */
2255         nd = rb_first_cached(&hists->entries);
2256         while (nd) {
2257                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2258
2259                 nd = rb_next(nd);
2260                 rb_erase_cached(&h->rb_node, &hists->entries);
2261
2262                 resort_filtered_entry(&new_root, h);
2263         }
2264
2265         hists->entries = new_root;
2266 }
2267
2268 void hists__filter_by_thread(struct hists *hists)
2269 {
2270         if (symbol_conf.report_hierarchy)
2271                 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2272                                         hists->thread_filter);
2273         else
2274                 hists__filter_by_type(hists, HIST_FILTER__THREAD,
2275                                       hists__filter_entry_by_thread);
2276 }
2277
2278 void hists__filter_by_dso(struct hists *hists)
2279 {
2280         if (symbol_conf.report_hierarchy)
2281                 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2282                                         hists->dso_filter);
2283         else
2284                 hists__filter_by_type(hists, HIST_FILTER__DSO,
2285                                       hists__filter_entry_by_dso);
2286 }
2287
2288 void hists__filter_by_symbol(struct hists *hists)
2289 {
2290         if (symbol_conf.report_hierarchy)
2291                 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2292                                         hists->symbol_filter_str);
2293         else
2294                 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2295                                       hists__filter_entry_by_symbol);
2296 }
2297
2298 void hists__filter_by_socket(struct hists *hists)
2299 {
2300         if (symbol_conf.report_hierarchy)
2301                 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2302                                         &hists->socket_filter);
2303         else
2304                 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2305                                       hists__filter_entry_by_socket);
2306 }
2307
2308 void events_stats__inc(struct events_stats *stats, u32 type)
2309 {
2310         ++stats->nr_events[0];
2311         ++stats->nr_events[type];
2312 }
2313
2314 void hists__inc_nr_events(struct hists *hists, u32 type)
2315 {
2316         events_stats__inc(&hists->stats, type);
2317 }
2318
2319 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2320 {
2321         events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2322         if (!filtered)
2323                 hists->stats.nr_non_filtered_samples++;
2324 }
2325
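/*
 * Add a zero-stat copy of 'pair' to the leader hists so that hists__link()
 * has an entry to pair with; an existing entry with equal keys is reused.
 */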
2326 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2327                                                  struct hist_entry *pair)
2328 {
2329         struct rb_root_cached *root;
2330         struct rb_node **p;
2331         struct rb_node *parent = NULL;
2332         struct hist_entry *he;
2333         int64_t cmp;
2334         bool leftmost = true;
2335
2336         if (hists__has(hists, need_collapse))
2337                 root = &hists->entries_collapsed;
2338         else
2339                 root = hists->entries_in;
2340
2341         p = &root->rb_root.rb_node;
2342
2343         while (*p != NULL) {
2344                 parent = *p;
2345                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2346
2347                 cmp = hist_entry__collapse(he, pair);
2348
2349                 if (!cmp)
2350                         goto out;
2351
2352                 if (cmp < 0)
2353                         p = &(*p)->rb_left;
2354                 else {
2355                         p = &(*p)->rb_right;
2356                         leftmost = false;
2357                 }
2358         }
2359
2360         he = hist_entry__new(pair, true);
2361         if (he) {
2362                 memset(&he->stat, 0, sizeof(he->stat));
2363                 he->hists = hists;
2364                 if (symbol_conf.cumulate_callchain)
2365                         memset(he->stat_acc, 0, sizeof(he->stat));
2366                 rb_link_node(&he->rb_node_in, parent, p);
2367                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2368                 hists__inc_stats(hists, he);
2369                 he->dummy = true;
2370         }
2371 out:
2372         return he;
2373 }
2374
2375 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2376                                                     struct rb_root_cached *root,
2377                                                     struct hist_entry *pair)
2378 {
2379         struct rb_node **p;
2380         struct rb_node *parent = NULL;
2381         struct hist_entry *he;
2382         struct perf_hpp_fmt *fmt;
2383         bool leftmost = true;
2384
2385         p = &root->rb_root.rb_node;
2386         while (*p != NULL) {
2387                 int64_t cmp = 0;
2388
2389                 parent = *p;
2390                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2391
2392                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2393                         cmp = fmt->collapse(fmt, he, pair);
2394                         if (cmp)
2395                                 break;
2396                 }
2397                 if (!cmp)
2398                         goto out;
2399
2400                 if (cmp < 0)
2401                         p = &parent->rb_left;
2402                 else {
2403                         p = &parent->rb_right;
2404                         leftmost = false;
2405                 }
2406         }
2407
2408         he = hist_entry__new(pair, true);
2409         if (he) {
2410                 rb_link_node(&he->rb_node_in, parent, p);
2411                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2412
2413                 he->dummy = true;
2414                 he->hists = hists;
2415                 memset(&he->stat, 0, sizeof(he->stat));
2416                 hists__inc_stats(hists, he);
2417         }
2418 out:
2419         return he;
2420 }
2421
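/* Look up an entry with the same collapse keys as 'he'. */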
2422 static struct hist_entry *hists__find_entry(struct hists *hists,
2423                                             struct hist_entry *he)
2424 {
2425         struct rb_node *n;
2426
2427         if (hists__has(hists, need_collapse))
2428                 n = hists->entries_collapsed.rb_root.rb_node;
2429         else
2430                 n = hists->entries_in->rb_root.rb_node;
2431
2432         while (n) {
2433                 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2434                 int64_t cmp = hist_entry__collapse(iter, he);
2435
2436                 if (cmp < 0)
2437                         n = n->rb_left;
2438                 else if (cmp > 0)
2439                         n = n->rb_right;
2440                 else
2441                         return iter;
2442         }
2443
2444         return NULL;
2445 }
2446
2447 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2448                                                       struct hist_entry *he)
2449 {
2450         struct rb_node *n = root->rb_root.rb_node;
2451
2452         while (n) {
2453                 struct hist_entry *iter;
2454                 struct perf_hpp_fmt *fmt;
2455                 int64_t cmp = 0;
2456
2457                 iter = rb_entry(n, struct hist_entry, rb_node_in);
2458                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2459                         cmp = fmt->collapse(fmt, iter, he);
2460                         if (cmp)
2461                                 break;
2462                 }
2463
2464                 if (cmp < 0)
2465                         n = n->rb_left;
2466                 else if (cmp > 0)
2467                         n = n->rb_right;
2468                 else
2469                         return iter;
2470         }
2471
2472         return NULL;
2473 }
2474
2475 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2476                                    struct rb_root_cached *other_root)
2477 {
2478         struct rb_node *nd;
2479         struct hist_entry *pos, *pair;
2480
2481         for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2482                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2483                 pair = hists__find_hierarchy_entry(other_root, pos);
2484
2485                 if (pair) {
2486                         hist_entry__add_pair(pair, pos);
2487                         hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2488                 }
2489         }
2490 }
2491
2492 /*
2493  * Look for pairs to link to the leader buckets (hist_entries):
2494  */
2495 void hists__match(struct hists *leader, struct hists *other)
2496 {
2497         struct rb_root_cached *root;
2498         struct rb_node *nd;
2499         struct hist_entry *pos, *pair;
2500
2501         if (symbol_conf.report_hierarchy) {
2502                 /* hierarchy report always collapses entries */
2503                 return hists__match_hierarchy(&leader->entries_collapsed,
2504                                               &other->entries_collapsed);
2505         }
2506
2507         if (hists__has(leader, need_collapse))
2508                 root = &leader->entries_collapsed;
2509         else
2510                 root = leader->entries_in;
2511
2512         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2513                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2514                 pair = hists__find_entry(other, pos);
2515
2516                 if (pair)
2517                         hist_entry__add_pair(pair, pos);
2518         }
2519 }
2520
2521 static int hists__link_hierarchy(struct hists *leader_hists,
2522                                  struct hist_entry *parent,
2523                                  struct rb_root_cached *leader_root,
2524                                  struct rb_root_cached *other_root)
2525 {
2526         struct rb_node *nd;
2527         struct hist_entry *pos, *leader;
2528
2529         for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2530                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2531
2532                 if (hist_entry__has_pairs(pos)) {
2533                         bool found = false;
2534
2535                         list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2536                                 if (leader->hists == leader_hists) {
2537                                         found = true;
2538                                         break;
2539                                 }
2540                         }
2541                         if (!found)
2542                                 return -1;
2543                 } else {
2544                         leader = add_dummy_hierarchy_entry(leader_hists,
2545                                                            leader_root, pos);
2546                         if (leader == NULL)
2547                                 return -1;
2548
2549                         /* point parent_he at the leader-side parent, not at the parent of 'pos' */
2550                         leader->parent_he = parent;
2551
2552                         hist_entry__add_pair(pos, leader);
2553                 }
2554
2555                 if (!pos->leaf) {
2556                         if (hists__link_hierarchy(leader_hists, leader,
2557                                                   &leader->hroot_in,
2558                                                   &pos->hroot_in) < 0)
2559                                 return -1;
2560                 }
2561         }
2562         return 0;
2563 }
2564
2565 /*
2566  * Look for entries in the other hists that are not present in the leader.
2567  * If we find them, just add a dummy entry on the leader hists with period=0
2568  * and nr_events=0, to serve as the list header.
2569  */
2570 int hists__link(struct hists *leader, struct hists *other)
2571 {
2572         struct rb_root_cached *root;
2573         struct rb_node *nd;
2574         struct hist_entry *pos, *pair;
2575
2576         if (symbol_conf.report_hierarchy) {
2577                 /* hierarchy report always collapses entries */
2578                 return hists__link_hierarchy(leader, NULL,
2579                                              &leader->entries_collapsed,
2580                                              &other->entries_collapsed);
2581         }
2582
2583         if (hists__has(other, need_collapse))
2584                 root = &other->entries_collapsed;
2585         else
2586                 root = other->entries_in;
2587
2588         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2589                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2590
2591                 if (!hist_entry__has_pairs(pos)) {
2592                         pair = hists__add_dummy_entry(leader, pos);
2593                         if (pair == NULL)
2594                                 return -1;
2595                         hist_entry__add_pair(pos, pair);
2596                 }
2597         }
2598
2599         return 0;
2600 }
2601
2602 int hists__unlink(struct hists *hists)
2603 {
2604         struct rb_root_cached *root;
2605         struct rb_node *nd;
2606         struct hist_entry *pos;
2607
2608         if (hists__has(hists, need_collapse))
2609                 root = &hists->entries_collapsed;
2610         else
2611                 root = hists->entries_in;
2612
2613         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2614                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2615                 list_del_init(&pos->pairs.node);
2616         }
2617
2618         return 0;
2619 }
2620
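/*
 * Account the cycle counts carried by a sample's branch stack to the branch
 * source locations for annotation, optionally summing them into *total_cycles.
 */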
2621 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2622                           struct perf_sample *sample, bool nonany_branch_mode,
2623                           u64 *total_cycles)
2624 {
2625         struct branch_info *bi;
2626         struct branch_entry *entries = perf_sample__branch_entries(sample);
2627
2628         /* If we have branch cycles, always annotate them. */
2629         if (bs && bs->nr && entries[0].flags.cycles) {
2630                 int i;
2631
2632                 bi = sample__resolve_bstack(sample, al);
2633                 if (bi) {
2634                         struct addr_map_symbol *prev = NULL;
2635
2636                         /*
2637                          * Ignore errors, still want to process the
2638                          * other entries.
2639                          *
2640                          * For non-standard branch modes always
2641                          * force no IPC (prev == NULL)
2642                          *
2643                          * Note that perf stores branches reversed from
2644                          * program order!
2645                          */
2646                         for (i = bs->nr - 1; i >= 0; i--) {
2647                                 addr_map_symbol__account_cycles(&bi[i].from,
2648                                         nonany_branch_mode ? NULL : prev,
2649                                         bi[i].flags.cycles);
2650                                 prev = &bi[i].to;
2651
2652                                 if (total_cycles)
2653                                         *total_cycles += bi[i].flags.cycles;
2654                         }
2655                         free(bi);
2656                 }
2657         }
2658 }
2659
2660 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
2661 {
2662         struct evsel *pos;
2663         size_t ret = 0;
2664
2665         evlist__for_each_entry(evlist, pos) {
2666                 ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
2667                 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2668         }
2669
2670         return ret;
2671 }
2672
2673
2674 u64 hists__total_period(struct hists *hists)
2675 {
2676         return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2677                 hists->stats.total_period;
2678 }
2679
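/*
 * Format the hists title line: sample count, event name(s) and any active
 * UID/thread/DSO/socket filters.
 */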
2680 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2681 {
2682         char unit;
2683         int printed;
2684         const struct dso *dso = hists->dso_filter;
2685         struct thread *thread = hists->thread_filter;
2686         int socket_id = hists->socket_filter;
2687         unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
2688         u64 nr_events = hists->stats.total_period;
2689         struct evsel *evsel = hists_to_evsel(hists);
2690         const char *ev_name = evsel__name(evsel);
2691         char buf[512], sample_freq_str[64] = "";
2692         size_t buflen = sizeof(buf);
2693         char ref[30] = " show reference callgraph, ";
2694         bool enable_ref = false;
2695
2696         if (symbol_conf.filter_relative) {
2697                 nr_samples = hists->stats.nr_non_filtered_samples;
2698                 nr_events = hists->stats.total_non_filtered_period;
2699         }
2700
2701         if (evsel__is_group_event(evsel)) {
2702                 struct evsel *pos;
2703
2704                 evsel__group_desc(evsel, buf, buflen);
2705                 ev_name = buf;
2706
2707                 for_each_group_member(pos, evsel) {
2708                         struct hists *pos_hists = evsel__hists(pos);
2709
2710                         if (symbol_conf.filter_relative) {
2711                                 nr_samples += pos_hists->stats.nr_non_filtered_samples;
2712                                 nr_events += pos_hists->stats.total_non_filtered_period;
2713                         } else {
2714                                 nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
2715                                 nr_events += pos_hists->stats.total_period;
2716                         }
2717                 }
2718         }
2719
2720         if (symbol_conf.show_ref_callgraph &&
2721             strstr(ev_name, "call-graph=no"))
2722                 enable_ref = true;
2723
2724         if (show_freq)
2725                 scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
2726
2727         nr_samples = convert_unit(nr_samples, &unit);
2728         printed = scnprintf(bf, size,
2729                            "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2730                            nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2731                            ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2732
2733
2734         if (hists->uid_filter_str)
2735                 printed += scnprintf(bf + printed, size - printed,
2736                                     ", UID: %s", hists->uid_filter_str);
2737         if (thread) {
2738                 if (hists__has(hists, thread)) {
2739                         printed += scnprintf(bf + printed, size - printed,
2740                                     ", Thread: %s(%d)",
2741                                      (thread->comm_set ? thread__comm_str(thread) : ""),
2742                                     thread->tid);
2743                 } else {
2744                         printed += scnprintf(bf + printed, size - printed,
2745                                     ", Thread: %s",
2746                                      (thread->comm_set ? thread__comm_str(thread) : ""));
2747                 }
2748         }
2749         if (dso)
2750                 printed += scnprintf(bf + printed, size - printed,
2751                                     ", DSO: %s", dso->short_name);
2752         if (socket_id > -1)
2753                 printed += scnprintf(bf + printed, size - printed,
2754                                     ", Processor Socket: %d", socket_id);
2755
2756         return printed;
2757 }
2758
2759 int parse_filter_percentage(const struct option *opt __maybe_unused,
2760                             const char *arg, int unset __maybe_unused)
2761 {
2762         if (!strcmp(arg, "relative"))
2763                 symbol_conf.filter_relative = true;
2764         else if (!strcmp(arg, "absolute"))
2765                 symbol_conf.filter_relative = false;
2766         else {
2767                 pr_debug("Invalid percentage: %s\n", arg);
2768                 return -1;
2769         }
2770
2771         return 0;
2772 }
2773
2774 int perf_hist_config(const char *var, const char *value)
2775 {
2776         if (!strcmp(var, "hist.percentage"))
2777                 return parse_filter_percentage(NULL, value, 0);
2778
2779         return 0;
2780 }
2781
2782 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2783 {
2784         memset(hists, 0, sizeof(*hists));
2785         hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2786         hists->entries_in = &hists->entries_in_array[0];
2787         hists->entries_collapsed = RB_ROOT_CACHED;
2788         hists->entries = RB_ROOT_CACHED;
2789         pthread_mutex_init(&hists->lock, NULL);
2790         hists->socket_filter = -1;
2791         hists->hpp_list = hpp_list;
2792         INIT_LIST_HEAD(&hists->hpp_formats);
2793         return 0;
2794 }
2795
2796 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2797 {
2798         struct rb_node *node;
2799         struct hist_entry *he;
2800
2801         while (!RB_EMPTY_ROOT(&root->rb_root)) {
2802                 node = rb_first_cached(root);
2803                 rb_erase_cached(node, root);
2804
2805                 he = rb_entry(node, struct hist_entry, rb_node_in);
2806                 hist_entry__delete(he);
2807         }
2808 }
2809
2810 static void hists__delete_all_entries(struct hists *hists)
2811 {
2812         hists__delete_entries(hists);
2813         hists__delete_remaining_entries(&hists->entries_in_array[0]);
2814         hists__delete_remaining_entries(&hists->entries_in_array[1]);
2815         hists__delete_remaining_entries(&hists->entries_collapsed);
2816 }
2817
2818 static void hists_evsel__exit(struct evsel *evsel)
2819 {
2820         struct hists *hists = evsel__hists(evsel);
2821         struct perf_hpp_fmt *fmt, *pos;
2822         struct perf_hpp_list_node *node, *tmp;
2823
2824         hists__delete_all_entries(hists);
2825
2826         list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2827                 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2828                         list_del_init(&fmt->list);
2829                         free(fmt);
2830                 }
2831                 list_del_init(&node->list);
2832                 free(node);
2833         }
2834 }
2835
2836 static int hists_evsel__init(struct evsel *evsel)
2837 {
2838         struct hists *hists = evsel__hists(evsel);
2839
2840         __hists__init(hists, &perf_hpp_list);
2841         return 0;
2842 }
2843
2844 /*
2845  * hists_evsel__exit() above frees the hist_entries stored in the rbtrees
2846  * when an evsel is destroyed.
2847  */
2848
2849 int hists__init(void)
2850 {
2851         int err = evsel__object_config(sizeof(struct hists_evsel),
2852                                        hists_evsel__init, hists_evsel__exit);
2853         if (err)
2854                 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2855
2856         return err;
2857 }
2858
2859 void perf_hpp_list__init(struct perf_hpp_list *list)
2860 {
2861         INIT_LIST_HEAD(&list->fields);
2862         INIT_LIST_HEAD(&list->sorts);
2863 }