// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

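/*
 * Make sure the given DSO column is wide enough to print an unresolved
 * raw address (BITS_PER_LONG / 4 hex digits), unless the user pinned
 * the column widths, set a field separator, or is filtering by DSO.
 */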
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        if (h->block_info)
                return;
        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose > 0)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 8);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.ms.sym) {
                        symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.ms.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.ms.sym) {
                        symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.ms.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }

                if (h->branch_info->srcline_from)
                        hists__new_col_len(hists, HISTC_SRCLINE_FROM,
                                        strlen(h->branch_info->srcline_from));
                if (h->branch_info->srcline_to)
                        hists__new_col_len(hists, HISTC_SRCLINE_TO,
                                        strlen(h->branch_info->srcline_to));
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.ms.sym) {
                        symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.ms.sym) {
                        symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.ms.map) {
                        symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }

                hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
                                   unresolved_col_width + 4 + 2);

        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CGROUP, 6);
        hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
        if (symbol_conf.nanosecs)
                hists__new_col_len(hists, HISTC_TIME, 16);
        else
                hists__new_col_len(hists, HISTC_TIME, 12);

        if (h->srcline) {
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
                hists__new_col_len(hists, HISTC_SRCLINE, len);
        }

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

        if (h->cgroup) {
                const char *cgrp_name = "unknown";
                struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
                                                   h->cgroup);
                if (cgrp != NULL)
                        cgrp_name = cgrp->name;

                hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
        }
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

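/*
 * Account the period to the stat bucket matching the sample's cpumode:
 * host kernel/user or guest kernel/user.
 */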
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

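/*
 * Round a sample timestamp down to the start of its time quantum
 * (symbol_conf.time_quantum), so that samples group into fixed-size
 * time buckets.
 */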
static long hist_time(unsigned long htime)
{
        unsigned long time_quantum = symbol_conf.time_quantum;

        if (time_quantum)
                return (htime / time_quantum) * time_quantum;
        return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

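/*
 * Age out the counts by an exponential decay factor of 7/8, used
 * e.g. by 'perf top' to fade entries that stop getting samples.
 */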
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

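/*
 * Decay an entry and, in hierarchy mode, its children.  Returns true
 * when the period has decayed to zero, i.e. the entry can be deleted.
 */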
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first_cached(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root_cached *root_in;
        struct rb_root_cached *root_out;

        if (he->parent_he) {
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                if (hists__has(hists, need_collapse))
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase_cached(&he->rb_node_in, root_in);
        rb_erase_cached(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int i = 0;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (i == idx)
                        return n;

                next = rb_next(&n->rb_node);
                i++;
        }

        return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
                            bool sample_self,
                            size_t callchain_size)
{
        *he = *template;
        he->callchain_size = callchain_size;

        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
                if (he->stat_acc == NULL)
                        return -ENOMEM;
                memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                if (!sample_self)
                        memset(&he->stat, 0, sizeof(he->stat));
        }

        map__get(he->ms.map);

        if (he->branch_info) {
                /*
                 * This branch info is (a part of) allocated from
                 * sample__resolve_bstack() and will be freed after
                 * adding new entries.  So we need to save a copy.
                 */
                he->branch_info = malloc(sizeof(*he->branch_info));
                if (he->branch_info == NULL)
                        goto err;

                memcpy(he->branch_info, template->branch_info,
                       sizeof(*he->branch_info));

                map__get(he->branch_info->from.ms.map);
                map__get(he->branch_info->to.ms.map);
        }

        if (he->mem_info) {
                map__get(he->mem_info->iaddr.ms.map);
                map__get(he->mem_info->daddr.ms.map);
        }

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_init(he->callchain);

        if (he->raw_data) {
                he->raw_data = memdup(he->raw_data, he->raw_size);
                if (he->raw_data == NULL)
                        goto err_infos;
        }

        if (he->srcline) {
                he->srcline = strdup(he->srcline);
                if (he->srcline == NULL)
                        goto err_rawdata;
        }

        if (symbol_conf.res_sample) {
                he->res_samples = calloc(symbol_conf.res_sample,
                                         sizeof(struct res_sample));
                if (!he->res_samples)
                        goto err_srcline;
        }

        INIT_LIST_HEAD(&he->pairs.node);
        thread__get(he->thread);
        he->hroot_in  = RB_ROOT_CACHED;
        he->hroot_out = RB_ROOT_CACHED;

        if (!symbol_conf.report_hierarchy)
                he->leaf = true;

        return 0;

err_srcline:
        zfree(&he->srcline);

err_rawdata:
        zfree(&he->raw_data);

err_infos:
        if (he->branch_info) {
                map__put(he->branch_info->from.ms.map);
                map__put(he->branch_info->to.ms.map);
                zfree(&he->branch_info);
        }
        if (he->mem_info) {
                map__put(he->mem_info->iaddr.ms.map);
                map__put(he->mem_info->daddr.ms.map);
        }
err:
        map__zput(he->ms.map);
        zfree(&he->stat_acc);
        return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
        return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
        free(ptr);
}

static struct hist_entry_ops default_ops = {
        .new    = hist_entry__zalloc,
        .free   = hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        struct hist_entry_ops *ops = template->ops;
        size_t callchain_size = 0;
        struct hist_entry *he;
        int err = 0;

        if (!ops)
                ops = template->ops = &default_ops;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = ops->new(callchain_size);
        if (he) {
                err = hist_entry__init(he, template, sample_self, callchain_size);
                if (err) {
                        ops->free(he);
                        he = NULL;
                }
        }

        return he;
}

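/*
 * When "other" samples are excluded (symbol_conf.exclude_other), mark
 * entries whose parent symbol could not be resolved as filtered.
 */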
static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
        if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
                return;

        he->hists->callchain_period += period;
        if (!he->filtered)
                he->hists->callchain_non_filtered_period += period;
}

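/*
 * Find an entry matching @entry in the current input tree and
 * aggregate the new period into it, or allocate and insert a new
 * hist_entry if no match exists.
 */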
static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;
        bool leftmost = true;

        p = &hists->entries_in->rb_root.rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        mem_info__zput(entry->mem_info);

                        block_info__zput(entry->block_info);

                        /*
                         * If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

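/*
 * Return a random number in [0, high) without modulo bias: reject and
 * redraw values below -high % high (i.e. 2^32 mod high in unsigned
 * arithmetic), so that the remaining range divides evenly by high.
 */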
static unsigned random_max(unsigned high)
{
        unsigned thresh = -high % high;

        for (;;) {
                unsigned r = random();

                if (r >= thresh)
                        return r % high;
        }
}

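/*
 * Record a representative sample (time/cpu/tid) for the entry: fill
 * the res_samples array first, then overwrite a random slot once the
 * array is full.
 */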
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
        struct res_sample *r;
        int j;

        if (he->num_res < symbol_conf.res_sample) {
                j = he->num_res++;
        } else {
                j = random_max(symbol_conf.res_sample);
        }
        r = &he->res_samples[j];
        r->time = sample->time;
        r->cpu = sample->cpu;
        r->tid = sample->tid;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
                   struct addr_location *al,
                   struct symbol *sym_parent,
                   struct branch_info *bi,
                   struct mem_info *mi,
                   struct block_info *block_info,
                   struct perf_sample *sample,
                   bool sample_self,
                   struct hist_entry_ops *ops)
{
        struct namespaces *ns = thread__namespaces(al->thread);
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .cgroup_id = {
                        .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
                        .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
                },
                .cgroup = sample->cgroup,
                .ms = {
                        .maps   = al->maps,
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .srcline = (char *) al->srcline,
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight = sample->weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .block_info = block_info,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
                .time = hist_time(sample->time),
        }, *he = hists__findnew_entry(hists, &entry, al, sample_self);

        if (!hists->has_callchains && he && he->callchain_size != 0)
                hists->has_callchains = true;
        if (he && symbol_conf.res_sample)
                hists__res_sample(he, sample);
        return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
                                    struct addr_location *al,
                                    struct symbol *sym_parent,
                                    struct branch_info *bi,
                                    struct mem_info *mi,
                                    struct perf_sample *sample,
                                    bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                                  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
                                        struct hist_entry_ops *ops,
                                        struct addr_location *al,
                                        struct symbol *sym_parent,
                                        struct branch_info *bi,
                                        struct mem_info *mi,
                                        struct perf_sample *sample,
                                        bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                                  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
                                          struct addr_location *al,
                                          struct block_info *block_info)
{
        struct hist_entry entry = {
                .block_info = block_info,
                .hists = hists,
                .ms = {
                        .maps = al->maps,
                        .map = al->map,
                        .sym = al->sym,
                },
        }, *he = hists__findnew_entry(hists, &entry, al, false);

        return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort(), which is based solely
         * on periods.  We want sorting to be done on nr_events * weight,
         * and this is achieved indirectly by passing period=weight here
         * and in the he_stat__add_period() function.
         */
        sample->period = cost;

        he = hists__add_entry(hists, al, iter->parent, NULL, mi,
                              sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
                             struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->maps = bi[i].to.ms.maps;
        al->map = bi[i].to.ms.map;
        al->sym = bi[i].to.ms.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

        he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursion so that they're
         * cumulated only once, to prevent entries from exceeding 100%
         * overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .maps = al->maps,
                        .map = al->map,
                        .sym = al->sym,
                },
                .srcline = (char *) al->srcline,
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

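/*
 * Feed one sample through the iterator callbacks: resolve its
 * callchain, then call prepare_entry and add_single_entry, loop over
 * next_entry/add_next_entry for any further entries (e.g. each branch
 * stack entry or callchain node), invoke add_entry_cb after each
 * insertion if set, and always finish with finish_entry.
 */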
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;
        struct map *alm = NULL;

        if (al)
                alm = map__get(al->map);

        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err) {
                map__put(alm);
                return err;
        }

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        map__put(alm);

        return err;
}

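/*
 * hist_entry__cmp() and hist_entry__collapse() walk the configured
 * sort keys in order, using each format's cmp or collapse callback
 * respectively, and return the first non-zero comparison result.
 */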
1223 int64_t
1224 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1225 {
1226         struct hists *hists = left->hists;
1227         struct perf_hpp_fmt *fmt;
1228         int64_t cmp = 0;
1229
1230         hists__for_each_sort_list(hists, fmt) {
1231                 if (perf_hpp__is_dynamic_entry(fmt) &&
1232                     !perf_hpp__defined_dynamic_entry(fmt, hists))
1233                         continue;
1234
1235                 cmp = fmt->cmp(fmt, left, right);
1236                 if (cmp)
1237                         break;
1238         }
1239
1240         return cmp;
1241 }
1242
1243 int64_t
1244 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1245 {
1246         struct hists *hists = left->hists;
1247         struct perf_hpp_fmt *fmt;
1248         int64_t cmp = 0;
1249
1250         hists__for_each_sort_list(hists, fmt) {
1251                 if (perf_hpp__is_dynamic_entry(fmt) &&
1252                     !perf_hpp__defined_dynamic_entry(fmt, hists))
1253                         continue;
1254
1255                 cmp = fmt->collapse(fmt, left, right);
1256                 if (cmp)
1257                         break;
1258         }
1259
1260         return cmp;
1261 }
1262
1263 void hist_entry__delete(struct hist_entry *he)
1264 {
1265         struct hist_entry_ops *ops = he->ops;
1266
1267         thread__zput(he->thread);
1268         map__zput(he->ms.map);
1269
1270         if (he->branch_info) {
1271                 map__zput(he->branch_info->from.ms.map);
1272                 map__zput(he->branch_info->to.ms.map);
1273                 free_srcline(he->branch_info->srcline_from);
1274                 free_srcline(he->branch_info->srcline_to);
1275                 zfree(&he->branch_info);
1276         }
1277
1278         if (he->mem_info) {
1279                 map__zput(he->mem_info->iaddr.ms.map);
1280                 map__zput(he->mem_info->daddr.ms.map);
1281                 mem_info__zput(he->mem_info);
1282         }
1283
1284         if (he->block_info)
1285                 block_info__zput(he->block_info);
1286
1287         zfree(&he->res_samples);
1288         zfree(&he->stat_acc);
1289         free_srcline(he->srcline);
1290         if (he->srcfile && he->srcfile[0])
1291                 zfree(&he->srcfile);
1292         free_callchain(he->callchain);
1293         zfree(&he->trace_output);
1294         zfree(&he->raw_data);
1295         ops->free(he);
1296 }
1297
1298 /*
1299  * If this is not the last column, then we need to pad it according to the
1300  * pre-calculated max length for this column, otherwise don't bother adding
1301  * spaces because that would break viewing this with, for instance, 'less',
1302  * that would show tons of trailing spaces when a long C++ demangled method
1303  * names is sampled.
1304 */
1305 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1306                                    struct perf_hpp_fmt *fmt, int printed)
1307 {
1308         if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1309                 const int width = fmt->width(fmt, hpp, he->hists);
1310                 if (printed < width) {
1311                         advance_hpp(hpp, printed);
1312                         printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1313                 }
1314         }
1315
1316         return printed;
1317 }
1318
1319 /*
1320  * collapse the histogram
1321  */
1322
1323 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1324 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1325                                        enum hist_filter type);
1326
1327 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1328
1329 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1330 {
1331         return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1332 }
1333
1334 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1335                                                 enum hist_filter type,
1336                                                 fmt_chk_fn check)
1337 {
1338         struct perf_hpp_fmt *fmt;
1339         bool type_match = false;
1340         struct hist_entry *parent = he->parent_he;
1341
1342         switch (type) {
1343         case HIST_FILTER__THREAD:
1344                 if (symbol_conf.comm_list == NULL &&
1345                     symbol_conf.pid_list == NULL &&
1346                     symbol_conf.tid_list == NULL)
1347                         return;
1348                 break;
1349         case HIST_FILTER__DSO:
1350                 if (symbol_conf.dso_list == NULL)
1351                         return;
1352                 break;
1353         case HIST_FILTER__SYMBOL:
1354                 if (symbol_conf.sym_list == NULL)
1355                         return;
1356                 break;
1357         case HIST_FILTER__PARENT:
1358         case HIST_FILTER__GUEST:
1359         case HIST_FILTER__HOST:
1360         case HIST_FILTER__SOCKET:
1361         case HIST_FILTER__C2C:
1362         default:
1363                 return;
1364         }
1365
1366         /* if it's filtered by own fmt, it has to have filter bits */
1367         perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1368                 if (check(fmt)) {
1369                         type_match = true;
1370                         break;
1371                 }
1372         }
1373
1374         if (type_match) {
1375                 /*
1376                  * If the filter is for current level entry, propagate
1377                  * filter marker to parents.  The marker bit was
1378                  * already set by default so it only needs to clear
1379                  * non-filtered entries.
1380                  */
1381                 if (!(he->filtered & (1 << type))) {
1382                         while (parent) {
1383                                 parent->filtered &= ~(1 << type);
1384                                 parent = parent->parent_he;
1385                         }
1386                 }
1387         } else {
1388                 /*
1389                  * If current entry doesn't have matching formats, set
1390                  * filter marker for upper level entries.  it will be
1391                  * cleared if its lower level entries is not filtered.
1392                  *
1393                  * For lower-level entries, it inherits parent's
1394                  * filter bit so that lower level entries of a
1395                  * non-filtered entry won't set the filter marker.
1396                  */
1397                 if (parent == NULL)
1398                         he->filtered |= (1 << type);
1399                 else
1400                         he->filtered |= (parent->filtered & (1 << type));
1401         }
1402 }
1403
1404 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1405 {
1406         hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1407                                             check_thread_entry);
1408
1409         hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1410                                             perf_hpp__is_dso_entry);
1411
1412         hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1413                                             perf_hpp__is_sym_entry);
1414
1415         hists__apply_filters(he->hists, he);
1416 }
1417
1418 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1419                                                  struct rb_root_cached *root,
1420                                                  struct hist_entry *he,
1421                                                  struct hist_entry *parent_he,
1422                                                  struct perf_hpp_list *hpp_list)
1423 {
1424         struct rb_node **p = &root->rb_root.rb_node;
1425         struct rb_node *parent = NULL;
1426         struct hist_entry *iter, *new;
1427         struct perf_hpp_fmt *fmt;
1428         int64_t cmp;
1429         bool leftmost = true;
1430
1431         while (*p != NULL) {
1432                 parent = *p;
1433                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1434
1435                 cmp = 0;
1436                 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1437                         cmp = fmt->collapse(fmt, iter, he);
1438                         if (cmp)
1439                                 break;
1440                 }
1441
1442                 if (!cmp) {
1443                         he_stat__add_stat(&iter->stat, &he->stat);
1444                         return iter;
1445                 }
1446
1447                 if (cmp < 0)
1448                         p = &parent->rb_left;
1449                 else {
1450                         p = &parent->rb_right;
1451                         leftmost = false;
1452                 }
1453         }
1454
1455         new = hist_entry__new(he, true);
1456         if (new == NULL)
1457                 return NULL;
1458
1459         hists->nr_entries++;
1460
1461         /* save related format list for output */
1462         new->hpp_list = hpp_list;
1463         new->parent_he = parent_he;
1464
1465         hist_entry__apply_hierarchy_filters(new);
1466
1467         /* some fields are now passed to 'new' */
1468         perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1469                 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1470                         he->trace_output = NULL;
1471                 else
1472                         new->trace_output = NULL;
1473
1474                 if (perf_hpp__is_srcline_entry(fmt))
1475                         he->srcline = NULL;
1476                 else
1477                         new->srcline = NULL;
1478
1479                 if (perf_hpp__is_srcfile_entry(fmt))
1480                         he->srcfile = NULL;
1481                 else
1482                         new->srcfile = NULL;
1483         }
1484
1485         rb_link_node(&new->rb_node_in, parent, p);
1486         rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1487         return new;
1488 }
1489
1490 static int hists__hierarchy_insert_entry(struct hists *hists,
1491                                          struct rb_root_cached *root,
1492                                          struct hist_entry *he)
1493 {
1494         struct perf_hpp_list_node *node;
1495         struct hist_entry *new_he = NULL;
1496         struct hist_entry *parent = NULL;
1497         int depth = 0;
1498         int ret = 0;
1499
1500         list_for_each_entry(node, &hists->hpp_formats, list) {
1501                 /* skip period (overhead) and elided columns */
1502                 if (node->level == 0 || node->skip)
1503                         continue;
1504
1505                 /* insert copy of 'he' for each fmt into the hierarchy */
1506                 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1507                 if (new_he == NULL) {
1508                         ret = -1;
1509                         break;
1510                 }
1511
1512                 root = &new_he->hroot_in;
1513                 new_he->depth = depth++;
1514                 parent = new_he;
1515         }
1516
1517         if (new_he) {
1518                 new_he->leaf = true;
1519
1520                 if (hist_entry__has_callchains(new_he) &&
1521                     symbol_conf.use_callchain) {
1522                         callchain_cursor_reset(&callchain_cursor);
1523                         if (callchain_merge(&callchain_cursor,
1524                                             new_he->callchain,
1525                                             he->callchain) < 0)
1526                                 ret = -1;
1527                 }
1528         }
1529
1530         /* 'he' is no longer used */
1531         hist_entry__delete(he);
1532
1533         /* return 0 (or -1 on error), never 1: filters were already applied */
1534         return ret;
1535 }
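
/*
 * Illustrative note (not in the original source): with e.g.
 * "perf report --hierarchy -s comm,dso,sym" the loop above creates one
 * nested entry per sort level, roughly:
 *
 *      comm  (depth 0)
 *      `- dso  (depth 1)
 *         `- sym  (depth 2, leaf, owns the merged callchain)
 */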
1536
1537 static int hists__collapse_insert_entry(struct hists *hists,
1538                                         struct rb_root_cached *root,
1539                                         struct hist_entry *he)
1540 {
1541         struct rb_node **p = &root->rb_root.rb_node;
1542         struct rb_node *parent = NULL;
1543         struct hist_entry *iter;
1544         int64_t cmp;
1545         bool leftmost = true;
1546
1547         if (symbol_conf.report_hierarchy)
1548                 return hists__hierarchy_insert_entry(hists, root, he);
1549
1550         while (*p != NULL) {
1551                 parent = *p;
1552                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1553
1554                 cmp = hist_entry__collapse(iter, he);
1555
1556                 if (!cmp) {
1557                         int ret = 0;
1558
1559                         he_stat__add_stat(&iter->stat, &he->stat);
1560                         if (symbol_conf.cumulate_callchain)
1561                                 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1562
1563                         if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1564                                 callchain_cursor_reset(&callchain_cursor);
1565                                 if (callchain_merge(&callchain_cursor,
1566                                                     iter->callchain,
1567                                                     he->callchain) < 0)
1568                                         ret = -1;
1569                         }
1570                         hist_entry__delete(he);
1571                         return ret;
1572                 }
1573
1574                 if (cmp < 0)
1575                         p = &(*p)->rb_left;
1576                 else {
1577                         p = &(*p)->rb_right;
1578                         leftmost = false;
1579                 }
1580         }
1581         hists->nr_entries++;
1582
1583         rb_link_node(&he->rb_node_in, parent, p);
1584         rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1585         return 1;
1586 }
1587
1588 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1589 {
1590         struct rb_root_cached *root;
1591
1592         pthread_mutex_lock(&hists->lock);
1593
1594         root = hists->entries_in;
1595         if (++hists->entries_in > &hists->entries_in_array[1])
1596                 hists->entries_in = &hists->entries_in_array[0];
1597
1598         pthread_mutex_unlock(&hists->lock);
1599
1600         return root;
1601 }
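
/*
 * Illustrative sketch, not part of the original file: how a consumer is
 * expected to use the rotation above.  'entries_in' ping-pongs between
 * entries_in_array[0] and [1] under hists->lock, so the returned tree can
 * be drained while new entries keep flowing into the other slot.  The
 * function name is hypothetical.
 */
static void __maybe_unused example_drain_rotated_entries(struct hists *hists)
{
        struct rb_root_cached *root = hists__get_rotate_entries_in(hists);
        struct rb_node *nd;

        /* safe to walk unlocked: inserters now target the other slot */
        for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
                struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node_in);

                (void)he;       /* e.g. collapse 'he' into hists->entries_collapsed */
        }
}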
1602
1603 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1604 {
1605         hists__filter_entry_by_dso(hists, he);
1606         hists__filter_entry_by_thread(hists, he);
1607         hists__filter_entry_by_symbol(hists, he);
1608         hists__filter_entry_by_socket(hists, he);
1609 }
1610
1611 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1612 {
1613         struct rb_root_cached *root;
1614         struct rb_node *next;
1615         struct hist_entry *n;
1616         int ret;
1617
1618         if (!hists__has(hists, need_collapse))
1619                 return 0;
1620
1621         hists->nr_entries = 0;
1622
1623         root = hists__get_rotate_entries_in(hists);
1624
1625         next = rb_first_cached(root);
1626
1627         while (next) {
1628                 if (session_done())
1629                         break;
1630                 n = rb_entry(next, struct hist_entry, rb_node_in);
1631                 next = rb_next(&n->rb_node_in);
1632
1633                 rb_erase_cached(&n->rb_node_in, root);
1634                 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1635                 if (ret < 0)
1636                         return -1;
1637
1638                 if (ret) {
1639                         /*
1640                          * If it wasn't combined with one of the entries already
1641                          * collapsed, we need to apply the filters that may have
1642                          * been set by, say, the hist_browser.
1643                          */
1644                         hists__apply_filters(hists, n);
1645                 }
1646                 if (prog)
1647                         ui_progress__update(prog, 1);
1648         }
1649         return 0;
1650 }
1651
1652 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1653 {
1654         struct hists *hists = a->hists;
1655         struct perf_hpp_fmt *fmt;
1656         int64_t cmp = 0;
1657
1658         hists__for_each_sort_list(hists, fmt) {
1659                 if (perf_hpp__should_skip(fmt, a->hists))
1660                         continue;
1661
1662                 cmp = fmt->sort(fmt, a, b);
1663                 if (cmp)
1664                         break;
1665         }
1666
1667         return cmp;
1668 }
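
/*
 * Illustrative sketch, not part of the original file: the loop above
 * implements "first differing sort key wins".  The same pattern over a
 * plain array of keys, for clarity (the function is hypothetical):
 */
static int64_t __maybe_unused example_chained_cmp(const int64_t *a,
                                                  const int64_t *b, int nr_keys)
{
        int64_t cmp = 0;
        int i;

        for (i = 0; i < nr_keys; i++) {
                cmp = a[i] - b[i];      /* analogous to fmt->sort(fmt, a, b) */
                if (cmp)
                        break;          /* first unequal key decides the order */
        }
        return cmp;
}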
1669
1670 static void hists__reset_filter_stats(struct hists *hists)
1671 {
1672         hists->nr_non_filtered_entries = 0;
1673         hists->stats.total_non_filtered_period = 0;
1674 }
1675
1676 void hists__reset_stats(struct hists *hists)
1677 {
1678         hists->nr_entries = 0;
1679         hists->stats.total_period = 0;
1680
1681         hists__reset_filter_stats(hists);
1682 }
1683
1684 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1685 {
1686         hists->nr_non_filtered_entries++;
1687         hists->stats.total_non_filtered_period += h->stat.period;
1688 }
1689
1690 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1691 {
1692         if (!h->filtered)
1693                 hists__inc_filter_stats(hists, h);
1694
1695         hists->nr_entries++;
1696         hists->stats.total_period += h->stat.period;
1697 }
1698
1699 static void hierarchy_recalc_total_periods(struct hists *hists)
1700 {
1701         struct rb_node *node;
1702         struct hist_entry *he;
1703
1704         node = rb_first_cached(&hists->entries);
1705
1706         hists->stats.total_period = 0;
1707         hists->stats.total_non_filtered_period = 0;
1708
1709         /*
1710          * recalculate the total period using top-level entries only,
1711          * since lower level entries only see non-filtered entries
1712          * while upper level entries hold the sum of both.
1713          */
1714         while (node) {
1715                 he = rb_entry(node, struct hist_entry, rb_node);
1716                 node = rb_next(node);
1717
1718                 hists->stats.total_period += he->stat.period;
1719                 if (!he->filtered)
1720                         hists->stats.total_non_filtered_period += he->stat.period;
1721         }
1722 }
1723
1724 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1725                                           struct hist_entry *he)
1726 {
1727         struct rb_node **p = &root->rb_root.rb_node;
1728         struct rb_node *parent = NULL;
1729         struct hist_entry *iter;
1730         struct perf_hpp_fmt *fmt;
1731         bool leftmost = true;
1732
1733         while (*p != NULL) {
1734                 parent = *p;
1735                 iter = rb_entry(parent, struct hist_entry, rb_node);
1736
1737                 if (hist_entry__sort(he, iter) > 0)
1738                         p = &parent->rb_left;
1739                 else {
1740                         p = &parent->rb_right;
1741                         leftmost = false;
1742                 }
1743         }
1744
1745         rb_link_node(&he->rb_node, parent, p);
1746         rb_insert_color_cached(&he->rb_node, root, leftmost);
1747
1748         /* update column width of dynamic entry */
1749         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1750                 if (perf_hpp__is_dynamic_entry(fmt))
1751                         fmt->sort(fmt, he, NULL);
1752         }
1753 }
1754
1755 static void hists__hierarchy_output_resort(struct hists *hists,
1756                                            struct ui_progress *prog,
1757                                            struct rb_root_cached *root_in,
1758                                            struct rb_root_cached *root_out,
1759                                            u64 min_callchain_hits,
1760                                            bool use_callchain)
1761 {
1762         struct rb_node *node;
1763         struct hist_entry *he;
1764
1765         *root_out = RB_ROOT_CACHED;
1766         node = rb_first_cached(root_in);
1767
1768         while (node) {
1769                 he = rb_entry(node, struct hist_entry, rb_node_in);
1770                 node = rb_next(node);
1771
1772                 hierarchy_insert_output_entry(root_out, he);
1773
1774                 if (prog)
1775                         ui_progress__update(prog, 1);
1776
1777                 hists->nr_entries++;
1778                 if (!he->filtered) {
1779                         hists->nr_non_filtered_entries++;
1780                         hists__calc_col_len(hists, he);
1781                 }
1782
1783                 if (!he->leaf) {
1784                         hists__hierarchy_output_resort(hists, prog,
1785                                                        &he->hroot_in,
1786                                                        &he->hroot_out,
1787                                                        min_callchain_hits,
1788                                                        use_callchain);
1789                         continue;
1790                 }
1791
1792                 if (!use_callchain)
1793                         continue;
1794
1795                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1796                         u64 total = he->stat.period;
1797
1798                         if (symbol_conf.cumulate_callchain)
1799                                 total = he->stat_acc->period;
1800
1801                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1802                 }
1803
1804                 callchain_param.sort(&he->sorted_chain, he->callchain,
1805                                      min_callchain_hits, &callchain_param);
1806         }
1807 }
1808
1809 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1810                                          struct hist_entry *he,
1811                                          u64 min_callchain_hits,
1812                                          bool use_callchain)
1813 {
1814         struct rb_node **p = &entries->rb_root.rb_node;
1815         struct rb_node *parent = NULL;
1816         struct hist_entry *iter;
1817         struct perf_hpp_fmt *fmt;
1818         bool leftmost = true;
1819
1820         if (use_callchain) {
1821                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1822                         u64 total = he->stat.period;
1823
1824                         if (symbol_conf.cumulate_callchain)
1825                                 total = he->stat_acc->period;
1826
1827                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1828                 }
1829                 callchain_param.sort(&he->sorted_chain, he->callchain,
1830                                       min_callchain_hits, &callchain_param);
1831         }
1832
1833         while (*p != NULL) {
1834                 parent = *p;
1835                 iter = rb_entry(parent, struct hist_entry, rb_node);
1836
1837                 if (hist_entry__sort(he, iter) > 0)
1838                         p = &(*p)->rb_left;
1839                 else {
1840                         p = &(*p)->rb_right;
1841                         leftmost = false;
1842                 }
1843         }
1844
1845         rb_link_node(&he->rb_node, parent, p);
1846         rb_insert_color_cached(&he->rb_node, entries, leftmost);
1847
1848         perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1849                 if (perf_hpp__is_dynamic_entry(fmt) &&
1850                     perf_hpp__defined_dynamic_entry(fmt, he->hists))
1851                         fmt->sort(fmt, he, NULL);  /* update column width */
1852         }
1853 }
1854
1855 static void output_resort(struct hists *hists, struct ui_progress *prog,
1856                           bool use_callchain, hists__resort_cb_t cb,
1857                           void *cb_arg)
1858 {
1859         struct rb_root_cached *root;
1860         struct rb_node *next;
1861         struct hist_entry *n;
1862         u64 callchain_total;
1863         u64 min_callchain_hits;
1864
1865         callchain_total = hists->callchain_period;
1866         if (symbol_conf.filter_relative)
1867                 callchain_total = hists->callchain_non_filtered_period;
1868
1869         min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1870
1871         hists__reset_stats(hists);
1872         hists__reset_col_len(hists);
1873
1874         if (symbol_conf.report_hierarchy) {
1875                 hists__hierarchy_output_resort(hists, prog,
1876                                                &hists->entries_collapsed,
1877                                                &hists->entries,
1878                                                min_callchain_hits,
1879                                                use_callchain);
1880                 hierarchy_recalc_total_periods(hists);
1881                 return;
1882         }
1883
1884         if (hists__has(hists, need_collapse))
1885                 root = &hists->entries_collapsed;
1886         else
1887                 root = hists->entries_in;
1888
1889         next = rb_first_cached(root);
1890         hists->entries = RB_ROOT_CACHED;
1891
1892         while (next) {
1893                 n = rb_entry(next, struct hist_entry, rb_node_in);
1894                 next = rb_next(&n->rb_node_in);
1895
1896                 if (cb && cb(n, cb_arg))
1897                         continue;
1898
1899                 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1900                 hists__inc_stats(hists, n);
1901
1902                 if (!n->filtered)
1903                         hists__calc_col_len(hists, n);
1904
1905                 if (prog)
1906                         ui_progress__update(prog, 1);
1907         }
1908 }
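
/*
 * Illustrative sketch, not part of the original file: the 'cb' hook above
 * lets a caller drop entries from the output tree; a non-zero return skips
 * the entry.  A hypothetical callback that hides filtered entries:
 */
static int __maybe_unused example_skip_filtered_cb(struct hist_entry *he,
                                                   void *arg __maybe_unused)
{
        return he->filtered ? 1 : 0;    /* non-zero: leave out of the output */
}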
1909
1910 void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1911                                   hists__resort_cb_t cb, void *cb_arg)
1912 {
1913         bool use_callchain;
1914
1915         if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1916                 use_callchain = evsel__has_callchain(evsel);
1917         else
1918                 use_callchain = symbol_conf.use_callchain;
1919
1920         use_callchain |= symbol_conf.show_branchflag_count;
1921
1922         output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1923 }
1924
1925 void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1926 {
1927         perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
1928 }
1929
1930 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1931 {
1932         output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1933 }
1934
1935 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1936                              hists__resort_cb_t cb)
1937 {
1938         output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1939 }
1940
1941 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1942 {
1943         if (he->leaf || hmd == HMD_FORCE_SIBLING)
1944                 return false;
1945
1946         if (he->unfolded || hmd == HMD_FORCE_CHILD)
1947                 return true;
1948
1949         return false;
1950 }
1951
1952 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1953 {
1954         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1955
1956         while (can_goto_child(he, HMD_NORMAL)) {
1957                 node = rb_last(&he->hroot_out.rb_root);
1958                 he = rb_entry(node, struct hist_entry, rb_node);
1959         }
1960         return node;
1961 }
1962
1963 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1964 {
1965         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1966
1967         if (can_goto_child(he, hmd))
1968                 node = rb_first_cached(&he->hroot_out);
1969         else
1970                 node = rb_next(node);
1971
1972         while (node == NULL) {
1973                 he = he->parent_he;
1974                 if (he == NULL)
1975                         break;
1976
1977                 node = rb_next(&he->rb_node);
1978         }
1979         return node;
1980 }
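
/*
 * Illustrative sketch, not part of the original file: a depth-first walk
 * over the visible (unfolded) part of the output hierarchy in display
 * order, using the helpers above (the function is hypothetical):
 */
static void __maybe_unused example_walk_hierarchy(struct hists *hists)
{
        struct rb_node *nd = rb_first_cached(&hists->entries);

        while (nd) {
                struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);

                (void)he;       /* e.g. print one row indented by he->depth */
                nd = __rb_hierarchy_next(nd, HMD_NORMAL);
        }
}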
1981
1982 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1983 {
1984         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1985
1986         node = rb_prev(node);
1987         if (node)
1988                 return rb_hierarchy_last(node);
1989
1990         he = he->parent_he;
1991         if (he == NULL)
1992                 return NULL;
1993
1994         return &he->rb_node;
1995 }
1996
1997 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1998 {
1999         struct rb_node *node;
2000         struct hist_entry *child;
2001         float percent;
2002
2003         if (he->leaf)
2004                 return false;
2005
2006         node = rb_first_cached(&he->hroot_out);
2007         child = rb_entry(node, struct hist_entry, rb_node);
2008
2009         while (node && child->filtered) {
2010                 node = rb_next(node);
2011                 child = rb_entry(node, struct hist_entry, rb_node);
2012         }
2013
2014         if (node)
2015                 percent = hist_entry__get_percent_limit(child);
2016         else
2017                 percent = 0;
2018
2019         return node && percent >= limit;
2020 }
2021
2022 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2023                                        enum hist_filter filter)
2024 {
2025         h->filtered &= ~(1 << filter);
2026
2027         if (symbol_conf.report_hierarchy) {
2028                 struct hist_entry *parent = h->parent_he;
2029
2030                 while (parent) {
2031                         he_stat__add_stat(&parent->stat, &h->stat);
2032
2033                         parent->filtered &= ~(1 << filter);
2034
2035                         if (parent->filtered)
2036                                 goto next;
2037
2038                         /* force fold unfiltered entry for simplicity */
2039                         parent->unfolded = false;
2040                         parent->has_no_entry = false;
2041                         parent->row_offset = 0;
2042                         parent->nr_rows = 0;
2043 next:
2044                         parent = parent->parent_he;
2045                 }
2046         }
2047
2048         if (h->filtered)
2049                 return;
2050
2051         /* force fold unfiltered entry for simplicity */
2052         h->unfolded = false;
2053         h->has_no_entry = false;
2054         h->row_offset = 0;
2055         h->nr_rows = 0;
2056
2057         hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2058
2059         hists__inc_filter_stats(hists, h);
2060         hists__calc_col_len(hists, h);
2061 }
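
/*
 * Illustrative sketch, not part of the original file: h->filtered is a
 * bitmask with one bit per enum hist_filter value, so an entry is shown
 * only while no bit is set.  Hypothetical helpers:
 */
static bool __maybe_unused example_entry_is_visible(struct hist_entry *h)
{
        return h->filtered == 0;
}

static bool __maybe_unused example_entry_filtered_by(struct hist_entry *h,
                                                     enum hist_filter filter)
{
        return h->filtered & (1 << filter);
}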
2062
2064 static bool hists__filter_entry_by_dso(struct hists *hists,
2065                                        struct hist_entry *he)
2066 {
2067         if (hists->dso_filter != NULL &&
2068             (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
2069                 he->filtered |= (1 << HIST_FILTER__DSO);
2070                 return true;
2071         }
2072
2073         return false;
2074 }
2075
2076 static bool hists__filter_entry_by_thread(struct hists *hists,
2077                                           struct hist_entry *he)
2078 {
2079         if (hists->thread_filter != NULL &&
2080             he->thread != hists->thread_filter) {
2081                 he->filtered |= (1 << HIST_FILTER__THREAD);
2082                 return true;
2083         }
2084
2085         return false;
2086 }
2087
2088 static bool hists__filter_entry_by_symbol(struct hists *hists,
2089                                           struct hist_entry *he)
2090 {
2091         if (hists->symbol_filter_str != NULL &&
2092             (!he->ms.sym || strstr(he->ms.sym->name,
2093                                    hists->symbol_filter_str) == NULL)) {
2094                 he->filtered |= (1 << HIST_FILTER__SYMBOL);
2095                 return true;
2096         }
2097
2098         return false;
2099 }
2100
2101 static bool hists__filter_entry_by_socket(struct hists *hists,
2102                                           struct hist_entry *he)
2103 {
2104         if ((hists->socket_filter > -1) &&
2105             (he->socket != hists->socket_filter)) {
2106                 he->filtered |= (1 << HIST_FILTER__SOCKET);
2107                 return true;
2108         }
2109
2110         return false;
2111 }
2112
2113 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2114
2115 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2116 {
2117         struct rb_node *nd;
2118
2119         hists->stats.nr_non_filtered_samples = 0;
2120
2121         hists__reset_filter_stats(hists);
2122         hists__reset_col_len(hists);
2123
2124         for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2125                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2126
2127                 if (filter(hists, h))
2128                         continue;
2129
2130                 hists__remove_entry_filter(hists, h, type);
2131         }
2132 }
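
/*
 * Illustrative sketch, not part of the original file: any predicate
 * matching filter_fn_t can be passed to hists__filter_by_type().  A
 * hypothetical filter hiding low-period entries (reusing the symbol
 * filter bit purely for illustration):
 */
static bool __maybe_unused example_filter_small_entries(struct hists *hists __maybe_unused,
                                                        struct hist_entry *he)
{
        if (he->stat.period < 1000) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;    /* stays filtered */
        }
        return false;           /* caller clears the bit via hists__remove_entry_filter() */
}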
2133
2134 static void resort_filtered_entry(struct rb_root_cached *root,
2135                                   struct hist_entry *he)
2136 {
2137         struct rb_node **p = &root->rb_root.rb_node;
2138         struct rb_node *parent = NULL;
2139         struct hist_entry *iter;
2140         struct rb_root_cached new_root = RB_ROOT_CACHED;
2141         struct rb_node *nd;
2142         bool leftmost = true;
2143
2144         while (*p != NULL) {
2145                 parent = *p;
2146                 iter = rb_entry(parent, struct hist_entry, rb_node);
2147
2148                 if (hist_entry__sort(he, iter) > 0)
2149                         p = &(*p)->rb_left;
2150                 else {
2151                         p = &(*p)->rb_right;
2152                         leftmost = false;
2153                 }
2154         }
2155
2156         rb_link_node(&he->rb_node, parent, p);
2157         rb_insert_color_cached(&he->rb_node, root, leftmost);
2158
2159         if (he->leaf || he->filtered)
2160                 return;
2161
2162         nd = rb_first_cached(&he->hroot_out);
2163         while (nd) {
2164                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2165
2166                 nd = rb_next(nd);
2167                 rb_erase_cached(&h->rb_node, &he->hroot_out);
2168
2169                 resort_filtered_entry(&new_root, h);
2170         }
2171
2172         he->hroot_out = new_root;
2173 }
2174
2175 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2176 {
2177         struct rb_node *nd;
2178         struct rb_root_cached new_root = RB_ROOT_CACHED;
2179
2180         hists->stats.nr_non_filtered_samples = 0;
2181
2182         hists__reset_filter_stats(hists);
2183         hists__reset_col_len(hists);
2184
2185         nd = rb_first_cached(&hists->entries);
2186         while (nd) {
2187                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2188                 int ret;
2189
2190                 ret = hist_entry__filter(h, type, arg);
2191
2192                 /*
2193                  * case 1. non-matching type
2194                  * zero out the period, set filter marker and move to child
2195                  */
2196                 if (ret < 0) {
2197                         memset(&h->stat, 0, sizeof(h->stat));
2198                         h->filtered |= (1 << type);
2199
2200                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2201                 }
2202                 /*
2203                  * case 2. matched type (filter out)
2204                  * set filter marker and move to next
2205                  */
2206                 else if (ret == 1) {
2207                         h->filtered |= (1 << type);
2208
2209                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2210                 }
2211                 /*
2212                  * case 3. ok (not filtered)
2213                  * add period to hists and parents, erase the filter marker
2214                  * and move to next sibling
2215                  */
2216                 else {
2217                         hists__remove_entry_filter(hists, h, type);
2218
2219                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2220                 }
2221         }
2222
2223         hierarchy_recalc_total_periods(hists);
2224
2225         /*
2226          * resort the output after applying a new filter, since a filter in a
2227          * lower hierarchy level can change periods in an upper level.
2228          */
2229         nd = rb_first_cached(&hists->entries);
2230         while (nd) {
2231                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2232
2233                 nd = rb_next(nd);
2234                 rb_erase_cached(&h->rb_node, &hists->entries);
2235
2236                 resort_filtered_entry(&new_root, h);
2237         }
2238
2239         hists->entries = new_root;
2240 }
2241
2242 void hists__filter_by_thread(struct hists *hists)
2243 {
2244         if (symbol_conf.report_hierarchy)
2245                 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2246                                         hists->thread_filter);
2247         else
2248                 hists__filter_by_type(hists, HIST_FILTER__THREAD,
2249                                       hists__filter_entry_by_thread);
2250 }
2251
2252 void hists__filter_by_dso(struct hists *hists)
2253 {
2254         if (symbol_conf.report_hierarchy)
2255                 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2256                                         hists->dso_filter);
2257         else
2258                 hists__filter_by_type(hists, HIST_FILTER__DSO,
2259                                       hists__filter_entry_by_dso);
2260 }
2261
2262 void hists__filter_by_symbol(struct hists *hists)
2263 {
2264         if (symbol_conf.report_hierarchy)
2265                 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2266                                         hists->symbol_filter_str);
2267         else
2268                 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2269                                       hists__filter_entry_by_symbol);
2270 }
2271
2272 void hists__filter_by_socket(struct hists *hists)
2273 {
2274         if (symbol_conf.report_hierarchy)
2275                 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2276                                         &hists->socket_filter);
2277         else
2278                 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2279                                       hists__filter_entry_by_socket);
2280 }
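
/*
 * Illustrative sketch, not part of the original file: a browser applies
 * the socket filter by setting the field and re-running the pass above
 * (hypothetical helper):
 */
static void __maybe_unused example_apply_socket_filter(struct hists *hists, int socket)
{
        hists->socket_filter = socket;  /* -1 means "no socket filter" */
        hists__filter_by_socket(hists);
}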
2281
2282 void events_stats__inc(struct events_stats *stats, u32 type)
2283 {
2284         ++stats->nr_events[0];
2285         ++stats->nr_events[type];
2286 }
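
/*
 * Illustrative sketch, not part of the original file: index 0 accumulates
 * every event regardless of type, so the grand total and the per-type
 * counts live in the same array (hypothetical accessor):
 */
static u64 __maybe_unused example_total_events(struct events_stats *stats)
{
        return stats->nr_events[0];
}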
2287
2288 void hists__inc_nr_events(struct hists *hists, u32 type)
2289 {
2290         events_stats__inc(&hists->stats, type);
2291 }
2292
2293 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2294 {
2295         events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2296         if (!filtered)
2297                 hists->stats.nr_non_filtered_samples++;
2298 }
2299
2300 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2301                                                  struct hist_entry *pair)
2302 {
2303         struct rb_root_cached *root;
2304         struct rb_node **p;
2305         struct rb_node *parent = NULL;
2306         struct hist_entry *he;
2307         int64_t cmp;
2308         bool leftmost = true;
2309
2310         if (hists__has(hists, need_collapse))
2311                 root = &hists->entries_collapsed;
2312         else
2313                 root = hists->entries_in;
2314
2315         p = &root->rb_root.rb_node;
2316
2317         while (*p != NULL) {
2318                 parent = *p;
2319                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2320
2321                 cmp = hist_entry__collapse(he, pair);
2322
2323                 if (!cmp)
2324                         goto out;
2325
2326                 if (cmp < 0)
2327                         p = &(*p)->rb_left;
2328                 else {
2329                         p = &(*p)->rb_right;
2330                         leftmost = false;
2331                 }
2332         }
2333
2334         he = hist_entry__new(pair, true);
2335         if (he) {
2336                 memset(&he->stat, 0, sizeof(he->stat));
2337                 he->hists = hists;
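                /* stat_acc points at a single struct he_stat, hence sizeof(he->stat) below */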
2338                 if (symbol_conf.cumulate_callchain)
2339                         memset(he->stat_acc, 0, sizeof(he->stat));
2340                 rb_link_node(&he->rb_node_in, parent, p);
2341                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2342                 hists__inc_stats(hists, he);
2343                 he->dummy = true;
2344         }
2345 out:
2346         return he;
2347 }
2348
2349 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2350                                                     struct rb_root_cached *root,
2351                                                     struct hist_entry *pair)
2352 {
2353         struct rb_node **p;
2354         struct rb_node *parent = NULL;
2355         struct hist_entry *he;
2356         struct perf_hpp_fmt *fmt;
2357         bool leftmost = true;
2358
2359         p = &root->rb_root.rb_node;
2360         while (*p != NULL) {
2361                 int64_t cmp = 0;
2362
2363                 parent = *p;
2364                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2365
2366                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2367                         cmp = fmt->collapse(fmt, he, pair);
2368                         if (cmp)
2369                                 break;
2370                 }
2371                 if (!cmp)
2372                         goto out;
2373
2374                 if (cmp < 0)
2375                         p = &parent->rb_left;
2376                 else {
2377                         p = &parent->rb_right;
2378                         leftmost = false;
2379                 }
2380         }
2381
2382         he = hist_entry__new(pair, true);
2383         if (he) {
2384                 rb_link_node(&he->rb_node_in, parent, p);
2385                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2386
2387                 he->dummy = true;
2388                 he->hists = hists;
2389                 memset(&he->stat, 0, sizeof(he->stat));
2390                 hists__inc_stats(hists, he);
2391         }
2392 out:
2393         return he;
2394 }
2395
2396 static struct hist_entry *hists__find_entry(struct hists *hists,
2397                                             struct hist_entry *he)
2398 {
2399         struct rb_node *n;
2400
2401         if (hists__has(hists, need_collapse))
2402                 n = hists->entries_collapsed.rb_root.rb_node;
2403         else
2404                 n = hists->entries_in->rb_root.rb_node;
2405
2406         while (n) {
2407                 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2408                 int64_t cmp = hist_entry__collapse(iter, he);
2409
2410                 if (cmp < 0)
2411                         n = n->rb_left;
2412                 else if (cmp > 0)
2413                         n = n->rb_right;
2414                 else
2415                         return iter;
2416         }
2417
2418         return NULL;
2419 }
2420
2421 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2422                                                       struct hist_entry *he)
2423 {
2424         struct rb_node *n = root->rb_root.rb_node;
2425
2426         while (n) {
2427                 struct hist_entry *iter;
2428                 struct perf_hpp_fmt *fmt;
2429                 int64_t cmp = 0;
2430
2431                 iter = rb_entry(n, struct hist_entry, rb_node_in);
2432                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2433                         cmp = fmt->collapse(fmt, iter, he);
2434                         if (cmp)
2435                                 break;
2436                 }
2437
2438                 if (cmp < 0)
2439                         n = n->rb_left;
2440                 else if (cmp > 0)
2441                         n = n->rb_right;
2442                 else
2443                         return iter;
2444         }
2445
2446         return NULL;
2447 }
2448
2449 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2450                                    struct rb_root_cached *other_root)
2451 {
2452         struct rb_node *nd;
2453         struct hist_entry *pos, *pair;
2454
2455         for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2456                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2457                 pair = hists__find_hierarchy_entry(other_root, pos);
2458
2459                 if (pair) {
2460                         hist_entry__add_pair(pair, pos);
2461                         hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2462                 }
2463         }
2464 }
2465
2466 /*
2467  * Look for pairs to link to the leader buckets (hist_entries):
2468  */
2469 void hists__match(struct hists *leader, struct hists *other)
2470 {
2471         struct rb_root_cached *root;
2472         struct rb_node *nd;
2473         struct hist_entry *pos, *pair;
2474
2475         if (symbol_conf.report_hierarchy) {
2476                 /* hierarchy report always collapses entries */
2477                 return hists__match_hierarchy(&leader->entries_collapsed,
2478                                               &other->entries_collapsed);
2479         }
2480
2481         if (hists__has(leader, need_collapse))
2482                 root = &leader->entries_collapsed;
2483         else
2484                 root = leader->entries_in;
2485
2486         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2487                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2488                 pair = hists__find_entry(other, pos);
2489
2490                 if (pair)
2491                         hist_entry__add_pair(pair, pos);
2492         }
2493 }
2494
2495 static int hists__link_hierarchy(struct hists *leader_hists,
2496                                  struct hist_entry *parent,
2497                                  struct rb_root_cached *leader_root,
2498                                  struct rb_root_cached *other_root)
2499 {
2500         struct rb_node *nd;
2501         struct hist_entry *pos, *leader;
2502
2503         for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2504                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2505
2506                 if (hist_entry__has_pairs(pos)) {
2507                         bool found = false;
2508
2509                         list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2510                                 if (leader->hists == leader_hists) {
2511                                         found = true;
2512                                         break;
2513                                 }
2514                         }
2515                         if (!found)
2516                                 return -1;
2517                 } else {
2518                         leader = add_dummy_hierarchy_entry(leader_hists,
2519                                                            leader_root, pos);
2520                         if (leader == NULL)
2521                                 return -1;
2522
2523                         /* the parent must come from the leader hierarchy, not from 'pos' */
2524                         leader->parent_he = parent;
2525
2526                         hist_entry__add_pair(pos, leader);
2527                 }
2528
2529                 if (!pos->leaf) {
2530                         if (hists__link_hierarchy(leader_hists, leader,
2531                                                   &leader->hroot_in,
2532                                                   &pos->hroot_in) < 0)
2533                                 return -1;
2534                 }
2535         }
2536         return 0;
2537 }
2538
2539 /*
2540  * Look for entries in the other hists that are not present in the leader;
2541  * if we find them, add a dummy entry to the leader hists with period=0 and
2542  * nr_events=0, to serve as the list header.
2543  */
2544 int hists__link(struct hists *leader, struct hists *other)
2545 {
2546         struct rb_root_cached *root;
2547         struct rb_node *nd;
2548         struct hist_entry *pos, *pair;
2549
2550         if (symbol_conf.report_hierarchy) {
2551                 /* hierarchy report always collapses entries */
2552                 return hists__link_hierarchy(leader, NULL,
2553                                              &leader->entries_collapsed,
2554                                              &other->entries_collapsed);
2555         }
2556
2557         if (hists__has(other, need_collapse))
2558                 root = &other->entries_collapsed;
2559         else
2560                 root = other->entries_in;
2561
2562         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2563                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2564
2565                 if (!hist_entry__has_pairs(pos)) {
2566                         pair = hists__add_dummy_entry(leader, pos);
2567                         if (pair == NULL)
2568                                 return -1;
2569                         hist_entry__add_pair(pos, pair);
2570                 }
2571         }
2572
2573         return 0;
2574 }
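
/*
 * Illustrative sketch, not part of the original file: 'perf diff'-style
 * pairing uses the two helpers in sequence (hypothetical wrapper):
 */
static int __maybe_unused example_pair_hists(struct hists *leader, struct hists *other)
{
        hists__match(leader, other);       /* link entries present in both */
        return hists__link(leader, other); /* add dummies for 'other'-only entries */
}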
2575
2576 int hists__unlink(struct hists *hists)
2577 {
2578         struct rb_root_cached *root;
2579         struct rb_node *nd;
2580         struct hist_entry *pos;
2581
2582         if (hists__has(hists, need_collapse))
2583                 root = &hists->entries_collapsed;
2584         else
2585                 root = hists->entries_in;
2586
2587         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2588                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2589                 list_del_init(&pos->pairs.node);
2590         }
2591
2592         return 0;
2593 }
2594
2595 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2596                           struct perf_sample *sample, bool nonany_branch_mode,
2597                           u64 *total_cycles)
2598 {
2599         struct branch_info *bi;
2600         struct branch_entry *entries = perf_sample__branch_entries(sample);
2601
2602         /* If we have branch cycles, always annotate them. */
2603         if (bs && bs->nr && entries[0].flags.cycles) {
2604                 int i;
2605
2606                 bi = sample__resolve_bstack(sample, al);
2607                 if (bi) {
2608                         struct addr_map_symbol *prev = NULL;
2609
2610                         /*
2611                          * Ignore errors, still want to process the
2612                          * other entries.
2613                          *
2614                          * For non-standard branch modes, always
2615                          * force no IPC (prev == NULL).
2616                          *
2617                          * Note that perf stores branches reversed from
2618                          * program order!
2619                          */
2620                         for (i = bs->nr - 1; i >= 0; i--) {
2621                                 addr_map_symbol__account_cycles(&bi[i].from,
2622                                         nonany_branch_mode ? NULL : prev,
2623                                         bi[i].flags.cycles);
2624                                 prev = &bi[i].to;
2625
2626                                 if (total_cycles)
2627                                         *total_cycles += bi[i].flags.cycles;
2628                         }
2629                         free(bi);
2630                 }
2631         }
2632 }
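
/*
 * Illustrative sketch, not part of the original file: branch records are
 * reported newest first, so walking them in program order means iterating
 * backwards, as the loop above does (hypothetical stand-alone version):
 */
static void __maybe_unused example_walk_branches_in_program_order(struct branch_stack *bs,
                                                                  struct branch_entry *entries)
{
        int i;

        for (i = (int)bs->nr - 1; i >= 0; i--) {
                struct branch_entry *be = &entries[i];

                (void)be;       /* oldest branch first */
        }
}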
2633
2634 size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
2635 {
2636         struct evsel *pos;
2637         size_t ret = 0;
2638
2639         evlist__for_each_entry(evlist, pos) {
2640                 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2641                 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2642         }
2643
2644         return ret;
2645 }
2646
2648 u64 hists__total_period(struct hists *hists)
2649 {
2650         return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2651                 hists->stats.total_period;
2652 }
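
/*
 * Illustrative sketch, not part of the original file: the overhead
 * percentages shown by the UIs are fractions of the total above
 * (hypothetical helper):
 */
static double __maybe_unused example_entry_percent(struct hists *hists,
                                                   struct hist_entry *he)
{
        u64 total = hists__total_period(hists);

        return total ? 100.0 * he->stat.period / total : 0.0;
}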
2653
2654 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2655 {
2656         char unit;
2657         int printed;
2658         const struct dso *dso = hists->dso_filter;
2659         struct thread *thread = hists->thread_filter;
2660         int socket_id = hists->socket_filter;
2661         unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
2662         u64 nr_events = hists->stats.total_period;
2663         struct evsel *evsel = hists_to_evsel(hists);
2664         const char *ev_name = perf_evsel__name(evsel);
2665         char buf[512], sample_freq_str[64] = "";
2666         size_t buflen = sizeof(buf);
2667         char ref[30] = " show reference callgraph, ";
2668         bool enable_ref = false;
2669
2670         if (symbol_conf.filter_relative) {
2671                 nr_samples = hists->stats.nr_non_filtered_samples;
2672                 nr_events = hists->stats.total_non_filtered_period;
2673         }
2674
2675         if (perf_evsel__is_group_event(evsel)) {
2676                 struct evsel *pos;
2677
2678                 perf_evsel__group_desc(evsel, buf, buflen);
2679                 ev_name = buf;
2680
2681                 for_each_group_member(pos, evsel) {
2682                         struct hists *pos_hists = evsel__hists(pos);
2683
2684                         if (symbol_conf.filter_relative) {
2685                                 nr_samples += pos_hists->stats.nr_non_filtered_samples;
2686                                 nr_events += pos_hists->stats.total_non_filtered_period;
2687                         } else {
2688                                 nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
2689                                 nr_events += pos_hists->stats.total_period;
2690                         }
2691                 }
2692         }
2693
2694         if (symbol_conf.show_ref_callgraph &&
2695             strstr(ev_name, "call-graph=no"))
2696                 enable_ref = true;
2697
2698         if (show_freq)
2699                 scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
2700
2701         nr_samples = convert_unit(nr_samples, &unit);
2702         printed = scnprintf(bf, size,
2703                            "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2704                            nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2705                            ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2706
2708         if (hists->uid_filter_str)
2709                 printed += scnprintf(bf + printed, size - printed,
2710                                     ", UID: %s", hists->uid_filter_str);
2711         if (thread) {
2712                 if (hists__has(hists, thread)) {
2713                         printed += scnprintf(bf + printed, size - printed,
2714                                     ", Thread: %s(%d)",
2715                                      (thread->comm_set ? thread__comm_str(thread) : ""),
2716                                     thread->tid);
2717                 } else {
2718                         printed += scnprintf(bf + printed, size - printed,
2719                                     ", Thread: %s",
2720                                      (thread->comm_set ? thread__comm_str(thread) : ""));
2721                 }
2722         }
2723         if (dso)
2724                 printed += scnprintf(bf + printed, size - printed,
2725                                     ", DSO: %s", dso->short_name);
2726         if (socket_id > -1)
2727                 printed += scnprintf(bf + printed, size - printed,
2728                                     ", Processor Socket: %d", socket_id);
2729
2730         return printed;
2731 }
2732
2733 int parse_filter_percentage(const struct option *opt __maybe_unused,
2734                             const char *arg, int unset __maybe_unused)
2735 {
2736         if (!strcmp(arg, "relative"))
2737                 symbol_conf.filter_relative = true;
2738         else if (!strcmp(arg, "absolute"))
2739                 symbol_conf.filter_relative = false;
2740         else {
2741                 pr_debug("Invalid percentage: %s\n", arg);
2742                 return -1;
2743         }
2744
2745         return 0;
2746 }
2747
2748 int perf_hist_config(const char *var, const char *value)
2749 {
2750         if (!strcmp(var, "hist.percentage"))
2751                 return parse_filter_percentage(NULL, value, 0);
2752
2753         return 0;
2754 }
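
/*
 * Illustrative note (not in the original source): the hook above makes
 *
 *      [hist]
 *      percentage = relative
 *
 * in the perf config file equivalent to using the --percentage option
 * (e.g. in 'perf report').
 */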
2755
2756 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2757 {
2758         memset(hists, 0, sizeof(*hists));
2759         hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2760         hists->entries_in = &hists->entries_in_array[0];
2761         hists->entries_collapsed = RB_ROOT_CACHED;
2762         hists->entries = RB_ROOT_CACHED;
2763         pthread_mutex_init(&hists->lock, NULL);
2764         hists->socket_filter = -1;
2765         hists->hpp_list = hpp_list;
2766         INIT_LIST_HEAD(&hists->hpp_formats);
2767         return 0;
2768 }
2769
2770 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2771 {
2772         struct rb_node *node;
2773         struct hist_entry *he;
2774
2775         while (!RB_EMPTY_ROOT(&root->rb_root)) {
2776                 node = rb_first_cached(root);
2777                 rb_erase_cached(node, root);
2778
2779                 he = rb_entry(node, struct hist_entry, rb_node_in);
2780                 hist_entry__delete(he);
2781         }
2782 }
2783
2784 static void hists__delete_all_entries(struct hists *hists)
2785 {
2786         hists__delete_entries(hists);
2787         hists__delete_remaining_entries(&hists->entries_in_array[0]);
2788         hists__delete_remaining_entries(&hists->entries_in_array[1]);
2789         hists__delete_remaining_entries(&hists->entries_collapsed);
2790 }
2791
2792 static void hists_evsel__exit(struct evsel *evsel)
2793 {
2794         struct hists *hists = evsel__hists(evsel);
2795         struct perf_hpp_fmt *fmt, *pos;
2796         struct perf_hpp_list_node *node, *tmp;
2797
2798         hists__delete_all_entries(hists);
2799
2800         list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2801                 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2802                         list_del_init(&fmt->list);
2803                         free(fmt);
2804                 }
2805                 list_del_init(&node->list);
2806                 free(node);
2807         }
2808 }
2809
2810 static int hists_evsel__init(struct evsel *evsel)
2811 {
2812         struct hists *hists = evsel__hists(evsel);
2813
2814         __hists__init(hists, &perf_hpp_list);
2815         return 0;
2816 }
2817
2818 /*
2819  * The hist_entries stored in the rbtrees are freed by hists_evsel__exit(),
2820  * registered below via perf_evsel__object_config().
2821  */
2822
2823 int hists__init(void)
2824 {
2825         int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2826                                             hists_evsel__init,
2827                                             hists_evsel__exit);
2828         if (err)
2829                 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2830
2831         return err;
2832 }
2833
2834 void perf_hpp_list__init(struct perf_hpp_list *list)
2835 {
2836         INIT_LIST_HEAD(&list->fields);
2837         INIT_LIST_HEAD(&list->sorts);
2838 }