linux-2.6-microblaze.git: tools/perf/util/hist.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include "callchain.h"
3 #include "debug.h"
4 #include "dso.h"
5 #include "build-id.h"
6 #include "hist.h"
7 #include "map.h"
8 #include "map_symbol.h"
9 #include "branch.h"
10 #include "mem-events.h"
11 #include "session.h"
12 #include "namespaces.h"
13 #include "cgroup.h"
14 #include "sort.h"
15 #include "units.h"
16 #include "evlist.h"
17 #include "evsel.h"
18 #include "annotate.h"
19 #include "srcline.h"
20 #include "symbol.h"
21 #include "thread.h"
22 #include "block-info.h"
23 #include "ui/progress.h"
24 #include <errno.h>
25 #include <math.h>
26 #include <inttypes.h>
27 #include <sys/param.h>
28 #include <linux/rbtree.h>
29 #include <linux/string.h>
30 #include <linux/time64.h>
31 #include <linux/zalloc.h>
32
33 static bool hists__filter_entry_by_dso(struct hists *hists,
34                                        struct hist_entry *he);
35 static bool hists__filter_entry_by_thread(struct hists *hists,
36                                           struct hist_entry *he);
37 static bool hists__filter_entry_by_symbol(struct hists *hists,
38                                           struct hist_entry *he);
39 static bool hists__filter_entry_by_socket(struct hists *hists,
40                                           struct hist_entry *he);
41
42 u16 hists__col_len(struct hists *hists, enum hist_column col)
43 {
44         return hists->col_len[col];
45 }
46
47 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
48 {
49         hists->col_len[col] = len;
50 }
51
52 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
53 {
54         if (len > hists__col_len(hists, col)) {
55                 hists__set_col_len(hists, col, len);
56                 return true;
57         }
58         return false;
59 }
60
61 void hists__reset_col_len(struct hists *hists)
62 {
63         enum hist_column col;
64
65         for (col = 0; col < HISTC_NR_COLS; ++col)
66                 hists__set_col_len(hists, col, 0);
67 }
68
69 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
70 {
71         const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
72
73         if (hists__col_len(hists, dso) < unresolved_col_width &&
74             !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
75             !symbol_conf.dso_list)
76                 hists__set_col_len(hists, dso, unresolved_col_width);
77 }
78
79 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
80 {
81         const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
82         int symlen;
83         u16 len;
84
85         if (h->block_info)
86                 return;
87         /*
88          * +4 accounts for '[x] ' priv level info
89          * +2 accounts for 0x prefix on raw addresses
90          * +3 accounts for ' y ' symtab origin info
91          */
92         if (h->ms.sym) {
93                 symlen = h->ms.sym->namelen + 4;
94                 if (verbose > 0)
95                         symlen += BITS_PER_LONG / 4 + 2 + 3;
96                 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
97         } else {
98                 symlen = unresolved_col_width + 4 + 2;
99                 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
100                 hists__set_unres_dso_col_len(hists, HISTC_DSO);
101         }
102
103         len = thread__comm_len(h->thread);
104         if (hists__new_col_len(hists, HISTC_COMM, len))
105                 hists__set_col_len(hists, HISTC_THREAD, len + 8);
106
107         if (h->ms.map) {
108                 len = dso__name_len(h->ms.map->dso);
109                 hists__new_col_len(hists, HISTC_DSO, len);
110         }
111
112         if (h->parent)
113                 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
114
115         if (h->branch_info) {
116                 if (h->branch_info->from.ms.sym) {
117                         symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
118                         if (verbose > 0)
119                                 symlen += BITS_PER_LONG / 4 + 2 + 3;
120                         hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
121
122                         symlen = dso__name_len(h->branch_info->from.ms.map->dso);
123                         hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
124                 } else {
125                         symlen = unresolved_col_width + 4 + 2;
126                         hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
127                         hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
128                 }
129
130                 if (h->branch_info->to.ms.sym) {
131                         symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
132                         if (verbose > 0)
133                                 symlen += BITS_PER_LONG / 4 + 2 + 3;
134                         hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
135
136                         symlen = dso__name_len(h->branch_info->to.ms.map->dso);
137                         hists__new_col_len(hists, HISTC_DSO_TO, symlen);
138                 } else {
139                         symlen = unresolved_col_width + 4 + 2;
140                         hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
141                         hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
142                 }
143
144                 if (h->branch_info->srcline_from)
145                         hists__new_col_len(hists, HISTC_SRCLINE_FROM,
146                                         strlen(h->branch_info->srcline_from));
147                 if (h->branch_info->srcline_to)
148                         hists__new_col_len(hists, HISTC_SRCLINE_TO,
149                                         strlen(h->branch_info->srcline_to));
150         }
151
152         if (h->mem_info) {
153                 if (h->mem_info->daddr.ms.sym) {
154                         symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
155                                + unresolved_col_width + 2;
156                         hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
157                                            symlen);
158                         hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
159                                            symlen + 1);
160                 } else {
161                         symlen = unresolved_col_width + 4 + 2;
162                         hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
163                                            symlen);
164                         hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
165                                            symlen);
166                 }
167
168                 if (h->mem_info->iaddr.ms.sym) {
169                         symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
170                                + unresolved_col_width + 2;
171                         hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
172                                            symlen);
173                 } else {
174                         symlen = unresolved_col_width + 4 + 2;
175                         hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
176                                            symlen);
177                 }
178
179                 if (h->mem_info->daddr.ms.map) {
180                         symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
181                         hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
182                                            symlen);
183                 } else {
184                         symlen = unresolved_col_width + 4 + 2;
185                         hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
186                 }
187
188                 hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
189                                    unresolved_col_width + 4 + 2);
190
191                 hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
192                                    unresolved_col_width + 4 + 2);
193
194         } else {
195                 symlen = unresolved_col_width + 4 + 2;
196                 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
197                 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
198                 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
199         }
200
201         hists__new_col_len(hists, HISTC_CGROUP, 6);
202         hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
203         hists__new_col_len(hists, HISTC_CPU, 3);
204         hists__new_col_len(hists, HISTC_SOCKET, 6);
205         hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
206         hists__new_col_len(hists, HISTC_MEM_TLB, 22);
207         hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
208         hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
209         hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
210         hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
211         hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
212         hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
213         hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
214         hists__new_col_len(hists, HISTC_P_STAGE_CYC, 13);
215         if (symbol_conf.nanosecs)
216                 hists__new_col_len(hists, HISTC_TIME, 16);
217         else
218                 hists__new_col_len(hists, HISTC_TIME, 12);
219         hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
220
221         if (h->srcline) {
222                 len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
223                 hists__new_col_len(hists, HISTC_SRCLINE, len);
224         }
225
226         if (h->srcfile)
227                 hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
228
229         if (h->transaction)
230                 hists__new_col_len(hists, HISTC_TRANSACTION,
231                                    hist_entry__transaction_len());
232
233         if (h->trace_output)
234                 hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
235
236         if (h->cgroup) {
237                 const char *cgrp_name = "unknown";
238                 struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
239                                                    h->cgroup);
240                 if (cgrp != NULL)
241                         cgrp_name = cgrp->name;
242
243                 hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
244         }
245 }
246
247 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
248 {
249         struct rb_node *next = rb_first_cached(&hists->entries);
250         struct hist_entry *n;
251         int row = 0;
252
253         hists__reset_col_len(hists);
254
255         while (next && row++ < max_rows) {
256                 n = rb_entry(next, struct hist_entry, rb_node);
257                 if (!n->filtered)
258                         hists__calc_col_len(hists, n);
259                 next = rb_next(&n->rb_node);
260         }
261 }
262
263 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
264                                         unsigned int cpumode, u64 period)
265 {
266         switch (cpumode) {
267         case PERF_RECORD_MISC_KERNEL:
268                 he_stat->period_sys += period;
269                 break;
270         case PERF_RECORD_MISC_USER:
271                 he_stat->period_us += period;
272                 break;
273         case PERF_RECORD_MISC_GUEST_KERNEL:
274                 he_stat->period_guest_sys += period;
275                 break;
276         case PERF_RECORD_MISC_GUEST_USER:
277                 he_stat->period_guest_us += period;
278                 break;
279         default:
280                 break;
281         }
282 }
283
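/*
 * Bucket a sample timestamp: round 'htime' down to a multiple of the
 * configured time quantum (e.g. set via --time-quantum) so that samples
 * taken close together land in the same histogram time slice.
 */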
284 static long hist_time(unsigned long htime)
285 {
286         unsigned long time_quantum = symbol_conf.time_quantum;
287         if (time_quantum)
288                 return (htime / time_quantum) * time_quantum;
289         return htime;
290 }
291
292 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
293                                 u64 weight, u64 ins_lat, u64 p_stage_cyc)
294 {
295
296         he_stat->period         += period;
297         he_stat->weight         += weight;
298         he_stat->nr_events      += 1;
299         he_stat->ins_lat        += ins_lat;
300         he_stat->p_stage_cyc    += p_stage_cyc;
301 }
302
303 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
304 {
305         dest->period            += src->period;
306         dest->period_sys        += src->period_sys;
307         dest->period_us         += src->period_us;
308         dest->period_guest_sys  += src->period_guest_sys;
309         dest->period_guest_us   += src->period_guest_us;
310         dest->nr_events         += src->nr_events;
311         dest->weight            += src->weight;
312         dest->ins_lat           += src->ins_lat;
313         dest->p_stage_cyc       += src->p_stage_cyc;
314 }
315
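/*
 * Age an entry's stats by multiplying them by 7/8.  Repeated decay makes
 * old entries fade out in live mode (e.g. 'perf top'): once the period
 * reaches zero, hists__decay_entry() below reports the entry as dead and
 * it gets removed.  Example: a period of 800 decays to 700, 612, 535, ...
 */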
316 static void he_stat__decay(struct he_stat *he_stat)
317 {
318         he_stat->period = (he_stat->period * 7) / 8;
319         he_stat->nr_events = (he_stat->nr_events * 7) / 8;
320         /* XXX need decay for weight too? */
321 }
322
323 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
324
325 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
326 {
327         u64 prev_period = he->stat.period;
328         u64 diff;
329
330         if (prev_period == 0)
331                 return true;
332
333         he_stat__decay(&he->stat);
334         if (symbol_conf.cumulate_callchain)
335                 he_stat__decay(he->stat_acc);
336         decay_callchain(he->callchain);
337
338         diff = prev_period - he->stat.period;
339
340         if (!he->depth) {
341                 hists->stats.total_period -= diff;
342                 if (!he->filtered)
343                         hists->stats.total_non_filtered_period -= diff;
344         }
345
346         if (!he->leaf) {
347                 struct hist_entry *child;
348                 struct rb_node *node = rb_first_cached(&he->hroot_out);
349                 while (node) {
350                         child = rb_entry(node, struct hist_entry, rb_node);
351                         node = rb_next(node);
352
353                         if (hists__decay_entry(hists, child))
354                                 hists__delete_entry(hists, child);
355                 }
356         }
357
358         return he->stat.period == 0;
359 }
360
361 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
362 {
363         struct rb_root_cached *root_in;
364         struct rb_root_cached *root_out;
365
366         if (he->parent_he) {
367                 root_in  = &he->parent_he->hroot_in;
368                 root_out = &he->parent_he->hroot_out;
369         } else {
370                 if (hists__has(hists, need_collapse))
371                         root_in = &hists->entries_collapsed;
372                 else
373                         root_in = hists->entries_in;
374                 root_out = &hists->entries;
375         }
376
377         rb_erase_cached(&he->rb_node_in, root_in);
378         rb_erase_cached(&he->rb_node, root_out);
379
380         --hists->nr_entries;
381         if (!he->filtered)
382                 --hists->nr_non_filtered_entries;
383
384         hist_entry__delete(he);
385 }
386
387 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
388 {
389         struct rb_node *next = rb_first_cached(&hists->entries);
390         struct hist_entry *n;
391
392         while (next) {
393                 n = rb_entry(next, struct hist_entry, rb_node);
394                 next = rb_next(&n->rb_node);
395                 if (((zap_user && n->level == '.') ||
396                      (zap_kernel && n->level != '.') ||
397                      hists__decay_entry(hists, n))) {
398                         hists__delete_entry(hists, n);
399                 }
400         }
401 }
402
403 void hists__delete_entries(struct hists *hists)
404 {
405         struct rb_node *next = rb_first_cached(&hists->entries);
406         struct hist_entry *n;
407
408         while (next) {
409                 n = rb_entry(next, struct hist_entry, rb_node);
410                 next = rb_next(&n->rb_node);
411
412                 hists__delete_entry(hists, n);
413         }
414 }
415
416 struct hist_entry *hists__get_entry(struct hists *hists, int idx)
417 {
418         struct rb_node *next = rb_first_cached(&hists->entries);
419         struct hist_entry *n;
420         int i = 0;
421
422         while (next) {
423                 n = rb_entry(next, struct hist_entry, rb_node);
424                 if (i == idx)
425                         return n;
426
427                 next = rb_next(&n->rb_node);
428                 i++;
429         }
430
431         return NULL;
432 }
433
434 /*
435  * histogram, sorted on item, collects periods
436  */
437
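/*
 * Initialize a new entry from a template (usually the on-stack entry built
 * by __hists__add_entry()): the template is copied wholesale, then every
 * resource it points to (maps, thread, branch_info, raw_data, srcline,
 * res_samples) is either reference-counted or duplicated so the entry owns
 * its data independently of the sample that produced it.  On failure the
 * already-acquired pieces are released in reverse order.
 */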
438 static int hist_entry__init(struct hist_entry *he,
439                             struct hist_entry *template,
440                             bool sample_self,
441                             size_t callchain_size)
442 {
443         *he = *template;
444         he->callchain_size = callchain_size;
445
446         if (symbol_conf.cumulate_callchain) {
447                 he->stat_acc = malloc(sizeof(he->stat));
448                 if (he->stat_acc == NULL)
449                         return -ENOMEM;
450                 memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
451                 if (!sample_self)
452                         memset(&he->stat, 0, sizeof(he->stat));
453         }
454
455         map__get(he->ms.map);
456
457         if (he->branch_info) {
458                 /*
459                  * This branch info is (a part of) allocated from
460                  * This branch info is (part of) an array allocated by
461                  * adding new entries.  So we need to save a copy.
462                  */
463                 he->branch_info = malloc(sizeof(*he->branch_info));
464                 if (he->branch_info == NULL)
465                         goto err;
466
467                 memcpy(he->branch_info, template->branch_info,
468                        sizeof(*he->branch_info));
469
470                 map__get(he->branch_info->from.ms.map);
471                 map__get(he->branch_info->to.ms.map);
472         }
473
474         if (he->mem_info) {
475                 map__get(he->mem_info->iaddr.ms.map);
476                 map__get(he->mem_info->daddr.ms.map);
477         }
478
479         if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
480                 callchain_init(he->callchain);
481
482         if (he->raw_data) {
483                 he->raw_data = memdup(he->raw_data, he->raw_size);
484                 if (he->raw_data == NULL)
485                         goto err_infos;
486         }
487
488         if (he->srcline) {
489                 he->srcline = strdup(he->srcline);
490                 if (he->srcline == NULL)
491                         goto err_rawdata;
492         }
493
494         if (symbol_conf.res_sample) {
495                 he->res_samples = calloc(sizeof(struct res_sample),
496                                         symbol_conf.res_sample);
497                 if (!he->res_samples)
498                         goto err_srcline;
499         }
500
501         INIT_LIST_HEAD(&he->pairs.node);
502         thread__get(he->thread);
503         he->hroot_in  = RB_ROOT_CACHED;
504         he->hroot_out = RB_ROOT_CACHED;
505
506         if (!symbol_conf.report_hierarchy)
507                 he->leaf = true;
508
509         return 0;
510
511 err_srcline:
512         zfree(&he->srcline);
513
514 err_rawdata:
515         zfree(&he->raw_data);
516
517 err_infos:
518         if (he->branch_info) {
519                 map__put(he->branch_info->from.ms.map);
520                 map__put(he->branch_info->to.ms.map);
521                 zfree(&he->branch_info);
522         }
523         if (he->mem_info) {
524                 map__put(he->mem_info->iaddr.ms.map);
525                 map__put(he->mem_info->daddr.ms.map);
526         }
527 err:
528         map__zput(he->ms.map);
529         zfree(&he->stat_acc);
530         return -ENOMEM;
531 }
532
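/*
 * Default allocator for hist entries.  'size' is the extra room requested
 * by hist_entry__new() for the callchain_root, which is expected to live in
 * the flexible storage right after the struct (callchain must be the last
 * member of struct hist_entry).  The hist_entry_ops indirection lets
 * callers swap in their own allocator, e.g. to embed the entry in a larger
 * structure.
 */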
533 static void *hist_entry__zalloc(size_t size)
534 {
535         return zalloc(size + sizeof(struct hist_entry));
536 }
537
538 static void hist_entry__free(void *ptr)
539 {
540         free(ptr);
541 }
542
543 static struct hist_entry_ops default_ops = {
544         .new    = hist_entry__zalloc,
545         .free   = hist_entry__free,
546 };
547
548 static struct hist_entry *hist_entry__new(struct hist_entry *template,
549                                           bool sample_self)
550 {
551         struct hist_entry_ops *ops = template->ops;
552         size_t callchain_size = 0;
553         struct hist_entry *he;
554         int err = 0;
555
556         if (!ops)
557                 ops = template->ops = &default_ops;
558
559         if (symbol_conf.use_callchain)
560                 callchain_size = sizeof(struct callchain_root);
561
562         he = ops->new(callchain_size);
563         if (he) {
564                 err = hist_entry__init(he, template, sample_self, callchain_size);
565                 if (err) {
566                         ops->free(he);
567                         he = NULL;
568                 }
569         }
570
571         return he;
572 }
573
574 static u8 symbol__parent_filter(const struct symbol *parent)
575 {
576         if (symbol_conf.exclude_other && parent == NULL)
577                 return 1 << HIST_FILTER__PARENT;
578         return 0;
579 }
580
581 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
582 {
583         if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
584                 return;
585
586         he->hists->callchain_period += period;
587         if (!he->filtered)
588                 he->hists->callchain_non_filtered_period += period;
589 }
590
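/*
 * Look up 'entry' in the hists->entries_in rbtree using the configured sort
 * keys (hist_entry__cmp).  If an equal entry exists, fold this sample's
 * period/weight into it and drop the per-sample mem/block info; otherwise
 * allocate a fresh hist_entry from the template and insert it.  Either way
 * the cpumode breakdown (sys/us/guest) is updated for the resulting entry.
 */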
591 static struct hist_entry *hists__findnew_entry(struct hists *hists,
592                                                struct hist_entry *entry,
593                                                struct addr_location *al,
594                                                bool sample_self)
595 {
596         struct rb_node **p;
597         struct rb_node *parent = NULL;
598         struct hist_entry *he;
599         int64_t cmp;
600         u64 period = entry->stat.period;
601         u64 weight = entry->stat.weight;
602         u64 ins_lat = entry->stat.ins_lat;
603         u64 p_stage_cyc = entry->stat.p_stage_cyc;
604         bool leftmost = true;
605
606         p = &hists->entries_in->rb_root.rb_node;
607
608         while (*p != NULL) {
609                 parent = *p;
610                 he = rb_entry(parent, struct hist_entry, rb_node_in);
611
612                 /*
613                  * Make sure that it receives arguments in the same order as
614                  * hist_entry__collapse() so that we can use an appropriate
615                  * function when searching an entry regardless of which sort
616                  * keys were used.
617                  */
618                 cmp = hist_entry__cmp(he, entry);
619
620                 if (!cmp) {
621                         if (sample_self) {
622                                 he_stat__add_period(&he->stat, period, weight, ins_lat, p_stage_cyc);
623                                 hist_entry__add_callchain_period(he, period);
624                         }
625                         if (symbol_conf.cumulate_callchain)
626                                 he_stat__add_period(he->stat_acc, period, weight, ins_lat, p_stage_cyc);
627
628                         /*
629                          * This mem info was allocated from sample__resolve_mem
630                          * and will not be used anymore.
631                          */
632                         mem_info__zput(entry->mem_info);
633
634                         block_info__zput(entry->block_info);
635
636                         /* If the map of an existing hist_entry has
637                          * become out-of-date due to an exec() or
638                          * similar, update it.  Otherwise we will
639                          * mis-adjust symbol addresses when computing
640                          * the history counter to increment.
641                          */
642                         if (he->ms.map != entry->ms.map) {
643                                 map__put(he->ms.map);
644                                 he->ms.map = map__get(entry->ms.map);
645                         }
646                         goto out;
647                 }
648
649                 if (cmp < 0)
650                         p = &(*p)->rb_left;
651                 else {
652                         p = &(*p)->rb_right;
653                         leftmost = false;
654                 }
655         }
656
657         he = hist_entry__new(entry, sample_self);
658         if (!he)
659                 return NULL;
660
661         if (sample_self)
662                 hist_entry__add_callchain_period(he, period);
663         hists->nr_entries++;
664
665         rb_link_node(&he->rb_node_in, parent, p);
666         rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
667 out:
668         if (sample_self)
669                 he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
670         if (symbol_conf.cumulate_callchain)
671                 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
672         return he;
673 }
674
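/*
 * Return a (roughly uniform) random index in [0, high).  Values of
 * random() below 'thresh' are rejected and redrawn to reduce the modulo
 * bias that a plain 'random() % high' would introduce.
 */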
675 static unsigned random_max(unsigned high)
676 {
677         unsigned thresh = -high % high;
678         for (;;) {
679                 unsigned r = random();
680                 if (r >= thresh)
681                         return r % high;
682         }
683 }
684
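/*
 * Remember up to symbol_conf.res_sample representative samples (time, cpu,
 * tid) for this entry; once the array is full, overwrite a randomly chosen
 * slot.  These let the report jump back to concrete samples later (e.g. for
 * the --samples option).
 */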
685 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
686 {
687         struct res_sample *r;
688         int j;
689
690         if (he->num_res < symbol_conf.res_sample) {
691                 j = he->num_res++;
692         } else {
693                 j = random_max(symbol_conf.res_sample);
694         }
695         r = &he->res_samples[j];
696         r->time = sample->time;
697         r->cpu = sample->cpu;
698         r->tid = sample->tid;
699 }
700
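/*
 * Build an on-stack template entry from the resolved addr_location and the
 * sample, then hand it to hists__findnew_entry().  Pointers in the template
 * (branch_info, mem_info, raw_data, srcline) are only duplicated or
 * reference-counted if a brand new entry actually gets created.
 */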
701 static struct hist_entry*
702 __hists__add_entry(struct hists *hists,
703                    struct addr_location *al,
704                    struct symbol *sym_parent,
705                    struct branch_info *bi,
706                    struct mem_info *mi,
707                    struct block_info *block_info,
708                    struct perf_sample *sample,
709                    bool sample_self,
710                    struct hist_entry_ops *ops)
711 {
712         struct namespaces *ns = thread__namespaces(al->thread);
713         struct hist_entry entry = {
714                 .thread = al->thread,
715                 .comm = thread__comm(al->thread),
716                 .cgroup_id = {
717                         .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
718                         .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
719                 },
720                 .cgroup = sample->cgroup,
721                 .ms = {
722                         .maps   = al->maps,
723                         .map    = al->map,
724                         .sym    = al->sym,
725                 },
726                 .srcline = (char *) al->srcline,
727                 .socket  = al->socket,
728                 .cpu     = al->cpu,
729                 .cpumode = al->cpumode,
730                 .ip      = al->addr,
731                 .level   = al->level,
732                 .code_page_size = sample->code_page_size,
733                 .stat = {
734                         .nr_events = 1,
735                         .period = sample->period,
736                         .weight = sample->weight,
737                         .ins_lat = sample->ins_lat,
738                         .p_stage_cyc = sample->p_stage_cyc,
739                 },
740                 .parent = sym_parent,
741                 .filtered = symbol__parent_filter(sym_parent) | al->filtered,
742                 .hists  = hists,
743                 .branch_info = bi,
744                 .mem_info = mi,
745                 .block_info = block_info,
746                 .transaction = sample->transaction,
747                 .raw_data = sample->raw_data,
748                 .raw_size = sample->raw_size,
749                 .ops = ops,
750                 .time = hist_time(sample->time),
751         }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
752
753         if (!hists->has_callchains && he && he->callchain_size != 0)
754                 hists->has_callchains = true;
755         if (he && symbol_conf.res_sample)
756                 hists__res_sample(he, sample);
757         return he;
758 }
759
760 struct hist_entry *hists__add_entry(struct hists *hists,
761                                     struct addr_location *al,
762                                     struct symbol *sym_parent,
763                                     struct branch_info *bi,
764                                     struct mem_info *mi,
765                                     struct perf_sample *sample,
766                                     bool sample_self)
767 {
768         return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
769                                   sample, sample_self, NULL);
770 }
771
772 struct hist_entry *hists__add_entry_ops(struct hists *hists,
773                                         struct hist_entry_ops *ops,
774                                         struct addr_location *al,
775                                         struct symbol *sym_parent,
776                                         struct branch_info *bi,
777                                         struct mem_info *mi,
778                                         struct perf_sample *sample,
779                                         bool sample_self)
780 {
781         return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
782                                   sample, sample_self, ops);
783 }
784
785 struct hist_entry *hists__add_entry_block(struct hists *hists,
786                                           struct addr_location *al,
787                                           struct block_info *block_info)
788 {
789         struct hist_entry entry = {
790                 .block_info = block_info,
791                 .hists = hists,
792                 .ms = {
793                         .maps = al->maps,
794                         .map = al->map,
795                         .sym = al->sym,
796                 },
797         }, *he = hists__findnew_entry(hists, &entry, al, false);
798
799         return he;
800 }
801
802 static int
803 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
804                     struct addr_location *al __maybe_unused)
805 {
806         return 0;
807 }
808
809 static int
810 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
811                         struct addr_location *al __maybe_unused)
812 {
813         return 0;
814 }
815
816 static int
817 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
818 {
819         struct perf_sample *sample = iter->sample;
820         struct mem_info *mi;
821
822         mi = sample__resolve_mem(sample, al);
823         if (mi == NULL)
824                 return -ENOMEM;
825
826         iter->priv = mi;
827         return 0;
828 }
829
830 static int
831 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
832 {
833         u64 cost;
834         struct mem_info *mi = iter->priv;
835         struct hists *hists = evsel__hists(iter->evsel);
836         struct perf_sample *sample = iter->sample;
837         struct hist_entry *he;
838
839         if (mi == NULL)
840                 return -EINVAL;
841
842         cost = sample->weight;
843         if (!cost)
844                 cost = 1;
845
846         /*
847          * must pass period=weight in order to get the correct
848          * sorting from hists__collapse_resort() which is solely
849          * based on periods. We want sorting to be done on nr_events * weight
850          * and this is indirectly achieved by passing period=weight here
851          * and in the he_stat__add_period() function.
852          */
853         sample->period = cost;
854
855         he = hists__add_entry(hists, al, iter->parent, NULL, mi,
856                               sample, true);
857         if (!he)
858                 return -ENOMEM;
859
860         iter->he = he;
861         return 0;
862 }
863
864 static int
865 iter_finish_mem_entry(struct hist_entry_iter *iter,
866                       struct addr_location *al __maybe_unused)
867 {
868         struct evsel *evsel = iter->evsel;
869         struct hists *hists = evsel__hists(evsel);
870         struct hist_entry *he = iter->he;
871         int err = -EINVAL;
872
873         if (he == NULL)
874                 goto out;
875
876         hists__inc_nr_samples(hists, he->filtered);
877
878         err = hist_entry__append_callchain(he, iter->sample);
879
880 out:
881         /*
882          * We don't need to free iter->priv (mem_info) here since the mem info
883          * was either already freed in hists__findnew_entry() or passed to a
884          * new hist entry by hist_entry__new().
885          */
886         iter->priv = NULL;
887
888         iter->he = NULL;
889         return err;
890 }
891
892 static int
893 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
894 {
895         struct branch_info *bi;
896         struct perf_sample *sample = iter->sample;
897
898         bi = sample__resolve_bstack(sample, al);
899         if (!bi)
900                 return -ENOMEM;
901
902         iter->curr = 0;
903         iter->total = sample->branch_stack->nr;
904
905         iter->priv = bi;
906         return 0;
907 }
908
909 static int
910 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
911                              struct addr_location *al __maybe_unused)
912 {
913         return 0;
914 }
915
916 static int
917 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
918 {
919         struct branch_info *bi = iter->priv;
920         int i = iter->curr;
921
922         if (bi == NULL)
923                 return 0;
924
925         if (iter->curr >= iter->total)
926                 return 0;
927
928         al->maps = bi[i].to.ms.maps;
929         al->map = bi[i].to.ms.map;
930         al->sym = bi[i].to.ms.sym;
931         al->addr = bi[i].to.addr;
932         return 1;
933 }
934
935 static int
936 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
937 {
938         struct branch_info *bi;
939         struct evsel *evsel = iter->evsel;
940         struct hists *hists = evsel__hists(evsel);
941         struct perf_sample *sample = iter->sample;
942         struct hist_entry *he = NULL;
943         int i = iter->curr;
944         int err = 0;
945
946         bi = iter->priv;
947
948         if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
949                 goto out;
950
951         /*
952          * The report shows the percentage of total branches captured
953          * and not events sampled. Thus we use a pseudo period of 1.
954          */
955         sample->period = 1;
956         sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
957
958         he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
959                               sample, true);
960         if (he == NULL)
961                 return -ENOMEM;
962
963         hists__inc_nr_samples(hists, he->filtered);
964
965 out:
966         iter->he = he;
967         iter->curr++;
968         return err;
969 }
970
971 static int
972 iter_finish_branch_entry(struct hist_entry_iter *iter,
973                          struct addr_location *al __maybe_unused)
974 {
975         zfree(&iter->priv);
976         iter->he = NULL;
977
978         return iter->curr >= iter->total ? 0 : -1;
979 }
980
981 static int
982 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
983                           struct addr_location *al __maybe_unused)
984 {
985         return 0;
986 }
987
988 static int
989 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
990 {
991         struct evsel *evsel = iter->evsel;
992         struct perf_sample *sample = iter->sample;
993         struct hist_entry *he;
994
995         he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
996                               sample, true);
997         if (he == NULL)
998                 return -ENOMEM;
999
1000         iter->he = he;
1001         return 0;
1002 }
1003
1004 static int
1005 iter_finish_normal_entry(struct hist_entry_iter *iter,
1006                          struct addr_location *al __maybe_unused)
1007 {
1008         struct hist_entry *he = iter->he;
1009         struct evsel *evsel = iter->evsel;
1010         struct perf_sample *sample = iter->sample;
1011
1012         if (he == NULL)
1013                 return 0;
1014
1015         iter->he = NULL;
1016
1017         hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
1018
1019         return hist_entry__append_callchain(he, sample);
1020 }
1021
1022 static int
1023 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
1024                               struct addr_location *al __maybe_unused)
1025 {
1026         struct hist_entry **he_cache;
1027
1028         callchain_cursor_commit(&callchain_cursor);
1029
1030         /*
1031          * This is for detecting cycles or recursion so that they're
1032          * accumulated only once, to prevent entries from exceeding 100%
1033          * overhead.
1034          */
1035         he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
1036         if (he_cache == NULL)
1037                 return -ENOMEM;
1038
1039         iter->priv = he_cache;
1040         iter->curr = 0;
1041
1042         return 0;
1043 }
1044
1045 static int
1046 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1047                                  struct addr_location *al)
1048 {
1049         struct evsel *evsel = iter->evsel;
1050         struct hists *hists = evsel__hists(evsel);
1051         struct perf_sample *sample = iter->sample;
1052         struct hist_entry **he_cache = iter->priv;
1053         struct hist_entry *he;
1054         int err = 0;
1055
1056         he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
1057                               sample, true);
1058         if (he == NULL)
1059                 return -ENOMEM;
1060
1061         iter->he = he;
1062         he_cache[iter->curr++] = he;
1063
1064         hist_entry__append_callchain(he, sample);
1065
1066         /*
1067          * We need to re-initialize the cursor since callchain_append()
1068          * advanced the cursor to the end.
1069          */
1070         callchain_cursor_commit(&callchain_cursor);
1071
1072         hists__inc_nr_samples(hists, he->filtered);
1073
1074         return err;
1075 }
1076
1077 static int
1078 iter_next_cumulative_entry(struct hist_entry_iter *iter,
1079                            struct addr_location *al)
1080 {
1081         struct callchain_cursor_node *node;
1082
1083         node = callchain_cursor_current(&callchain_cursor);
1084         if (node == NULL)
1085                 return 0;
1086
1087         return fill_callchain_info(al, node, iter->hide_unresolved);
1088 }
1089
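/*
 * Cheap pre-check used while scanning the he_cache: return true when the
 * two entries definitely differ by symbol (or, if both symbols are missing,
 * by ip), so the expensive full hist_entry__cmp() can be skipped.
 */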
1090 static bool
1091 hist_entry__fast__sym_diff(struct hist_entry *left,
1092                            struct hist_entry *right)
1093 {
1094         struct symbol *sym_l = left->ms.sym;
1095         struct symbol *sym_r = right->ms.sym;
1096
1097         if (!sym_l && !sym_r)
1098                 return left->ip != right->ip;
1099
1100         return !!_sort__sym_cmp(sym_l, sym_r);
1101 }
1102
1103
1104 static int
1105 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1106                                struct addr_location *al)
1107 {
1108         struct evsel *evsel = iter->evsel;
1109         struct perf_sample *sample = iter->sample;
1110         struct hist_entry **he_cache = iter->priv;
1111         struct hist_entry *he;
1112         struct hist_entry he_tmp = {
1113                 .hists = evsel__hists(evsel),
1114                 .cpu = al->cpu,
1115                 .thread = al->thread,
1116                 .comm = thread__comm(al->thread),
1117                 .ip = al->addr,
1118                 .ms = {
1119                         .maps = al->maps,
1120                         .map = al->map,
1121                         .sym = al->sym,
1122                 },
1123                 .srcline = (char *) al->srcline,
1124                 .parent = iter->parent,
1125                 .raw_data = sample->raw_data,
1126                 .raw_size = sample->raw_size,
1127         };
1128         int i;
1129         struct callchain_cursor cursor;
1130         bool fast = hists__has(he_tmp.hists, sym);
1131
1132         callchain_cursor_snapshot(&cursor, &callchain_cursor);
1133
1134         callchain_cursor_advance(&callchain_cursor);
1135
1136         /*
1137          * Check if there are duplicate entries in the callchain.
1138          * It's possible that it has cycles or recursive calls.
1139          */
1140         for (i = 0; i < iter->curr; i++) {
1141                 /*
1142                  * For most cases, there are no duplicate entries in the callchain.
1143                  * The symbols are usually different. Do a quick check for
1144                  * symbols first.
1145                  */
1146                 if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
1147                         continue;
1148
1149                 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1150                         /* to avoid calling callback function */
1151                         iter->he = NULL;
1152                         return 0;
1153                 }
1154         }
1155
1156         he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1157                               sample, false);
1158         if (he == NULL)
1159                 return -ENOMEM;
1160
1161         iter->he = he;
1162         he_cache[iter->curr++] = he;
1163
1164         if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
1165                 callchain_append(he->callchain, &cursor, sample->period);
1166         return 0;
1167 }
1168
1169 static int
1170 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1171                              struct addr_location *al __maybe_unused)
1172 {
1173         zfree(&iter->priv);
1174         iter->he = NULL;
1175
1176         return 0;
1177 }
1178
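/*
 * Iterator callback tables driven by hist_entry_iter__add() below:
 * prepare_entry() runs once, add_single_entry() adds the entry for the
 * sample itself, then next_entry()/add_next_entry() loop over any extra
 * entries (branch stack entries, cumulated callchain parents) until
 * next_entry() returns 0, and finish_entry() cleans up.
 */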
1179 const struct hist_iter_ops hist_iter_mem = {
1180         .prepare_entry          = iter_prepare_mem_entry,
1181         .add_single_entry       = iter_add_single_mem_entry,
1182         .next_entry             = iter_next_nop_entry,
1183         .add_next_entry         = iter_add_next_nop_entry,
1184         .finish_entry           = iter_finish_mem_entry,
1185 };
1186
1187 const struct hist_iter_ops hist_iter_branch = {
1188         .prepare_entry          = iter_prepare_branch_entry,
1189         .add_single_entry       = iter_add_single_branch_entry,
1190         .next_entry             = iter_next_branch_entry,
1191         .add_next_entry         = iter_add_next_branch_entry,
1192         .finish_entry           = iter_finish_branch_entry,
1193 };
1194
1195 const struct hist_iter_ops hist_iter_normal = {
1196         .prepare_entry          = iter_prepare_normal_entry,
1197         .add_single_entry       = iter_add_single_normal_entry,
1198         .next_entry             = iter_next_nop_entry,
1199         .add_next_entry         = iter_add_next_nop_entry,
1200         .finish_entry           = iter_finish_normal_entry,
1201 };
1202
1203 const struct hist_iter_ops hist_iter_cumulative = {
1204         .prepare_entry          = iter_prepare_cumulative_entry,
1205         .add_single_entry       = iter_add_single_cumulative_entry,
1206         .next_entry             = iter_next_cumulative_entry,
1207         .add_next_entry         = iter_add_next_cumulative_entry,
1208         .finish_entry           = iter_finish_cumulative_entry,
1209 };
1210
1211 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1212                          int max_stack_depth, void *arg)
1213 {
1214         int err, err2;
1215         struct map *alm = NULL;
1216
1217         if (al)
1218                 alm = map__get(al->map);
1219
1220         err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1221                                         iter->evsel, al, max_stack_depth);
1222         if (err) {
1223                 map__put(alm);
1224                 return err;
1225         }
1226
1227         err = iter->ops->prepare_entry(iter, al);
1228         if (err)
1229                 goto out;
1230
1231         err = iter->ops->add_single_entry(iter, al);
1232         if (err)
1233                 goto out;
1234
1235         if (iter->he && iter->add_entry_cb) {
1236                 err = iter->add_entry_cb(iter, al, true, arg);
1237                 if (err)
1238                         goto out;
1239         }
1240
1241         while (iter->ops->next_entry(iter, al)) {
1242                 err = iter->ops->add_next_entry(iter, al);
1243                 if (err)
1244                         break;
1245
1246                 if (iter->he && iter->add_entry_cb) {
1247                         err = iter->add_entry_cb(iter, al, false, arg);
1248                         if (err)
1249                                 goto out;
1250                 }
1251         }
1252
1253 out:
1254         err2 = iter->ops->finish_entry(iter, al);
1255         if (!err)
1256                 err = err2;
1257
1258         map__put(alm);
1259
1260         return err;
1261 }
1262
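/*
 * Compare two entries with the configured sort keys, stopping at the first
 * key that differs.  hist_entry__cmp() uses each format's ->cmp for the
 * input tree, while hist_entry__collapse() below uses ->collapse for the
 * collapse/merge stage; both walk the keys in the same order.
 */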
1263 int64_t
1264 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1265 {
1266         struct hists *hists = left->hists;
1267         struct perf_hpp_fmt *fmt;
1268         int64_t cmp = 0;
1269
1270         hists__for_each_sort_list(hists, fmt) {
1271                 if (perf_hpp__is_dynamic_entry(fmt) &&
1272                     !perf_hpp__defined_dynamic_entry(fmt, hists))
1273                         continue;
1274
1275                 cmp = fmt->cmp(fmt, left, right);
1276                 if (cmp)
1277                         break;
1278         }
1279
1280         return cmp;
1281 }
1282
1283 int64_t
1284 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1285 {
1286         struct hists *hists = left->hists;
1287         struct perf_hpp_fmt *fmt;
1288         int64_t cmp = 0;
1289
1290         hists__for_each_sort_list(hists, fmt) {
1291                 if (perf_hpp__is_dynamic_entry(fmt) &&
1292                     !perf_hpp__defined_dynamic_entry(fmt, hists))
1293                         continue;
1294
1295                 cmp = fmt->collapse(fmt, left, right);
1296                 if (cmp)
1297                         break;
1298         }
1299
1300         return cmp;
1301 }
1302
1303 void hist_entry__delete(struct hist_entry *he)
1304 {
1305         struct hist_entry_ops *ops = he->ops;
1306
1307         thread__zput(he->thread);
1308         map__zput(he->ms.map);
1309
1310         if (he->branch_info) {
1311                 map__zput(he->branch_info->from.ms.map);
1312                 map__zput(he->branch_info->to.ms.map);
1313                 free_srcline(he->branch_info->srcline_from);
1314                 free_srcline(he->branch_info->srcline_to);
1315                 zfree(&he->branch_info);
1316         }
1317
1318         if (he->mem_info) {
1319                 map__zput(he->mem_info->iaddr.ms.map);
1320                 map__zput(he->mem_info->daddr.ms.map);
1321                 mem_info__zput(he->mem_info);
1322         }
1323
1324         if (he->block_info)
1325                 block_info__zput(he->block_info);
1326
1327         zfree(&he->res_samples);
1328         zfree(&he->stat_acc);
1329         free_srcline(he->srcline);
1330         if (he->srcfile && he->srcfile[0])
1331                 zfree(&he->srcfile);
1332         free_callchain(he->callchain);
1333         zfree(&he->trace_output);
1334         zfree(&he->raw_data);
1335         ops->free(he);
1336 }
1337
1338 /*
1339  * If this is not the last column, then we need to pad it according to the
1340  * pre-calculated max length for this column, otherwise don't bother adding
1341  * spaces because that would break viewing this with, for instance, 'less',
1342  * which would show tons of trailing spaces when a long C++ demangled method
1343  * name is sampled.
1344  */
1345 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1346                                    struct perf_hpp_fmt *fmt, int printed)
1347 {
1348         if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1349                 const int width = fmt->width(fmt, hpp, he->hists);
1350                 if (printed < width) {
1351                         advance_hpp(hpp, printed);
1352                         printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1353                 }
1354         }
1355
1356         return printed;
1357 }
1358
1359 /*
1360  * collapse the histogram
1361  */
1362
1363 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1364 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1365                                        enum hist_filter type);
1366
1367 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1368
1369 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1370 {
1371         return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1372 }
1373
1374 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1375                                                 enum hist_filter type,
1376                                                 fmt_chk_fn check)
1377 {
1378         struct perf_hpp_fmt *fmt;
1379         bool type_match = false;
1380         struct hist_entry *parent = he->parent_he;
1381
1382         switch (type) {
1383         case HIST_FILTER__THREAD:
1384                 if (symbol_conf.comm_list == NULL &&
1385                     symbol_conf.pid_list == NULL &&
1386                     symbol_conf.tid_list == NULL)
1387                         return;
1388                 break;
1389         case HIST_FILTER__DSO:
1390                 if (symbol_conf.dso_list == NULL)
1391                         return;
1392                 break;
1393         case HIST_FILTER__SYMBOL:
1394                 if (symbol_conf.sym_list == NULL)
1395                         return;
1396                 break;
1397         case HIST_FILTER__PARENT:
1398         case HIST_FILTER__GUEST:
1399         case HIST_FILTER__HOST:
1400         case HIST_FILTER__SOCKET:
1401         case HIST_FILTER__C2C:
1402         default:
1403                 return;
1404         }
1405
1406         /* if it's filtered by its own fmt, it has to have filter bits */
1407         perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1408                 if (check(fmt)) {
1409                         type_match = true;
1410                         break;
1411                 }
1412         }
1413
1414         if (type_match) {
1415                 /*
1416                  * If the filter is for the current level entry, propagate
1417                  * the filter marker to parents.  The marker bit was
1418                  * already set by default so it only needs to clear
1419                  * non-filtered entries.
1420                  */
1421                 if (!(he->filtered & (1 << type))) {
1422                         while (parent) {
1423                                 parent->filtered &= ~(1 << type);
1424                                 parent = parent->parent_he;
1425                         }
1426                 }
1427         } else {
1428                 /*
1429                  * If the current entry doesn't have matching formats, set
1430                  * the filter marker for upper level entries.  It will be
1431                  * cleared if its lower level entries are not filtered.
1432                  *
1433                  * For lower-level entries, each inherits its parent's
1434                  * filter bit so that lower level entries of a
1435                  * non-filtered entry won't set the filter marker.
1436                  */
1437                 if (parent == NULL)
1438                         he->filtered |= (1 << type);
1439                 else
1440                         he->filtered |= (parent->filtered & (1 << type));
1441         }
1442 }
1443
1444 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1445 {
1446         hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1447                                             check_thread_entry);
1448
1449         hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1450                                             perf_hpp__is_dso_entry);
1451
1452         hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1453                                             perf_hpp__is_sym_entry);
1454
1455         hists__apply_filters(he->hists, he);
1456 }
1457
1458 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1459                                                  struct rb_root_cached *root,
1460                                                  struct hist_entry *he,
1461                                                  struct hist_entry *parent_he,
1462                                                  struct perf_hpp_list *hpp_list)
1463 {
1464         struct rb_node **p = &root->rb_root.rb_node;
1465         struct rb_node *parent = NULL;
1466         struct hist_entry *iter, *new;
1467         struct perf_hpp_fmt *fmt;
1468         int64_t cmp;
1469         bool leftmost = true;
1470
1471         while (*p != NULL) {
1472                 parent = *p;
1473                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1474
1475                 cmp = 0;
1476                 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1477                         cmp = fmt->collapse(fmt, iter, he);
1478                         if (cmp)
1479                                 break;
1480                 }
1481
1482                 if (!cmp) {
1483                         he_stat__add_stat(&iter->stat, &he->stat);
1484                         return iter;
1485                 }
1486
1487                 if (cmp < 0)
1488                         p = &parent->rb_left;
1489                 else {
1490                         p = &parent->rb_right;
1491                         leftmost = false;
1492                 }
1493         }
1494
1495         new = hist_entry__new(he, true);
1496         if (new == NULL)
1497                 return NULL;
1498
1499         hists->nr_entries++;
1500
1501         /* save related format list for output */
1502         new->hpp_list = hpp_list;
1503         new->parent_he = parent_he;
1504
1505         hist_entry__apply_hierarchy_filters(new);
1506
1507         /* some fields are now passed to 'new' */
1508         perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1509                 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1510                         he->trace_output = NULL;
1511                 else
1512                         new->trace_output = NULL;
1513
1514                 if (perf_hpp__is_srcline_entry(fmt))
1515                         he->srcline = NULL;
1516                 else
1517                         new->srcline = NULL;
1518
1519                 if (perf_hpp__is_srcfile_entry(fmt))
1520                         he->srcfile = NULL;
1521                 else
1522                         new->srcfile = NULL;
1523         }
1524
1525         rb_link_node(&new->rb_node_in, parent, p);
1526         rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1527         return new;
1528 }
1529
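/*
 * Build the full hierarchy for 'he': insert one copy per enabled sort level
 * (skipping overhead and elided columns), chaining each level under the
 * previous one.  The original 'he' is always consumed; returns 0 on success
 * or -1 on allocation/callchain-merge failure.
 */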
1530 static int hists__hierarchy_insert_entry(struct hists *hists,
1531                                          struct rb_root_cached *root,
1532                                          struct hist_entry *he)
1533 {
1534         struct perf_hpp_list_node *node;
1535         struct hist_entry *new_he = NULL;
1536         struct hist_entry *parent = NULL;
1537         int depth = 0;
1538         int ret = 0;
1539
1540         list_for_each_entry(node, &hists->hpp_formats, list) {
1541                 /* skip period (overhead) and elided columns */
1542                 if (node->level == 0 || node->skip)
1543                         continue;
1544
1545                 /* insert copy of 'he' for each fmt into the hierarchy */
1546                 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1547                 if (new_he == NULL) {
1548                         ret = -1;
1549                         break;
1550                 }
1551
1552                 root = &new_he->hroot_in;
1553                 new_he->depth = depth++;
1554                 parent = new_he;
1555         }
1556
1557         if (new_he) {
1558                 new_he->leaf = true;
1559
1560                 if (hist_entry__has_callchains(new_he) &&
1561                     symbol_conf.use_callchain) {
1562                         callchain_cursor_reset(&callchain_cursor);
1563                         if (callchain_merge(&callchain_cursor,
1564                                             new_he->callchain,
1565                                             he->callchain) < 0)
1566                                 ret = -1;
1567                 }
1568         }
1569
1570         /* 'he' is no longer used */
1571         hist_entry__delete(he);
1572
1573         /* return 0 (or -1 on error) since filters were already applied above */
1574         return ret;
1575 }
1576
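/*
 * Merge 'he' into the collapsed tree: returns 1 if 'he' was linked in as a
 * new entry, 0 if it was merged into an existing entry (and freed), or -1
 * if the callchain merge failed.  In hierarchy mode this delegates to
 * hists__hierarchy_insert_entry(), which always consumes 'he'.
 */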
1577 static int hists__collapse_insert_entry(struct hists *hists,
1578                                         struct rb_root_cached *root,
1579                                         struct hist_entry *he)
1580 {
1581         struct rb_node **p = &root->rb_root.rb_node;
1582         struct rb_node *parent = NULL;
1583         struct hist_entry *iter;
1584         int64_t cmp;
1585         bool leftmost = true;
1586
1587         if (symbol_conf.report_hierarchy)
1588                 return hists__hierarchy_insert_entry(hists, root, he);
1589
1590         while (*p != NULL) {
1591                 parent = *p;
1592                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1593
1594                 cmp = hist_entry__collapse(iter, he);
1595
1596                 if (!cmp) {
1597                         int ret = 0;
1598
1599                         he_stat__add_stat(&iter->stat, &he->stat);
1600                         if (symbol_conf.cumulate_callchain)
1601                                 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1602
1603                         if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1604                                 callchain_cursor_reset(&callchain_cursor);
1605                                 if (callchain_merge(&callchain_cursor,
1606                                                     iter->callchain,
1607                                                     he->callchain) < 0)
1608                                         ret = -1;
1609                         }
1610                         hist_entry__delete(he);
1611                         return ret;
1612                 }
1613
1614                 if (cmp < 0)
1615                         p = &(*p)->rb_left;
1616                 else {
1617                         p = &(*p)->rb_right;
1618                         leftmost = false;
1619                 }
1620         }
1621         hists->nr_entries++;
1622
1623         rb_link_node(&he->rb_node_in, parent, p);
1624         rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1625         return 1;
1626 }
1627
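/*
 * Rotate the double-buffered input trees: return the tree that new entries
 * were being added to and make the other one current, so collapsing can
 * consume one tree while new samples keep going into the other.
 */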
1628 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1629 {
1630         struct rb_root_cached *root;
1631
1632         pthread_mutex_lock(&hists->lock);
1633
1634         root = hists->entries_in;
1635         if (++hists->entries_in > &hists->entries_in_array[1])
1636                 hists->entries_in = &hists->entries_in_array[0];
1637
1638         pthread_mutex_unlock(&hists->lock);
1639
1640         return root;
1641 }
1642
1643 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1644 {
1645         hists__filter_entry_by_dso(hists, he);
1646         hists__filter_entry_by_thread(hists, he);
1647         hists__filter_entry_by_symbol(hists, he);
1648         hists__filter_entry_by_socket(hists, he);
1649 }
1650
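/*
 * Merge all entries from the current input tree into hists->entries_collapsed,
 * applying the active filters to entries that were not merged into an
 * existing one.
 *
 * A typical consumer does roughly this (a sketch only; see builtin-report.c
 * for the real sequence):
 *
 *	evlist__for_each_entry(session->evlist, pos) {
 *		hists__collapse_resort(evsel__hists(pos), &prog);
 *		evsel__output_resort(pos, &prog);
 *	}
 */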
1651 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1652 {
1653         struct rb_root_cached *root;
1654         struct rb_node *next;
1655         struct hist_entry *n;
1656         int ret;
1657
1658         if (!hists__has(hists, need_collapse))
1659                 return 0;
1660
1661         hists->nr_entries = 0;
1662
1663         root = hists__get_rotate_entries_in(hists);
1664
1665         next = rb_first_cached(root);
1666
1667         while (next) {
1668                 if (session_done())
1669                         break;
1670                 n = rb_entry(next, struct hist_entry, rb_node_in);
1671                 next = rb_next(&n->rb_node_in);
1672
1673                 rb_erase_cached(&n->rb_node_in, root);
1674                 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1675                 if (ret < 0)
1676                         return -1;
1677
1678                 if (ret) {
1679                         /*
1680                          * If it wasn't combined with one of the entries already
1681                          * collapsed, we need to apply the filters that may have
1682                          * been set by, say, the hist_browser.
1683                          */
1684                         hists__apply_filters(hists, n);
1685                 }
1686                 if (prog)
1687                         ui_progress__update(prog, 1);
1688         }
1689         return 0;
1690 }
1691
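/*
 * Compare two entries according to the configured output sort order,
 * returning the first non-zero comparison (columns that should be skipped
 * for these hists are ignored).
 */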
1692 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1693 {
1694         struct hists *hists = a->hists;
1695         struct perf_hpp_fmt *fmt;
1696         int64_t cmp = 0;
1697
1698         hists__for_each_sort_list(hists, fmt) {
1699                 if (perf_hpp__should_skip(fmt, a->hists))
1700                         continue;
1701
1702                 cmp = fmt->sort(fmt, a, b);
1703                 if (cmp)
1704                         break;
1705         }
1706
1707         return cmp;
1708 }
1709
1710 static void hists__reset_filter_stats(struct hists *hists)
1711 {
1712         hists->nr_non_filtered_entries = 0;
1713         hists->stats.total_non_filtered_period = 0;
1714 }
1715
1716 void hists__reset_stats(struct hists *hists)
1717 {
1718         hists->nr_entries = 0;
1719         hists->stats.total_period = 0;
1720
1721         hists__reset_filter_stats(hists);
1722 }
1723
1724 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1725 {
1726         hists->nr_non_filtered_entries++;
1727         hists->stats.total_non_filtered_period += h->stat.period;
1728 }
1729
1730 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1731 {
1732         if (!h->filtered)
1733                 hists__inc_filter_stats(hists, h);
1734
1735         hists->nr_entries++;
1736         hists->stats.total_period += h->stat.period;
1737 }
1738
1739 static void hierarchy_recalc_total_periods(struct hists *hists)
1740 {
1741         struct rb_node *node;
1742         struct hist_entry *he;
1743
1744         node = rb_first_cached(&hists->entries);
1745
1746         hists->stats.total_period = 0;
1747         hists->stats.total_non_filtered_period = 0;
1748
1749         /*
1750          * Recalculate the total period using top-level entries only,
1751          * since lower-level entries only see non-filtered entries
1752          * while upper-level entries hold the sum of both.
1753          */
1754         while (node) {
1755                 he = rb_entry(node, struct hist_entry, rb_node);
1756                 node = rb_next(node);
1757
1758                 hists->stats.total_period += he->stat.period;
1759                 if (!he->filtered)
1760                         hists->stats.total_non_filtered_period += he->stat.period;
1761         }
1762 }
1763
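/*
 * Link 'he' into one level of the output hierarchy in display (sort) order
 * and let dynamic columns update their width for it.
 */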
1764 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1765                                           struct hist_entry *he)
1766 {
1767         struct rb_node **p = &root->rb_root.rb_node;
1768         struct rb_node *parent = NULL;
1769         struct hist_entry *iter;
1770         struct perf_hpp_fmt *fmt;
1771         bool leftmost = true;
1772
1773         while (*p != NULL) {
1774                 parent = *p;
1775                 iter = rb_entry(parent, struct hist_entry, rb_node);
1776
1777                 if (hist_entry__sort(he, iter) > 0)
1778                         p = &parent->rb_left;
1779                 else {
1780                         p = &parent->rb_right;
1781                         leftmost = false;
1782                 }
1783         }
1784
1785         rb_link_node(&he->rb_node, parent, p);
1786         rb_insert_color_cached(&he->rb_node, root, leftmost);
1787
1788         /* update column width of dynamic entry */
1789         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1790                 if (perf_hpp__is_dynamic_entry(fmt))
1791                         fmt->sort(fmt, he, NULL);
1792         }
1793 }
1794
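/*
 * Recursively move entries from the collapsed hierarchy tree into the output
 * tree, resorting each level for display, updating the entry counts and
 * column widths, and sorting callchains at the leaves.
 */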
1795 static void hists__hierarchy_output_resort(struct hists *hists,
1796                                            struct ui_progress *prog,
1797                                            struct rb_root_cached *root_in,
1798                                            struct rb_root_cached *root_out,
1799                                            u64 min_callchain_hits,
1800                                            bool use_callchain)
1801 {
1802         struct rb_node *node;
1803         struct hist_entry *he;
1804
1805         *root_out = RB_ROOT_CACHED;
1806         node = rb_first_cached(root_in);
1807
1808         while (node) {
1809                 he = rb_entry(node, struct hist_entry, rb_node_in);
1810                 node = rb_next(node);
1811
1812                 hierarchy_insert_output_entry(root_out, he);
1813
1814                 if (prog)
1815                         ui_progress__update(prog, 1);
1816
1817                 hists->nr_entries++;
1818                 if (!he->filtered) {
1819                         hists->nr_non_filtered_entries++;
1820                         hists__calc_col_len(hists, he);
1821                 }
1822
1823                 if (!he->leaf) {
1824                         hists__hierarchy_output_resort(hists, prog,
1825                                                        &he->hroot_in,
1826                                                        &he->hroot_out,
1827                                                        min_callchain_hits,
1828                                                        use_callchain);
1829                         continue;
1830                 }
1831
1832                 if (!use_callchain)
1833                         continue;
1834
1835                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1836                         u64 total = he->stat.period;
1837
1838                         if (symbol_conf.cumulate_callchain)
1839                                 total = he->stat_acc->period;
1840
1841                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1842                 }
1843
1844                 callchain_param.sort(&he->sorted_chain, he->callchain,
1845                                      min_callchain_hits, &callchain_param);
1846         }
1847 }
1848
1849 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1850                                          struct hist_entry *he,
1851                                          u64 min_callchain_hits,
1852                                          bool use_callchain)
1853 {
1854         struct rb_node **p = &entries->rb_root.rb_node;
1855         struct rb_node *parent = NULL;
1856         struct hist_entry *iter;
1857         struct perf_hpp_fmt *fmt;
1858         bool leftmost = true;
1859
1860         if (use_callchain) {
1861                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1862                         u64 total = he->stat.period;
1863
1864                         if (symbol_conf.cumulate_callchain)
1865                                 total = he->stat_acc->period;
1866
1867                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1868                 }
1869                 callchain_param.sort(&he->sorted_chain, he->callchain,
1870                                       min_callchain_hits, &callchain_param);
1871         }
1872
1873         while (*p != NULL) {
1874                 parent = *p;
1875                 iter = rb_entry(parent, struct hist_entry, rb_node);
1876
1877                 if (hist_entry__sort(he, iter) > 0)
1878                         p = &(*p)->rb_left;
1879                 else {
1880                         p = &(*p)->rb_right;
1881                         leftmost = false;
1882                 }
1883         }
1884
1885         rb_link_node(&he->rb_node, parent, p);
1886         rb_insert_color_cached(&he->rb_node, entries, leftmost);
1887
1888         perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1889                 if (perf_hpp__is_dynamic_entry(fmt) &&
1890                     perf_hpp__defined_dynamic_entry(fmt, he->hists))
1891                         fmt->sort(fmt, he, NULL);  /* update column width */
1892         }
1893 }
1894
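/*
 * Build hists->entries in display order from the collapsed (or input) tree;
 * in hierarchy mode the hierarchy variant above is used instead.  The
 * optional callback may return non-zero to leave an entry out of the output
 * tree.
 */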
1895 static void output_resort(struct hists *hists, struct ui_progress *prog,
1896                           bool use_callchain, hists__resort_cb_t cb,
1897                           void *cb_arg)
1898 {
1899         struct rb_root_cached *root;
1900         struct rb_node *next;
1901         struct hist_entry *n;
1902         u64 callchain_total;
1903         u64 min_callchain_hits;
1904
1905         callchain_total = hists->callchain_period;
1906         if (symbol_conf.filter_relative)
1907                 callchain_total = hists->callchain_non_filtered_period;
1908
1909         min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1910
1911         hists__reset_stats(hists);
1912         hists__reset_col_len(hists);
1913
1914         if (symbol_conf.report_hierarchy) {
1915                 hists__hierarchy_output_resort(hists, prog,
1916                                                &hists->entries_collapsed,
1917                                                &hists->entries,
1918                                                min_callchain_hits,
1919                                                use_callchain);
1920                 hierarchy_recalc_total_periods(hists);
1921                 return;
1922         }
1923
1924         if (hists__has(hists, need_collapse))
1925                 root = &hists->entries_collapsed;
1926         else
1927                 root = hists->entries_in;
1928
1929         next = rb_first_cached(root);
1930         hists->entries = RB_ROOT_CACHED;
1931
1932         while (next) {
1933                 n = rb_entry(next, struct hist_entry, rb_node_in);
1934                 next = rb_next(&n->rb_node_in);
1935
1936                 if (cb && cb(n, cb_arg))
1937                         continue;
1938
1939                 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1940                 hists__inc_stats(hists, n);
1941
1942                 if (!n->filtered)
1943                         hists__calc_col_len(hists, n);
1944
1945                 if (prog)
1946                         ui_progress__update(prog, 1);
1947         }
1948 }
1949
1950 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1951                              hists__resort_cb_t cb, void *cb_arg)
1952 {
1953         bool use_callchain;
1954
1955         if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1956                 use_callchain = evsel__has_callchain(evsel);
1957         else
1958                 use_callchain = symbol_conf.use_callchain;
1959
1960         use_callchain |= symbol_conf.show_branchflag_count;
1961
1962         output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1963 }
1964
1965 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1966 {
1967         return evsel__output_resort_cb(evsel, prog, NULL, NULL);
1968 }
1969
1970 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1971 {
1972         output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1973 }
1974
1975 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1976                              hists__resort_cb_t cb)
1977 {
1978         output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1979 }
1980
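/*
 * Helpers to walk the hierarchy output trees as a single flattened list:
 * can_goto_child() decides whether traversal should descend into an entry's
 * children, depending on its folding state or the forced move direction.
 */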
1981 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1982 {
1983         if (he->leaf || hmd == HMD_FORCE_SIBLING)
1984                 return false;
1985
1986         if (he->unfolded || hmd == HMD_FORCE_CHILD)
1987                 return true;
1988
1989         return false;
1990 }
1991
1992 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1993 {
1994         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1995
1996         while (can_goto_child(he, HMD_NORMAL)) {
1997                 node = rb_last(&he->hroot_out.rb_root);
1998                 he = rb_entry(node, struct hist_entry, rb_node);
1999         }
2000         return node;
2001 }
2002
2003 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
2004 {
2005         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2006
2007         if (can_goto_child(he, hmd))
2008                 node = rb_first_cached(&he->hroot_out);
2009         else
2010                 node = rb_next(node);
2011
2012         while (node == NULL) {
2013                 he = he->parent_he;
2014                 if (he == NULL)
2015                         break;
2016
2017                 node = rb_next(&he->rb_node);
2018         }
2019         return node;
2020 }
2021
2022 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2023 {
2024         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2025
2026         node = rb_prev(node);
2027         if (node)
2028                 return rb_hierarchy_last(node);
2029
2030         he = he->parent_he;
2031         if (he == NULL)
2032                 return NULL;
2033
2034         return &he->rb_node;
2035 }
2036
2037 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
2038 {
2039         struct rb_node *node;
2040         struct hist_entry *child;
2041         float percent;
2042
2043         if (he->leaf)
2044                 return false;
2045
2046         node = rb_first_cached(&he->hroot_out);
2047         child = rb_entry(node, struct hist_entry, rb_node);
2048
2049         while (node && child->filtered) {
2050                 node = rb_next(node);
2051                 child = rb_entry(node, struct hist_entry, rb_node);
2052         }
2053
2054         if (node)
2055                 percent = hist_entry__get_percent_limit(child);
2056         else
2057                 percent = 0;
2058
2059         return node && percent >= limit;
2060 }
2061
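/*
 * Clear the given filter bit on 'h'; in hierarchy mode also add its period
 * back to all ancestors and clear their filter bit.  If the entry is now
 * completely unfiltered, re-account it in the non-filtered stats and column
 * widths.
 */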
2062 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2063                                        enum hist_filter filter)
2064 {
2065         h->filtered &= ~(1 << filter);
2066
2067         if (symbol_conf.report_hierarchy) {
2068                 struct hist_entry *parent = h->parent_he;
2069
2070                 while (parent) {
2071                         he_stat__add_stat(&parent->stat, &h->stat);
2072
2073                         parent->filtered &= ~(1 << filter);
2074
2075                         if (parent->filtered)
2076                                 goto next;
2077
2078                         /* force fold unfiltered entry for simplicity */
2079                         parent->unfolded = false;
2080                         parent->has_no_entry = false;
2081                         parent->row_offset = 0;
2082                         parent->nr_rows = 0;
2083 next:
2084                         parent = parent->parent_he;
2085                 }
2086         }
2087
2088         if (h->filtered)
2089                 return;
2090
2091         /* force fold unfiltered entry for simplicity */
2092         h->unfolded = false;
2093         h->has_no_entry = false;
2094         h->row_offset = 0;
2095         h->nr_rows = 0;
2096
2097         hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2098
2099         hists__inc_filter_stats(hists, h);
2100         hists__calc_col_len(hists, h);
2101 }
2102
2103
2104 static bool hists__filter_entry_by_dso(struct hists *hists,
2105                                        struct hist_entry *he)
2106 {
2107         if (hists->dso_filter != NULL &&
2108             (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
2109                 he->filtered |= (1 << HIST_FILTER__DSO);
2110                 return true;
2111         }
2112
2113         return false;
2114 }
2115
2116 static bool hists__filter_entry_by_thread(struct hists *hists,
2117                                           struct hist_entry *he)
2118 {
2119         if (hists->thread_filter != NULL &&
2120             he->thread != hists->thread_filter) {
2121                 he->filtered |= (1 << HIST_FILTER__THREAD);
2122                 return true;
2123         }
2124
2125         return false;
2126 }
2127
2128 static bool hists__filter_entry_by_symbol(struct hists *hists,
2129                                           struct hist_entry *he)
2130 {
2131         if (hists->symbol_filter_str != NULL &&
2132             (!he->ms.sym || strstr(he->ms.sym->name,
2133                                    hists->symbol_filter_str) == NULL)) {
2134                 he->filtered |= (1 << HIST_FILTER__SYMBOL);
2135                 return true;
2136         }
2137
2138         return false;
2139 }
2140
2141 static bool hists__filter_entry_by_socket(struct hists *hists,
2142                                           struct hist_entry *he)
2143 {
2144         if ((hists->socket_filter > -1) &&
2145             (he->socket != hists->socket_filter)) {
2146                 he->filtered |= (1 << HIST_FILTER__SOCKET);
2147                 return true;
2148         }
2149
2150         return false;
2151 }
2152
2153 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2154
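/*
 * Re-evaluate one filter type over all output entries: entries the filter
 * rejects keep their marker, all others get it cleared and are re-accounted
 * via hists__remove_entry_filter().
 */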
2155 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2156 {
2157         struct rb_node *nd;
2158
2159         hists->stats.nr_non_filtered_samples = 0;
2160
2161         hists__reset_filter_stats(hists);
2162         hists__reset_col_len(hists);
2163
2164         for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2165                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2166
2167                 if (filter(hists, h))
2168                         continue;
2169
2170                 hists__remove_entry_filter(hists, h, type);
2171         }
2172 }
2173
2174 static void resort_filtered_entry(struct rb_root_cached *root,
2175                                   struct hist_entry *he)
2176 {
2177         struct rb_node **p = &root->rb_root.rb_node;
2178         struct rb_node *parent = NULL;
2179         struct hist_entry *iter;
2180         struct rb_root_cached new_root = RB_ROOT_CACHED;
2181         struct rb_node *nd;
2182         bool leftmost = true;
2183
2184         while (*p != NULL) {
2185                 parent = *p;
2186                 iter = rb_entry(parent, struct hist_entry, rb_node);
2187
2188                 if (hist_entry__sort(he, iter) > 0)
2189                         p = &(*p)->rb_left;
2190                 else {
2191                         p = &(*p)->rb_right;
2192                         leftmost = false;
2193                 }
2194         }
2195
2196         rb_link_node(&he->rb_node, parent, p);
2197         rb_insert_color_cached(&he->rb_node, root, leftmost);
2198
2199         if (he->leaf || he->filtered)
2200                 return;
2201
2202         nd = rb_first_cached(&he->hroot_out);
2203         while (nd) {
2204                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2205
2206                 nd = rb_next(nd);
2207                 rb_erase_cached(&h->rb_node, &he->hroot_out);
2208
2209                 resort_filtered_entry(&new_root, h);
2210         }
2211
2212         he->hroot_out = new_root;
2213 }
2214
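/*
 * Apply a filter in hierarchy mode: walk the whole hierarchy, marking or
 * clearing the filter bit per entry as described in the cases below, then
 * recalculate the totals and resort the output trees.
 */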
2215 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2216 {
2217         struct rb_node *nd;
2218         struct rb_root_cached new_root = RB_ROOT_CACHED;
2219
2220         hists->stats.nr_non_filtered_samples = 0;
2221
2222         hists__reset_filter_stats(hists);
2223         hists__reset_col_len(hists);
2224
2225         nd = rb_first_cached(&hists->entries);
2226         while (nd) {
2227                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2228                 int ret;
2229
2230                 ret = hist_entry__filter(h, type, arg);
2231
2232                 /*
2233                  * case 1. non-matching type
2234                  * zero out the period, set filter marker and move to child
2235                  */
2236                 if (ret < 0) {
2237                         memset(&h->stat, 0, sizeof(h->stat));
2238                         h->filtered |= (1 << type);
2239
2240                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2241                 }
2242                 /*
2243                  * case 2. matched type (filter out)
2244                  * set filter marker and move to next
2245                  */
2246                 else if (ret == 1) {
2247                         h->filtered |= (1 << type);
2248
2249                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2250                 }
2251                 /*
2252                  * case 3. ok (not filtered)
2253                  * add period to hists and parents, erase the filter marker
2254                  * and move to next sibling
2255                  */
2256                 else {
2257                         hists__remove_entry_filter(hists, h, type);
2258
2259                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2260                 }
2261         }
2262
2263         hierarchy_recalc_total_periods(hists);
2264
2265         /*
2266          * Resort the output after applying a new filter, since a filter in a
2267          * lower hierarchy level can change periods in an upper hierarchy level.
2268          */
2269         nd = rb_first_cached(&hists->entries);
2270         while (nd) {
2271                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2272
2273                 nd = rb_next(nd);
2274                 rb_erase_cached(&h->rb_node, &hists->entries);
2275
2276                 resort_filtered_entry(&new_root, h);
2277         }
2278
2279         hists->entries = new_root;
2280 }
2281
2282 void hists__filter_by_thread(struct hists *hists)
2283 {
2284         if (symbol_conf.report_hierarchy)
2285                 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2286                                         hists->thread_filter);
2287         else
2288                 hists__filter_by_type(hists, HIST_FILTER__THREAD,
2289                                       hists__filter_entry_by_thread);
2290 }
2291
2292 void hists__filter_by_dso(struct hists *hists)
2293 {
2294         if (symbol_conf.report_hierarchy)
2295                 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2296                                         hists->dso_filter);
2297         else
2298                 hists__filter_by_type(hists, HIST_FILTER__DSO,
2299                                       hists__filter_entry_by_dso);
2300 }
2301
2302 void hists__filter_by_symbol(struct hists *hists)
2303 {
2304         if (symbol_conf.report_hierarchy)
2305                 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2306                                         hists->symbol_filter_str);
2307         else
2308                 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2309                                       hists__filter_entry_by_symbol);
2310 }
2311
2312 void hists__filter_by_socket(struct hists *hists)
2313 {
2314         if (symbol_conf.report_hierarchy)
2315                 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2316                                         &hists->socket_filter);
2317         else
2318                 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2319                                       hists__filter_entry_by_socket);
2320 }
2321
2322 void events_stats__inc(struct events_stats *stats, u32 type)
2323 {
2324         ++stats->nr_events[0];
2325         ++stats->nr_events[type];
2326 }
2327
2328 static void hists_stats__inc(struct hists_stats *stats)
2329 {
2330         ++stats->nr_samples;
2331 }
2332
2333 void hists__inc_nr_events(struct hists *hists)
2334 {
2335         hists_stats__inc(&hists->stats);
2336 }
2337
2338 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2339 {
2340         hists_stats__inc(&hists->stats);
2341         if (!filtered)
2342                 hists->stats.nr_non_filtered_samples++;
2343 }
2344
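/*
 * Create a zero-period placeholder entry matching 'pair' in 'hists' (used by
 * hists__link() so every entry in the other hists has a counterpart in the
 * leader), or return the already existing matching entry.
 */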
2345 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2346                                                  struct hist_entry *pair)
2347 {
2348         struct rb_root_cached *root;
2349         struct rb_node **p;
2350         struct rb_node *parent = NULL;
2351         struct hist_entry *he;
2352         int64_t cmp;
2353         bool leftmost = true;
2354
2355         if (hists__has(hists, need_collapse))
2356                 root = &hists->entries_collapsed;
2357         else
2358                 root = hists->entries_in;
2359
2360         p = &root->rb_root.rb_node;
2361
2362         while (*p != NULL) {
2363                 parent = *p;
2364                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2365
2366                 cmp = hist_entry__collapse(he, pair);
2367
2368                 if (!cmp)
2369                         goto out;
2370
2371                 if (cmp < 0)
2372                         p = &(*p)->rb_left;
2373                 else {
2374                         p = &(*p)->rb_right;
2375                         leftmost = false;
2376                 }
2377         }
2378
2379         he = hist_entry__new(pair, true);
2380         if (he) {
2381                 memset(&he->stat, 0, sizeof(he->stat));
2382                 he->hists = hists;
2383                 if (symbol_conf.cumulate_callchain)
2384                         memset(he->stat_acc, 0, sizeof(he->stat));
2385                 rb_link_node(&he->rb_node_in, parent, p);
2386                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2387                 hists__inc_stats(hists, he);
2388                 he->dummy = true;
2389         }
2390 out:
2391         return he;
2392 }
2393
2394 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2395                                                     struct rb_root_cached *root,
2396                                                     struct hist_entry *pair)
2397 {
2398         struct rb_node **p;
2399         struct rb_node *parent = NULL;
2400         struct hist_entry *he;
2401         struct perf_hpp_fmt *fmt;
2402         bool leftmost = true;
2403
2404         p = &root->rb_root.rb_node;
2405         while (*p != NULL) {
2406                 int64_t cmp = 0;
2407
2408                 parent = *p;
2409                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2410
2411                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2412                         cmp = fmt->collapse(fmt, he, pair);
2413                         if (cmp)
2414                                 break;
2415                 }
2416                 if (!cmp)
2417                         goto out;
2418
2419                 if (cmp < 0)
2420                         p = &parent->rb_left;
2421                 else {
2422                         p = &parent->rb_right;
2423                         leftmost = false;
2424                 }
2425         }
2426
2427         he = hist_entry__new(pair, true);
2428         if (he) {
2429                 rb_link_node(&he->rb_node_in, parent, p);
2430                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2431
2432                 he->dummy = true;
2433                 he->hists = hists;
2434                 memset(&he->stat, 0, sizeof(he->stat));
2435                 hists__inc_stats(hists, he);
2436         }
2437 out:
2438         return he;
2439 }
2440
2441 static struct hist_entry *hists__find_entry(struct hists *hists,
2442                                             struct hist_entry *he)
2443 {
2444         struct rb_node *n;
2445
2446         if (hists__has(hists, need_collapse))
2447                 n = hists->entries_collapsed.rb_root.rb_node;
2448         else
2449                 n = hists->entries_in->rb_root.rb_node;
2450
2451         while (n) {
2452                 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2453                 int64_t cmp = hist_entry__collapse(iter, he);
2454
2455                 if (cmp < 0)
2456                         n = n->rb_left;
2457                 else if (cmp > 0)
2458                         n = n->rb_right;
2459                 else
2460                         return iter;
2461         }
2462
2463         return NULL;
2464 }
2465
2466 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2467                                                       struct hist_entry *he)
2468 {
2469         struct rb_node *n = root->rb_root.rb_node;
2470
2471         while (n) {
2472                 struct hist_entry *iter;
2473                 struct perf_hpp_fmt *fmt;
2474                 int64_t cmp = 0;
2475
2476                 iter = rb_entry(n, struct hist_entry, rb_node_in);
2477                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2478                         cmp = fmt->collapse(fmt, iter, he);
2479                         if (cmp)
2480                                 break;
2481                 }
2482
2483                 if (cmp < 0)
2484                         n = n->rb_left;
2485                 else if (cmp > 0)
2486                         n = n->rb_right;
2487                 else
2488                         return iter;
2489         }
2490
2491         return NULL;
2492 }
2493
2494 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2495                                    struct rb_root_cached *other_root)
2496 {
2497         struct rb_node *nd;
2498         struct hist_entry *pos, *pair;
2499
2500         for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2501                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2502                 pair = hists__find_hierarchy_entry(other_root, pos);
2503
2504                 if (pair) {
2505                         hist_entry__add_pair(pair, pos);
2506                         hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2507                 }
2508         }
2509 }
2510
2511 /*
2512  * Look for pairs to link to the leader buckets (hist_entries):
2513  */
2514 void hists__match(struct hists *leader, struct hists *other)
2515 {
2516         struct rb_root_cached *root;
2517         struct rb_node *nd;
2518         struct hist_entry *pos, *pair;
2519
2520         if (symbol_conf.report_hierarchy) {
2521                 /* hierarchy report always collapses entries */
2522                 return hists__match_hierarchy(&leader->entries_collapsed,
2523                                               &other->entries_collapsed);
2524         }
2525
2526         if (hists__has(leader, need_collapse))
2527                 root = &leader->entries_collapsed;
2528         else
2529                 root = leader->entries_in;
2530
2531         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2532                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2533                 pair = hists__find_entry(other, pos);
2534
2535                 if (pair)
2536                         hist_entry__add_pair(pair, pos);
2537         }
2538 }
2539
2540 static int hists__link_hierarchy(struct hists *leader_hists,
2541                                  struct hist_entry *parent,
2542                                  struct rb_root_cached *leader_root,
2543                                  struct rb_root_cached *other_root)
2544 {
2545         struct rb_node *nd;
2546         struct hist_entry *pos, *leader;
2547
2548         for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2549                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2550
2551                 if (hist_entry__has_pairs(pos)) {
2552                         bool found = false;
2553
2554                         list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2555                                 if (leader->hists == leader_hists) {
2556                                         found = true;
2557                                         break;
2558                                 }
2559                         }
2560                         if (!found)
2561                                 return -1;
2562                 } else {
2563                         leader = add_dummy_hierarchy_entry(leader_hists,
2564                                                            leader_root, pos);
2565                         if (leader == NULL)
2566                                 return -1;
2567
2568                         /* the parent comes from the leader hierarchy, not from 'pos' */
2569                         leader->parent_he = parent;
2570
2571                         hist_entry__add_pair(pos, leader);
2572                 }
2573
2574                 if (!pos->leaf) {
2575                         if (hists__link_hierarchy(leader_hists, leader,
2576                                                   &leader->hroot_in,
2577                                                   &pos->hroot_in) < 0)
2578                                 return -1;
2579                 }
2580         }
2581         return 0;
2582 }
2583
2584 /*
2585  * Look for entries in the other hists that are not present in the leader;
2586  * if we find them, just add a dummy entry on the leader hists, with period=0,
2587  * nr_events=0, to serve as the list header.
2588  */
2589 int hists__link(struct hists *leader, struct hists *other)
2590 {
2591         struct rb_root_cached *root;
2592         struct rb_node *nd;
2593         struct hist_entry *pos, *pair;
2594
2595         if (symbol_conf.report_hierarchy) {
2596                 /* hierarchy report always collapses entries */
2597                 return hists__link_hierarchy(leader, NULL,
2598                                              &leader->entries_collapsed,
2599                                              &other->entries_collapsed);
2600         }
2601
2602         if (hists__has(other, need_collapse))
2603                 root = &other->entries_collapsed;
2604         else
2605                 root = other->entries_in;
2606
2607         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2608                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2609
2610                 if (!hist_entry__has_pairs(pos)) {
2611                         pair = hists__add_dummy_entry(leader, pos);
2612                         if (pair == NULL)
2613                                 return -1;
2614                         hist_entry__add_pair(pos, pair);
2615                 }
2616         }
2617
2618         return 0;
2619 }
2620
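/*
 * Undo hists__match()/hists__link(): drop all pair links from this hists'
 * entries.
 */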
2621 int hists__unlink(struct hists *hists)
2622 {
2623         struct rb_root_cached *root;
2624         struct rb_node *nd;
2625         struct hist_entry *pos;
2626
2627         if (hists__has(hists, need_collapse))
2628                 root = &hists->entries_collapsed;
2629         else
2630                 root = hists->entries_in;
2631
2632         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2633                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2634                 list_del_init(&pos->pairs.node);
2635         }
2636
2637         return 0;
2638 }
2639
2640 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2641                           struct perf_sample *sample, bool nonany_branch_mode,
2642                           u64 *total_cycles)
2643 {
2644         struct branch_info *bi;
2645         struct branch_entry *entries = perf_sample__branch_entries(sample);
2646
2647         /* If we have branch cycles always annotate them. */
2648         if (bs && bs->nr && entries[0].flags.cycles) {
2649                 int i;
2650
2651                 bi = sample__resolve_bstack(sample, al);
2652                 if (bi) {
2653                         struct addr_map_symbol *prev = NULL;
2654
2655                         /*
2656                          * Ignore errors; we still want to process the
2657                          * other entries.
2658                          *
2659                          * For non-standard branch modes always
2660                          * force no IPC (prev == NULL).
2661                          *
2662                          * Note that perf stores branches reversed from
2663                          * program order!
2664                          */
2665                         for (i = bs->nr - 1; i >= 0; i--) {
2666                                 addr_map_symbol__account_cycles(&bi[i].from,
2667                                         nonany_branch_mode ? NULL : prev,
2668                                         bi[i].flags.cycles);
2669                                 prev = &bi[i].to;
2670
2671                                 if (total_cycles)
2672                                         *total_cycles += bi[i].flags.cycles;
2673                         }
2674                         free(bi);
2675                 }
2676         }
2677 }
2678
2679 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
2680                                  bool skip_empty)
2681 {
2682         struct evsel *pos;
2683         size_t ret = 0;
2684
2685         evlist__for_each_entry(evlist, pos) {
2686                 struct hists *hists = evsel__hists(pos);
2687
2688                 if (skip_empty && !hists->stats.nr_samples)
2689                         continue;
2690
2691                 ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
2692                 ret += fprintf(fp, "%16s events: %10d\n",
2693                                "SAMPLE", hists->stats.nr_samples);
2694         }
2695
2696         return ret;
2697 }
2698
2699
2700 u64 hists__total_period(struct hists *hists)
2701 {
2702         return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2703                 hists->stats.total_period;
2704 }
2705
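/*
 * Format the summary line shown above the hists browser / report output:
 * sample and event counts for the evsel (or its group), plus any active
 * UID/thread/DSO/socket filters.
 */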
2706 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2707 {
2708         char unit;
2709         int printed;
2710         const struct dso *dso = hists->dso_filter;
2711         struct thread *thread = hists->thread_filter;
2712         int socket_id = hists->socket_filter;
2713         unsigned long nr_samples = hists->stats.nr_samples;
2714         u64 nr_events = hists->stats.total_period;
2715         struct evsel *evsel = hists_to_evsel(hists);
2716         const char *ev_name = evsel__name(evsel);
2717         char buf[512], sample_freq_str[64] = "";
2718         size_t buflen = sizeof(buf);
2719         char ref[30] = " show reference callgraph, ";
2720         bool enable_ref = false;
2721
2722         if (symbol_conf.filter_relative) {
2723                 nr_samples = hists->stats.nr_non_filtered_samples;
2724                 nr_events = hists->stats.total_non_filtered_period;
2725         }
2726
2727         if (evsel__is_group_event(evsel)) {
2728                 struct evsel *pos;
2729
2730                 evsel__group_desc(evsel, buf, buflen);
2731                 ev_name = buf;
2732
2733                 for_each_group_member(pos, evsel) {
2734                         struct hists *pos_hists = evsel__hists(pos);
2735
2736                         if (symbol_conf.filter_relative) {
2737                                 nr_samples += pos_hists->stats.nr_non_filtered_samples;
2738                                 nr_events += pos_hists->stats.total_non_filtered_period;
2739                         } else {
2740                                 nr_samples += pos_hists->stats.nr_samples;
2741                                 nr_events += pos_hists->stats.total_period;
2742                         }
2743                 }
2744         }
2745
2746         if (symbol_conf.show_ref_callgraph &&
2747             strstr(ev_name, "call-graph=no"))
2748                 enable_ref = true;
2749
2750         if (show_freq)
2751                 scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
2752
2753         nr_samples = convert_unit(nr_samples, &unit);
2754         printed = scnprintf(bf, size,
2755                            "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2756                            nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2757                            ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2758
2759
2760         if (hists->uid_filter_str)
2761                 printed += snprintf(bf + printed, size - printed,
2762                                     ", UID: %s", hists->uid_filter_str);
2763         if (thread) {
2764                 if (hists__has(hists, thread)) {
2765                         printed += scnprintf(bf + printed, size - printed,
2766                                     ", Thread: %s(%d)",
2767                                      (thread->comm_set ? thread__comm_str(thread) : ""),
2768                                     thread->tid);
2769                 } else {
2770                         printed += scnprintf(bf + printed, size - printed,
2771                                     ", Thread: %s",
2772                                      (thread->comm_set ? thread__comm_str(thread) : ""));
2773                 }
2774         }
2775         if (dso)
2776                 printed += scnprintf(bf + printed, size - printed,
2777                                     ", DSO: %s", dso->short_name);
2778         if (socket_id > -1)
2779                 printed += scnprintf(bf + printed, size - printed,
2780                                     ", Processor Socket: %d", socket_id);
2781
2782         return printed;
2783 }
2784
2785 int parse_filter_percentage(const struct option *opt __maybe_unused,
2786                             const char *arg, int unset __maybe_unused)
2787 {
2788         if (!strcmp(arg, "relative"))
2789                 symbol_conf.filter_relative = true;
2790         else if (!strcmp(arg, "absolute"))
2791                 symbol_conf.filter_relative = false;
2792         else {
2793                 pr_debug("Invalid percentage: %s\n", arg);
2794                 return -1;
2795         }
2796
2797         return 0;
2798 }
2799
2800 int perf_hist_config(const char *var, const char *value)
2801 {
2802         if (!strcmp(var, "hist.percentage"))
2803                 return parse_filter_percentage(NULL, value, 0);
2804
2805         return 0;
2806 }
2807
2808 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2809 {
2810         memset(hists, 0, sizeof(*hists));
2811         hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2812         hists->entries_in = &hists->entries_in_array[0];
2813         hists->entries_collapsed = RB_ROOT_CACHED;
2814         hists->entries = RB_ROOT_CACHED;
2815         pthread_mutex_init(&hists->lock, NULL);
2816         hists->socket_filter = -1;
2817         hists->hpp_list = hpp_list;
2818         INIT_LIST_HEAD(&hists->hpp_formats);
2819         return 0;
2820 }
2821
2822 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2823 {
2824         struct rb_node *node;
2825         struct hist_entry *he;
2826
2827         while (!RB_EMPTY_ROOT(&root->rb_root)) {
2828                 node = rb_first_cached(root);
2829                 rb_erase_cached(node, root);
2830
2831                 he = rb_entry(node, struct hist_entry, rb_node_in);
2832                 hist_entry__delete(he);
2833         }
2834 }
2835
2836 static void hists__delete_all_entries(struct hists *hists)
2837 {
2838         hists__delete_entries(hists);
2839         hists__delete_remaining_entries(&hists->entries_in_array[0]);
2840         hists__delete_remaining_entries(&hists->entries_in_array[1]);
2841         hists__delete_remaining_entries(&hists->entries_collapsed);
2842 }
2843
2844 static void hists_evsel__exit(struct evsel *evsel)
2845 {
2846         struct hists *hists = evsel__hists(evsel);
2847         struct perf_hpp_fmt *fmt, *pos;
2848         struct perf_hpp_list_node *node, *tmp;
2849
2850         hists__delete_all_entries(hists);
2851
2852         list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2853                 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2854                         list_del_init(&fmt->list);
2855                         free(fmt);
2856                 }
2857                 list_del_init(&node->list);
2858                 free(node);
2859         }
2860 }
2861
2862 static int hists_evsel__init(struct evsel *evsel)
2863 {
2864         struct hists *hists = evsel__hists(evsel);
2865
2866         __hists__init(hists, &perf_hpp_list);
2867         return 0;
2868 }
2869
2870 /*
2871  * The hist_entries stored in the rbtrees are freed by hists_evsel__exit()
2872  * above, via hists__delete_all_entries().
2873  */
2874
2875 int hists__init(void)
2876 {
2877         int err = evsel__object_config(sizeof(struct hists_evsel),
2878                                        hists_evsel__init, hists_evsel__exit);
2879         if (err)
2880                 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2881
2882         return err;
2883 }
2884
2885 void perf_hpp_list__init(struct perf_hpp_list *list)
2886 {
2887         INIT_LIST_HEAD(&list->fields);
2888         INIT_LIST_HEAD(&list->sorts);
2889 }