tools/perf/util/event.c
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <perf/cpumap.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <linux/perf_event.h>
#include <linux/zalloc.h>
#include "cpumap.h"
#include "dso.h"
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string2.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "time-utils.h"
#include <linux/ctype.h>
#include "map.h"
#include "util/namespaces.h"
#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"
#include "session.h"
#include "bpf-event.h"
#include "print_binary.h"
#include "tool.h"
#include "../perf.h"

static const char *perf_event__names[] = {
        [0]                                     = "TOTAL",
        [PERF_RECORD_MMAP]                      = "MMAP",
        [PERF_RECORD_MMAP2]                     = "MMAP2",
        [PERF_RECORD_LOST]                      = "LOST",
        [PERF_RECORD_COMM]                      = "COMM",
        [PERF_RECORD_EXIT]                      = "EXIT",
        [PERF_RECORD_THROTTLE]                  = "THROTTLE",
        [PERF_RECORD_UNTHROTTLE]                = "UNTHROTTLE",
        [PERF_RECORD_FORK]                      = "FORK",
        [PERF_RECORD_READ]                      = "READ",
        [PERF_RECORD_SAMPLE]                    = "SAMPLE",
        [PERF_RECORD_AUX]                       = "AUX",
        [PERF_RECORD_ITRACE_START]              = "ITRACE_START",
        [PERF_RECORD_LOST_SAMPLES]              = "LOST_SAMPLES",
        [PERF_RECORD_SWITCH]                    = "SWITCH",
        [PERF_RECORD_SWITCH_CPU_WIDE]           = "SWITCH_CPU_WIDE",
        [PERF_RECORD_NAMESPACES]                = "NAMESPACES",
        [PERF_RECORD_KSYMBOL]                   = "KSYMBOL",
        [PERF_RECORD_BPF_EVENT]                 = "BPF_EVENT",
        [PERF_RECORD_CGROUP]                    = "CGROUP",
        [PERF_RECORD_TEXT_POKE]                 = "TEXT_POKE",
        [PERF_RECORD_HEADER_ATTR]               = "ATTR",
        [PERF_RECORD_HEADER_EVENT_TYPE]         = "EVENT_TYPE",
        [PERF_RECORD_HEADER_TRACING_DATA]       = "TRACING_DATA",
        [PERF_RECORD_HEADER_BUILD_ID]           = "BUILD_ID",
        [PERF_RECORD_FINISHED_ROUND]            = "FINISHED_ROUND",
        [PERF_RECORD_ID_INDEX]                  = "ID_INDEX",
        [PERF_RECORD_AUXTRACE_INFO]             = "AUXTRACE_INFO",
        [PERF_RECORD_AUXTRACE]                  = "AUXTRACE",
        [PERF_RECORD_AUXTRACE_ERROR]            = "AUXTRACE_ERROR",
        [PERF_RECORD_THREAD_MAP]                = "THREAD_MAP",
        [PERF_RECORD_CPU_MAP]                   = "CPU_MAP",
        [PERF_RECORD_STAT_CONFIG]               = "STAT_CONFIG",
        [PERF_RECORD_STAT]                      = "STAT",
        [PERF_RECORD_STAT_ROUND]                = "STAT_ROUND",
        [PERF_RECORD_EVENT_UPDATE]              = "EVENT_UPDATE",
        [PERF_RECORD_TIME_CONV]                 = "TIME_CONV",
        [PERF_RECORD_HEADER_FEATURE]            = "FEATURE",
        [PERF_RECORD_COMPRESSED]                = "COMPRESSED",
};

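/*
 * Map a PERF_RECORD_* type to a printable name. Ids beyond the table are
 * reported as "INVALID", ids without an entry as "UNKNOWN".
 */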
const char *perf_event__name(unsigned int id)
{
        if (id >= ARRAY_SIZE(perf_event__names))
                return "INVALID";
        if (!perf_event__names[id])
                return "UNKNOWN";
        return perf_event__names[id];
}

struct process_symbol_args {
        const char *name;
        u64        start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
                          u64 start)
{
        struct process_symbol_args *args = arg;

        /*
         * Must be a function or at least an alias, as on PARISC64, where "_text"
         * is an 'A' (absolute) symbol at the same address as "_stext".
         */
        if (!(kallsyms__is_function(type) ||
              type == 'A') || strcmp(name, args->name))
                return 0;

        args->start = start;
        return 1;
}

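/*
 * Look up the start address of @symbol_name in @kallsyms_filename and store it
 * in *addr. Returns 0 on success, -1 if the symbol was not found.
 */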
int kallsyms__get_function_start(const char *kallsyms_filename,
                                 const char *symbol_name, u64 *addr)
{
        struct process_symbol_args args = { .name = symbol_name, };

        if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
                return -1;

        *addr = args.start;
        return 0;
}

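/*
 * Unpack the tag/value pairs of a PERF_RECORD_STAT_CONFIG event into the
 * corresponding fields of a struct perf_stat_config.
 */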
void perf_event__read_stat_config(struct perf_stat_config *config,
                                  struct perf_record_stat_config *event)
{
        unsigned i;

        for (i = 0; i < event->nr; i++) {

                switch (event->data[i].tag) {
#define CASE(__term, __val)                                     \
                case PERF_STAT_CONFIG_TERM__##__term:           \
                        config->__val = event->data[i].val;     \
                        break;

                CASE(AGGR_MODE, aggr_mode)
                CASE(SCALE,     scale)
                CASE(INTERVAL,  interval)
#undef CASE
                default:
                        pr_warning("unknown stat config term %" PRI_lu64 "\n",
                                   event->data[i].tag);
                }
        }
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
        const char *s;

        if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
                s = " exec";
        else
                s = "";

        return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
        size_t ret = 0;
        struct perf_ns_link_info *ns_link_info;
        u32 nr_namespaces, idx;

        ns_link_info = event->namespaces.link_info;
        nr_namespaces = event->namespaces.nr_namespaces;

        ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
                       event->namespaces.pid,
                       event->namespaces.tid,
                       nr_namespaces);

        for (idx = 0; idx < nr_namespaces; idx++) {
                if (idx && (idx % 4 == 0))
                        ret += fprintf(fp, "\n\t\t ");

                ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
                                perf_ns__name(idx), (u64)ns_link_info[idx].dev,
                                (u64)ns_link_info[idx].ino,
                                ((idx + 1) != nr_namespaces) ? ", " : "]\n");
        }

        return ret;
}

size_t perf_event__fprintf_cgroup(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " cgroup: %" PRI_lu64 " %s\n",
                       event->cgroup.id, event->cgroup.path);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct machine *machine)
{
        return machine__process_namespaces_event(machine, event, sample);
}

int perf_event__process_cgroup(struct perf_tool *tool __maybe_unused,
                               union perf_event *event,
                               struct perf_sample *sample,
                               struct machine *machine)
{
        return machine__process_cgroup_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
                            union perf_event *event,
                            struct perf_sample *sample __maybe_unused,
                            struct machine *machine)
{
        return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
                                     struct machine *machine)
{
        return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine)
{
        return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused,
                               struct machine *machine)
{
        return machine__process_switch_event(machine, event);
}

int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
                                union perf_event *event,
                                struct perf_sample *sample __maybe_unused,
                                struct machine *machine)
{
        return machine__process_ksymbol(machine, event, sample);
}

int perf_event__process_bpf(struct perf_tool *tool __maybe_unused,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct machine *machine)
{
        return machine__process_bpf(machine, event, sample);
}

int perf_event__process_text_poke(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event,
                                  struct perf_sample *sample,
                                  struct machine *machine)
{
        return machine__process_text_poke(machine, event, sample);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64 "]: %c %s\n",
                       event->mmap.pid, event->mmap.tid, event->mmap.start,
                       event->mmap.len, event->mmap.pgoff,
                       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
                       event->mmap.filename);
}

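/*
 * Print a PERF_RECORD_MMAP2 record: with the build-id form when
 * PERF_RECORD_MISC_MMAP_BUILD_ID is set, otherwise with device/inode info.
 */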
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
        if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
                char sbuild_id[SBUILD_ID_SIZE];
                struct build_id bid;

                build_id__init(&bid, event->mmap2.build_id,
                               event->mmap2.build_id_size);
                build_id__sprintf(&bid, sbuild_id);

                return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
                                   " <%s>]: %c%c%c%c %s\n",
                               event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
                               event->mmap2.len, event->mmap2.pgoff, sbuild_id,
                               (event->mmap2.prot & PROT_READ) ? 'r' : '-',
                               (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
                               (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
                               (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
                               event->mmap2.filename);
        } else {
                return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
                                   " %02x:%02x %"PRI_lu64" %"PRI_lu64"]: %c%c%c%c %s\n",
                               event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
                               event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
                               event->mmap2.min, event->mmap2.ino,
                               event->mmap2.ino_generation,
                               (event->mmap2.prot & PROT_READ) ? 'r' : '-',
                               (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
                               (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
                               (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
                               event->mmap2.filename);
        }
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
        struct perf_thread_map *threads = thread_map__new_event(&event->thread_map);
        size_t ret;

        ret = fprintf(fp, " nr: ");

        if (threads)
                ret += thread_map__fprintf(threads, fp);
        else
                ret += fprintf(fp, "failed to get threads from event\n");

        perf_thread_map__put(threads);
        return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
        struct perf_cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
        size_t ret;

        ret = fprintf(fp, ": ");

        if (cpus)
                ret += cpu_map__fprintf(cpus, fp);
        else
                ret += fprintf(fp, "failed to get cpumap from event\n");

        perf_cpu_map__put(cpus);
        return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
        return fprintf(fp, "(%d:%d):(%d:%d)\n",
                       event->fork.pid, event->fork.tid,
                       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " offset: %#"PRI_lx64" size: %#"PRI_lx64" flags: %#"PRI_lx64" [%s%s%s]\n",
                       event->aux.aux_offset, event->aux.aux_size,
                       event->aux.flags,
                       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
                       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
                       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " pid: %u tid: %u\n",
                       event->itrace_start.pid, event->itrace_start.tid);
}

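/*
 * Print a context-switch record, distinguishing switch-in, switch-out and
 * preempted switch-out. The CPU-wide variant also carries the prev/next
 * pid/tid of the other task.
 */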
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
        bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
        const char *in_out = !out ? "IN         " :
                !(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
                                    "OUT        " : "OUT preempt";

        if (event->header.type == PERF_RECORD_SWITCH)
                return fprintf(fp, " %s\n", in_out);

        return fprintf(fp, " %s  %s pid/tid: %5d/%-5d\n",
                       in_out, out ? "next" : "prev",
                       event->context_switch.next_prev_pid,
                       event->context_switch.next_prev_tid);
}

static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " lost %" PRI_lu64 "\n", event->lost.lost);
}

size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " addr %" PRI_lx64 " len %u type %u flags 0x%x name %s\n",
                       event->ksymbol.addr, event->ksymbol.len,
                       event->ksymbol.ksym_type,
                       event->ksymbol.flags, event->ksymbol.name);
}

size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " type %u, flags %u, id %u\n",
                       event->bpf.type, event->bpf.flags, event->bpf.id);
}

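/*
 * binary__fprintf() callback used below to hex-dump the old and new
 * instruction bytes of a PERF_RECORD_TEXT_POKE event.
 */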
static int text_poke_printer(enum binary_printer_ops op, unsigned int val,
                             void *extra, FILE *fp)
{
        bool old = *(bool *)extra;

        switch ((int)op) {
        case BINARY_PRINT_LINE_BEGIN:
                return fprintf(fp, "            %s bytes:", old ? "Old" : "New");
        case BINARY_PRINT_NUM_DATA:
                return fprintf(fp, " %02x", val);
        case BINARY_PRINT_LINE_END:
                return fprintf(fp, "\n");
        default:
                return 0;
        }
}

size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *machine, FILE *fp)
{
        struct perf_record_text_poke_event *tp = &event->text_poke;
        size_t ret;
        bool old;

        ret = fprintf(fp, " %" PRI_lx64 " ", tp->addr);
        if (machine) {
                struct addr_location al;

                al.map = maps__find(&machine->kmaps, tp->addr);
                if (al.map && map__load(al.map) >= 0) {
                        al.addr = al.map->map_ip(al.map, tp->addr);
                        al.sym = map__find_symbol(al.map, al.addr);
                        if (al.sym)
                                ret += symbol__fprintf_symname_offs(al.sym, &al, fp);
                }
        }
        ret += fprintf(fp, " old len %u new len %u\n", tp->old_len, tp->new_len);
        old = true;
        ret += binary__fprintf(tp->bytes, tp->old_len, 16, text_poke_printer,
                               &old, fp);
        old = false;
        ret += binary__fprintf(tp->bytes + tp->old_len, tp->new_len, 16,
                               text_poke_printer, &old, fp);
        return ret;
}

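/*
 * Top-level pretty-printer: print "PERF_RECORD_<name>" and then dispatch to
 * the per-type fprintf helper, if one exists.
 */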
size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FILE *fp)
{
        size_t ret = fprintf(fp, "PERF_RECORD_%s",
                             perf_event__name(event->header.type));

        switch (event->header.type) {
        case PERF_RECORD_COMM:
                ret += perf_event__fprintf_comm(event, fp);
                break;
        case PERF_RECORD_FORK:
        case PERF_RECORD_EXIT:
                ret += perf_event__fprintf_task(event, fp);
                break;
        case PERF_RECORD_MMAP:
                ret += perf_event__fprintf_mmap(event, fp);
                break;
        case PERF_RECORD_NAMESPACES:
                ret += perf_event__fprintf_namespaces(event, fp);
                break;
        case PERF_RECORD_CGROUP:
                ret += perf_event__fprintf_cgroup(event, fp);
                break;
        case PERF_RECORD_MMAP2:
                ret += perf_event__fprintf_mmap2(event, fp);
                break;
        case PERF_RECORD_AUX:
                ret += perf_event__fprintf_aux(event, fp);
                break;
        case PERF_RECORD_ITRACE_START:
                ret += perf_event__fprintf_itrace_start(event, fp);
                break;
        case PERF_RECORD_SWITCH:
        case PERF_RECORD_SWITCH_CPU_WIDE:
                ret += perf_event__fprintf_switch(event, fp);
                break;
        case PERF_RECORD_LOST:
                ret += perf_event__fprintf_lost(event, fp);
                break;
        case PERF_RECORD_KSYMBOL:
                ret += perf_event__fprintf_ksymbol(event, fp);
                break;
        case PERF_RECORD_BPF_EVENT:
                ret += perf_event__fprintf_bpf(event, fp);
                break;
        case PERF_RECORD_TEXT_POKE:
                ret += perf_event__fprintf_text_poke(event, machine, fp);
                break;
        default:
                ret += fprintf(fp, "\n");
        }

        return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample,
                        struct machine *machine)
{
        return machine__process_event(machine, event, sample);
}

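/*
 * Resolve @addr to a map in @thread's address space: pick the host/guest
 * kernel or user maps according to @cpumode, fill in *al and convert al->addr
 * to a map-relative address. Returns the map, or NULL (possibly setting
 * al->filtered) when the address cannot be resolved.
 */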
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
                             struct addr_location *al)
{
        struct maps *maps = thread->maps;
        struct machine *machine = maps->machine;
        bool load_map = false;

        al->maps = maps;
        al->thread = thread;
        al->addr = addr;
        al->cpumode = cpumode;
        al->filtered = 0;

        if (machine == NULL) {
                al->map = NULL;
                return NULL;
        }

        if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
                al->level = 'k';
                al->maps = maps = &machine->kmaps;
                load_map = true;
        } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
                al->level = '.';
        } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
                al->level = 'g';
                al->maps = maps = &machine->kmaps;
                load_map = true;
        } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
                al->level = 'u';
        } else {
                al->level = 'H';
                al->map = NULL;

                if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
                        cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
                        !perf_guest)
                        al->filtered |= (1 << HIST_FILTER__GUEST);
                if ((cpumode == PERF_RECORD_MISC_USER ||
                        cpumode == PERF_RECORD_MISC_KERNEL) &&
                        !perf_host)
                        al->filtered |= (1 << HIST_FILTER__HOST);

                return NULL;
        }

        al->map = maps__find(maps, al->addr);
        if (al->map != NULL) {
                /*
                 * Kernel maps might be changed when loading symbols so loading
                 * must be done prior to using kernel maps.
                 */
                if (load_map)
                        map__load(al->map);
                al->addr = al->map->map_ip(al->map, al->addr);
        }

        return al->map;
}

/*
 * For branch stacks or branch samples, the sample cpumode might not be correct
 * because it applies only to the sample 'ip' and not necessarily to 'addr' or
 * the branch stack addresses. If possible, use a fallback to deal with those cases.
 */
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
                                struct addr_location *al)
{
        struct map *map = thread__find_map(thread, cpumode, addr, al);
        struct machine *machine = thread->maps->machine;
        u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);

        if (map || addr_cpumode == cpumode)
                return map;

        return thread__find_map(thread, addr_cpumode, addr, al);
}

struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
                                   u64 addr, struct addr_location *al)
{
        al->sym = NULL;
        if (thread__find_map(thread, cpumode, addr, al))
                al->sym = map__find_symbol(al->map, al->addr);
        return al->sym;
}

struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
                                      u64 addr, struct addr_location *al)
{
        al->sym = NULL;
        if (thread__find_map_fb(thread, cpumode, addr, al))
                al->sym = map__find_symbol(al->map, al->addr);
        return al->sym;
}

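/*
 * Return true if @addr falls within @addr_range bytes of any address in
 * @addr_list, used for the symbol/address filtering below.
 */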
static bool check_address_range(struct intlist *addr_list, int addr_range,
                                unsigned long addr)
{
        struct int_node *pos;

        intlist__for_each_entry(pos, addr_list) {
                if (addr >= pos->i && addr < pos->i + addr_range)
                        return true;
        }

        return false;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
                     struct perf_sample *sample)
{
        struct thread *thread = machine__findnew_thread(machine, sample->pid,
                                                        sample->tid);

        if (thread == NULL)
                return -1;

        dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
        thread__find_map(thread, sample->cpumode, sample->ip, al);
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
                        al->level == 'H' ? "[hypervisor]" : "<not found>");

        if (thread__is_filtered(thread))
                al->filtered |= (1 << HIST_FILTER__THREAD);

        al->sym = NULL;
        al->cpu = sample->cpu;
        al->socket = -1;
        al->srcline = NULL;

        if (al->cpu >= 0) {
                struct perf_env *env = machine->env;

                if (env && env->cpu)
                        al->socket = env->cpu[al->cpu].socket_id;
        }

        if (al->map) {
                struct dso *dso = al->map->dso;

                if (symbol_conf.dso_list &&
                    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
                                                  dso->short_name) ||
                               (dso->short_name != dso->long_name &&
                                strlist__has_entry(symbol_conf.dso_list,
                                                   dso->long_name))))) {
                        al->filtered |= (1 << HIST_FILTER__DSO);
                }

                al->sym = map__find_symbol(al->map, al->addr);
        } else if (symbol_conf.dso_list) {
                al->filtered |= (1 << HIST_FILTER__DSO);
        }

        if (symbol_conf.sym_list) {
                int ret = 0;
                char al_addr_str[32];
                size_t sz = sizeof(al_addr_str);

                if (al->sym) {
                        ret = strlist__has_entry(symbol_conf.sym_list,
                                                al->sym->name);
                }
                if (!ret && al->sym) {
                        snprintf(al_addr_str, sz, "0x%"PRIx64,
                                al->map->unmap_ip(al->map, al->sym->start));
                        ret = strlist__has_entry(symbol_conf.sym_list,
                                                al_addr_str);
                }
                if (!ret && symbol_conf.addr_list && al->map) {
                        unsigned long addr = al->map->unmap_ip(al->map, al->addr);

                        ret = intlist__has_entry(symbol_conf.addr_list, addr);
                        if (!ret && symbol_conf.addr_range) {
                                ret = check_address_range(symbol_conf.addr_list,
                                                          symbol_conf.addr_range,
                                                          addr);
                        }
                }

                if (!ret)
                        al->filtered |= (1 << HIST_FILTER__SYMBOL);
        }

        return 0;
}

/*
 * The preprocess_sample method will return with reference counts held for the
 * thread in it; when done using it (and perhaps after getting ref counts if
 * needing to keep a pointer to one of those entries), it must be paired with
 * addr_location__put(), so that the refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
        thread__zput(al->thread);
}

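/*
 * Intel BTS sessions are modelled as a hardware branch-instructions event
 * with a sample period of 1.
 */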
bool is_bts_event(struct perf_event_attr *attr)
{
        return attr->type == PERF_TYPE_HARDWARE &&
               (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
               attr->sample_period == 1;
}

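/*
 * For these event types sample->addr refers to an address that can usefully
 * be resolved to a symbol: page faults and BTS branch records.
 */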
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
        if (attr->type == PERF_TYPE_SOFTWARE &&
            (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
             attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
             attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
                return true;

        if (is_bts_event(attr))
                return true;

        return false;
}

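/*
 * Resolve sample->addr (rather than sample->ip) to a map and symbol, using the
 * cpumode fallback since the sample cpumode may not apply to 'addr'.
 */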
void thread__resolve(struct thread *thread, struct addr_location *al,
                     struct perf_sample *sample)
{
        thread__find_map_fb(thread, sample->cpumode, sample->addr, al);

        al->cpu = sample->cpu;
        al->sym = NULL;

        if (al->map)
                al->sym = map__find_symbol(al->map, al->addr);
}