Merge branch 'next' into for-linus
[linux-2.6-microblaze.git] / tools / perf / util / event.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <fcntl.h>
5 #include <inttypes.h>
6 #include <linux/kernel.h>
7 #include <linux/types.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
12 #include <api/fs/fs.h>
13 #include <linux/perf_event.h>
14 #include "event.h"
15 #include "debug.h"
16 #include "hist.h"
17 #include "machine.h"
18 #include "sort.h"
19 #include "string2.h"
20 #include "strlist.h"
21 #include "thread.h"
22 #include "thread_map.h"
23 #include "sane_ctype.h"
24 #include "map.h"
25 #include "symbol.h"
26 #include "symbol/kallsyms.h"
27 #include "asm/bug.h"
28 #include "stat.h"
29 #include "session.h"
30 #include "bpf-event.h"
31
/* Default limit, in milliseconds, for parsing one /proc/<pid>/maps file. */
#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
33
/*
 * Printable names for perf event record types, indexed by PERF_RECORD_* id.
 * Index 0 is the "TOTAL" aggregate slot; gaps in the id space leave NULL
 * entries, which perf_event__name() reports as "UNKNOWN".
 */
static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_KSYMBOL]			= "KSYMBOL",
	[PERF_RECORD_BPF_EVENT]			= "BPF_EVENT",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
	[PERF_RECORD_HEADER_FEATURE]		= "FEATURE",
	[PERF_RECORD_COMPRESSED]		= "COMPRESSED",
};
73
/* Names of the /proc/<pid>/ns/* link entries, indexed by *_NS_INDEX. */
static const char *perf_ns__names[] = {
	[NET_NS_INDEX]		= "net",
	[UTS_NS_INDEX]		= "uts",
	[IPC_NS_INDEX]		= "ipc",
	[PID_NS_INDEX]		= "pid",
	[USER_NS_INDEX]		= "user",
	[MNT_NS_INDEX]		= "mnt",
	[CGROUP_NS_INDEX]	= "cgroup",
};
83
/* Time budget (ms) for one maps-file parse; --proc-map-timeout overrides it. */
unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
85
86 const char *perf_event__name(unsigned int id)
87 {
88         if (id >= ARRAY_SIZE(perf_event__names))
89                 return "INVALID";
90         if (!perf_event__names[id])
91                 return "UNKNOWN";
92         return perf_event__names[id];
93 }
94
95 static const char *perf_ns__name(unsigned int id)
96 {
97         if (id >= ARRAY_SIZE(perf_ns__names))
98                 return "UNKNOWN";
99         return perf_ns__names[id];
100 }
101
102 int perf_tool__process_synth_event(struct perf_tool *tool,
103                                    union perf_event *event,
104                                    struct machine *machine,
105                                    perf_event__handler_t process)
106 {
107         struct perf_sample synth_sample = {
108         .pid       = -1,
109         .tid       = -1,
110         .time      = -1,
111         .stream_id = -1,
112         .cpu       = -1,
113         .period    = 1,
114         .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
115         };
116
117         return process(tool, event, &synth_sample, machine);
118 };
119
120 /*
121  * Assumes that the first 4095 bytes of /proc/pid/stat contains
122  * the comm, tgid and ppid.
123  */
124 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
125                                     pid_t *tgid, pid_t *ppid)
126 {
127         char filename[PATH_MAX];
128         char bf[4096];
129         int fd;
130         size_t size = 0;
131         ssize_t n;
132         char *name, *tgids, *ppids;
133
134         *tgid = -1;
135         *ppid = -1;
136
137         snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
138
139         fd = open(filename, O_RDONLY);
140         if (fd < 0) {
141                 pr_debug("couldn't open %s\n", filename);
142                 return -1;
143         }
144
145         n = read(fd, bf, sizeof(bf) - 1);
146         close(fd);
147         if (n <= 0) {
148                 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
149                            pid);
150                 return -1;
151         }
152         bf[n] = '\0';
153
154         name = strstr(bf, "Name:");
155         tgids = strstr(bf, "Tgid:");
156         ppids = strstr(bf, "PPid:");
157
158         if (name) {
159                 char *nl;
160
161                 name += 5;  /* strlen("Name:") */
162                 name = ltrim(name);
163
164                 nl = strchr(name, '\n');
165                 if (nl)
166                         *nl = '\0';
167
168                 size = strlen(name);
169                 if (size >= len)
170                         size = len - 1;
171                 memcpy(comm, name, size);
172                 comm[size] = '\0';
173         } else {
174                 pr_debug("Name: string not found for pid %d\n", pid);
175         }
176
177         if (tgids) {
178                 tgids += 5;  /* strlen("Tgid:") */
179                 *tgid = atoi(tgids);
180         } else {
181                 pr_debug("Tgid: string not found for pid %d\n", pid);
182         }
183
184         if (ppids) {
185                 ppids += 5;  /* strlen("PPid:") */
186                 *ppid = atoi(ppids);
187         } else {
188                 pr_debug("PPid: string not found for pid %d\n", pid);
189         }
190
191         return 0;
192 }
193
/*
 * Fill in a PERF_RECORD_COMM event for @pid without delivering it.
 *
 * On the host machine the comm/tgid/ppid come from /proc/<pid>/status;
 * for a non-host machine the comm is left zeroed and *tgid is set to
 * machine->pid (no per-thread info available there).
 *
 * Returns 0 on success, -1 if the ids could not be obtained or the
 * resulting tgid is invalid.
 */
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	/*
	 * Shrink the record to the u64-aligned used length of the comm
	 * string, then zero the sample-id area that follows it.
	 */
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}
230
231 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
232                                          union perf_event *event, pid_t pid,
233                                          perf_event__handler_t process,
234                                          struct machine *machine)
235 {
236         pid_t tgid, ppid;
237
238         if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
239                 return -1;
240
241         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
242                 return -1;
243
244         return tgid;
245 }
246
247 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
248                                          struct perf_ns_link_info *ns_link_info)
249 {
250         struct stat64 st;
251         char proc_ns[128];
252
253         sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
254         if (stat64(proc_ns, &st) == 0) {
255                 ns_link_info->dev = st.st_dev;
256                 ns_link_info->ino = st.st_ino;
257         }
258 }
259
/*
 * Synthesize a PERF_RECORD_NAMESPACES event for thread @pid of process
 * @tgid, capturing the dev/ino of every /proc/<pid>/ns/* link.
 *
 * A no-op (returns 0) unless the tool asked for namespace events.
 * Returns 0 on success, -1 if delivery fails.
 */
int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	/*
	 * Zero the fixed part, the variable link_info array AND the
	 * trailing sample-id area, so namespaces that can't be stat()ed
	 * stay reported as (dev, ino) == (0, 0).
	 */
	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	/* Header size must cover the variable-length link_info array too. */
	event->namespaces.header.size = (sizeof(event->namespaces) +
			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}
298
299 static int perf_event__synthesize_fork(struct perf_tool *tool,
300                                        union perf_event *event,
301                                        pid_t pid, pid_t tgid, pid_t ppid,
302                                        perf_event__handler_t process,
303                                        struct machine *machine)
304 {
305         memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
306
307         /*
308          * for main thread set parent to ppid from status file. For other
309          * threads set parent pid to main thread. ie., assume main thread
310          * spawns all threads in a process
311         */
312         if (tgid == pid) {
313                 event->fork.ppid = ppid;
314                 event->fork.ptid = ppid;
315         } else {
316                 event->fork.ppid = tgid;
317                 event->fork.ptid = tgid;
318         }
319         event->fork.pid  = tgid;
320         event->fork.tid  = pid;
321         event->fork.header.type = PERF_RECORD_FORK;
322         event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
323
324         event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
325
326         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
327                 return -1;
328
329         return 0;
330 }
331
332 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
333                                        union perf_event *event,
334                                        pid_t pid, pid_t tgid,
335                                        perf_event__handler_t process,
336                                        struct machine *machine,
337                                        bool mmap_data)
338 {
339         char filename[PATH_MAX];
340         FILE *fp;
341         unsigned long long t;
342         bool truncation = false;
343         unsigned long long timeout = proc_map_timeout * 1000000ULL;
344         int rc = 0;
345         const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
346         int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
347
348         if (machine__is_default_guest(machine))
349                 return 0;
350
351         snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
352                  machine->root_dir, pid, pid);
353
354         fp = fopen(filename, "r");
355         if (fp == NULL) {
356                 /*
357                  * We raced with a task exiting - just return:
358                  */
359                 pr_debug("couldn't open %s\n", filename);
360                 return -1;
361         }
362
363         event->header.type = PERF_RECORD_MMAP2;
364         t = rdclock();
365
366         while (1) {
367                 char bf[BUFSIZ];
368                 char prot[5];
369                 char execname[PATH_MAX];
370                 char anonstr[] = "//anon";
371                 unsigned int ino;
372                 size_t size;
373                 ssize_t n;
374
375                 if (fgets(bf, sizeof(bf), fp) == NULL)
376                         break;
377
378                 if ((rdclock() - t) > timeout) {
379                         pr_warning("Reading %s time out. "
380                                    "You may want to increase "
381                                    "the time limit by --proc-map-timeout\n",
382                                    filename);
383                         truncation = true;
384                         goto out;
385                 }
386
387                 /* ensure null termination since stack will be reused. */
388                 strcpy(execname, "");
389
390                 /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
391                 n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
392                        &event->mmap2.start, &event->mmap2.len, prot,
393                        &event->mmap2.pgoff, &event->mmap2.maj,
394                        &event->mmap2.min,
395                        &ino, execname);
396
397                 /*
398                  * Anon maps don't have the execname.
399                  */
400                 if (n < 7)
401                         continue;
402
403                 event->mmap2.ino = (u64)ino;
404
405                 /*
406                  * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
407                  */
408                 if (machine__is_host(machine))
409                         event->header.misc = PERF_RECORD_MISC_USER;
410                 else
411                         event->header.misc = PERF_RECORD_MISC_GUEST_USER;
412
413                 /* map protection and flags bits */
414                 event->mmap2.prot = 0;
415                 event->mmap2.flags = 0;
416                 if (prot[0] == 'r')
417                         event->mmap2.prot |= PROT_READ;
418                 if (prot[1] == 'w')
419                         event->mmap2.prot |= PROT_WRITE;
420                 if (prot[2] == 'x')
421                         event->mmap2.prot |= PROT_EXEC;
422
423                 if (prot[3] == 's')
424                         event->mmap2.flags |= MAP_SHARED;
425                 else
426                         event->mmap2.flags |= MAP_PRIVATE;
427
428                 if (prot[2] != 'x') {
429                         if (!mmap_data || prot[0] != 'r')
430                                 continue;
431
432                         event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
433                 }
434
435 out:
436                 if (truncation)
437                         event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
438
439                 if (!strcmp(execname, ""))
440                         strcpy(execname, anonstr);
441
442                 if (hugetlbfs_mnt_len &&
443                     !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
444                         strcpy(execname, anonstr);
445                         event->mmap2.flags |= MAP_HUGETLB;
446                 }
447
448                 size = strlen(execname) + 1;
449                 memcpy(event->mmap2.filename, execname, size);
450                 size = PERF_ALIGN(size, sizeof(u64));
451                 event->mmap2.len -= event->mmap.start;
452                 event->mmap2.header.size = (sizeof(event->mmap2) -
453                                         (sizeof(event->mmap2.filename) - size));
454                 memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
455                 event->mmap2.header.size += machine->id_hdr_size;
456                 event->mmap2.pid = tgid;
457                 event->mmap2.tid = pid;
458
459                 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
460                         rc = -1;
461                         break;
462                 }
463
464                 if (truncation)
465                         break;
466         }
467
468         fclose(fp);
469         return rc;
470 }
471
/*
 * Synthesize one PERF_RECORD_MMAP event per kernel module, walking the
 * module maps already loaded into @machine's kernel maps.
 * Returns 0 on success, -1 on allocation or delivery failure.
 */
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct maps *maps = machine__kernel_maps(machine);
	/* Scratch event reused for every module; sized for mmap + sample id. */
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		/* Only kernel modules; skip the main kernel map etc. */
		if (!__map__is_kmodule(pos))
			continue;

		/* Trim the filename array to the u64-aligned name length. */
		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}
525
/*
 * Synthesize the events describing one thread or a whole process.
 *
 * !full: emit COMM + NAMESPACES for @pid only, plus its MMAP events if
 * @pid is the thread group leader.
 * full:  walk /proc/<pid>/task and emit FORK + NAMESPACES + COMM for
 * every thread, plus MMAP events for @pid itself.
 *
 * Returns 0 on success (including the task-exited race where the task
 * dir can't be opened), -1 if an event can't be built or delivered.
 */
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;

		/*
		 * send mmap only for thread group leader
		 * see thread__init_map_groups
		 */
		if (pid == tgid &&
		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						       process, machine, mmap_data))
			return -1;

		return 0;
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		/* Skip ".", ".." and anything non-numeric. */
		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}
618
/*
 * Synthesize COMM/NAMESPACES/MMAP events for every thread in @threads.
 * When a thread's group leader is not itself in the map, events for the
 * leader are synthesized too (its maps are needed for map grouping).
 * Returns 0 on success, -1 on allocation or synthesis failure.
 */
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	/* Scratch events, each sized for its record plus the sample-id area. */
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
695
/*
 * Synthesize full thread events for the /proc entries
 * dirent[start .. start+num-1]. Races with exiting threads are
 * tolerated: a failure for one entry does not abort the walk.
 * Returns 0 unless the scratch events cannot be allocated.
 */
static int __perf_event__synthesize_threads(struct perf_tool *tool,
					    perf_event__handler_t process,
					    struct machine *machine,
					    bool mmap_data,
					    struct dirent **dirent,
					    int start,
					    int num)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;
	char *end;
	pid_t pid;
	int i;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	for (i = start; i < start + num; i++) {
		if (!isdigit(dirent[i]->d_name[0]))
			continue;

		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
		/* only interested in proper numerical dirents */
		if (*end)
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data);
	}
	err = 0;

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
757
/* Per-worker arguments: one slice of the scandir() result to synthesize. */
struct synthesize_threads_arg {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	bool mmap_data;
	struct dirent **dirent;	/* shared scandir() array of /proc entries */
	int num;		/* number of entries this worker handles */
	int start;		/* index of this worker's first entry */
};
767
768 static void *synthesize_threads_worker(void *arg)
769 {
770         struct synthesize_threads_arg *args = arg;
771
772         __perf_event__synthesize_threads(args->tool, args->process,
773                                          args->machine, args->mmap_data,
774                                          args->dirent,
775                                          args->start, args->num);
776         return NULL;
777 }
778
779 int perf_event__synthesize_threads(struct perf_tool *tool,
780                                    perf_event__handler_t process,
781                                    struct machine *machine,
782                                    bool mmap_data,
783                                    unsigned int nr_threads_synthesize)
784 {
785         struct synthesize_threads_arg *args = NULL;
786         pthread_t *synthesize_threads = NULL;
787         char proc_path[PATH_MAX];
788         struct dirent **dirent;
789         int num_per_thread;
790         int m, n, i, j;
791         int thread_nr;
792         int base = 0;
793         int err = -1;
794
795
796         if (machine__is_default_guest(machine))
797                 return 0;
798
799         snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
800         n = scandir(proc_path, &dirent, 0, alphasort);
801         if (n < 0)
802                 return err;
803
804         if (nr_threads_synthesize == UINT_MAX)
805                 thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
806         else
807                 thread_nr = nr_threads_synthesize;
808
809         if (thread_nr <= 1) {
810                 err = __perf_event__synthesize_threads(tool, process,
811                                                        machine, mmap_data,
812                                                        dirent, base, n);
813                 goto free_dirent;
814         }
815         if (thread_nr > n)
816                 thread_nr = n;
817
818         synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
819         if (synthesize_threads == NULL)
820                 goto free_dirent;
821
822         args = calloc(sizeof(*args), thread_nr);
823         if (args == NULL)
824                 goto free_threads;
825
826         num_per_thread = n / thread_nr;
827         m = n % thread_nr;
828         for (i = 0; i < thread_nr; i++) {
829                 args[i].tool = tool;
830                 args[i].process = process;
831                 args[i].machine = machine;
832                 args[i].mmap_data = mmap_data;
833                 args[i].dirent = dirent;
834         }
835         for (i = 0; i < m; i++) {
836                 args[i].num = num_per_thread + 1;
837                 args[i].start = i * args[i].num;
838         }
839         if (i != 0)
840                 base = args[i-1].start + args[i-1].num;
841         for (j = i; j < thread_nr; j++) {
842                 args[j].num = num_per_thread;
843                 args[j].start = base + (j - i) * args[i].num;
844         }
845
846         for (i = 0; i < thread_nr; i++) {
847                 if (pthread_create(&synthesize_threads[i], NULL,
848                                    synthesize_threads_worker, &args[i]))
849                         goto out_join;
850         }
851         err = 0;
852 out_join:
853         for (i = 0; i < thread_nr; i++)
854                 pthread_join(synthesize_threads[i], NULL);
855         free(args);
856 free_threads:
857         free(synthesize_threads);
858 free_dirent:
859         for (i = 0; i < n; i++)
860                 free(dirent[i]);
861         free(dirent);
862
863         return err;
864 }
865
/* Lookup context for find_symbol_cb(): symbol 'name' in, 'start' address out. */
struct process_symbol_args {
	const char *name;
	u64	   start;
};
870
871 static int find_symbol_cb(void *arg, const char *name, char type,
872                           u64 start)
873 {
874         struct process_symbol_args *args = arg;
875
876         /*
877          * Must be a function or at least an alias, as in PARISC64, where "_text" is
878          * an 'A' to the same address as "_stext".
879          */
880         if (!(kallsyms__is_function(type) ||
881               type == 'A') || strcmp(name, args->name))
882                 return 0;
883
884         args->start = start;
885         return 1;
886 }
887
888 int kallsyms__get_function_start(const char *kallsyms_filename,
889                                  const char *symbol_name, u64 *addr)
890 {
891         struct process_symbol_args args = { .name = symbol_name, };
892
893         if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
894                 return -1;
895
896         *addr = args.start;
897         return 0;
898 }
899
/*
 * Weak no-op default: architectures that need extra kernel maps synthesized
 * override this elsewhere — TODO confirm which arch code provides it.
 */
int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
					      perf_event__handler_t process __maybe_unused,
					      struct machine *machine __maybe_unused)
{
	return 0;
}
906
907 static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
908                                                 perf_event__handler_t process,
909                                                 struct machine *machine)
910 {
911         size_t size;
912         struct map *map = machine__kernel_map(machine);
913         struct kmap *kmap;
914         int err;
915         union perf_event *event;
916
917         if (symbol_conf.kptr_restrict)
918                 return -1;
919         if (map == NULL)
920                 return -1;
921
922         /*
923          * We should get this from /sys/kernel/sections/.text, but till that is
924          * available use this, and after it is use this as a fallback for older
925          * kernels.
926          */
927         event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
928         if (event == NULL) {
929                 pr_debug("Not enough memory synthesizing mmap event "
930                          "for kernel modules\n");
931                 return -1;
932         }
933
934         if (machine__is_host(machine)) {
935                 /*
936                  * kernel uses PERF_RECORD_MISC_USER for user space maps,
937                  * see kernel/perf_event.c __perf_event_mmap
938                  */
939                 event->header.misc = PERF_RECORD_MISC_KERNEL;
940         } else {
941                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
942         }
943
944         kmap = map__kmap(map);
945         size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
946                         "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
947         size = PERF_ALIGN(size, sizeof(u64));
948         event->mmap.header.type = PERF_RECORD_MMAP;
949         event->mmap.header.size = (sizeof(event->mmap) -
950                         (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
951         event->mmap.pgoff = kmap->ref_reloc_sym->addr;
952         event->mmap.start = map->start;
953         event->mmap.len   = map->end - event->mmap.start;
954         event->mmap.pid   = machine->pid;
955
956         err = perf_tool__process_synth_event(tool, event, machine, process);
957         free(event);
958
959         return err;
960 }
961
962 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
963                                        perf_event__handler_t process,
964                                        struct machine *machine)
965 {
966         int err;
967
968         err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
969         if (err < 0)
970                 return err;
971
972         return perf_event__synthesize_extra_kmaps(tool, process, machine);
973 }
974
975 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
976                                       struct thread_map *threads,
977                                       perf_event__handler_t process,
978                                       struct machine *machine)
979 {
980         union perf_event *event;
981         int i, err, size;
982
983         size  = sizeof(event->thread_map);
984         size += threads->nr * sizeof(event->thread_map.entries[0]);
985
986         event = zalloc(size);
987         if (!event)
988                 return -ENOMEM;
989
990         event->header.type = PERF_RECORD_THREAD_MAP;
991         event->header.size = size;
992         event->thread_map.nr = threads->nr;
993
994         for (i = 0; i < threads->nr; i++) {
995                 struct thread_map_event_entry *entry = &event->thread_map.entries[i];
996                 char *comm = thread_map__comm(threads, i);
997
998                 if (!comm)
999                         comm = (char *) "";
1000
1001                 entry->pid = thread_map__pid(threads, i);
1002                 strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1003         }
1004
1005         err = process(tool, event, NULL, machine);
1006
1007         free(event);
1008         return err;
1009 }
1010
1011 static void synthesize_cpus(struct cpu_map_entries *cpus,
1012                             struct cpu_map *map)
1013 {
1014         int i;
1015
1016         cpus->nr = map->nr;
1017
1018         for (i = 0; i < map->nr; i++)
1019                 cpus->cpu[i] = map->map[i];
1020 }
1021
1022 static void synthesize_mask(struct cpu_map_mask *mask,
1023                             struct cpu_map *map, int max)
1024 {
1025         int i;
1026
1027         mask->nr = BITS_TO_LONGS(max);
1028         mask->long_size = sizeof(long);
1029
1030         for (i = 0; i < map->nr; i++)
1031                 set_bit(map->map[i], mask->mask);
1032 }
1033
1034 static size_t cpus_size(struct cpu_map *map)
1035 {
1036         return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
1037 }
1038
1039 static size_t mask_size(struct cpu_map *map, int *max)
1040 {
1041         int i;
1042
1043         *max = 0;
1044
1045         for (i = 0; i < map->nr; i++) {
1046                 /* bit possition of the cpu is + 1 */
1047                 int bit = map->map[i] + 1;
1048
1049                 if (bit > *max)
1050                         *max = bit;
1051         }
1052
1053         return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
1054 }
1055
/*
 * Allocate a zeroed buffer big enough for a cpu_map_data payload describing
 * @map, choosing the smaller of the array and mask encodings.  On return,
 * *size is grown by the payload size (u64-aligned), *type is the chosen
 * PERF_CPU_MAP__* encoding and *max the highest cpu bit (for the mask case).
 */
void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u64)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	/* a dummy (empty) map always uses the array encoding */
	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	*size = PERF_ALIGN(*size, sizeof(u64));
	return zalloc(*size);
}
1089
1090 void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
1091                               u16 type, int max)
1092 {
1093         data->type = type;
1094
1095         switch (type) {
1096         case PERF_CPU_MAP__CPUS:
1097                 synthesize_cpus((struct cpu_map_entries *) data->data, map);
1098                 break;
1099         case PERF_CPU_MAP__MASK:
1100                 synthesize_mask((struct cpu_map_mask *) data->data, map, max);
1101         default:
1102                 break;
1103         };
1104 }
1105
1106 static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
1107 {
1108         size_t size = sizeof(struct cpu_map_event);
1109         struct cpu_map_event *event;
1110         int max;
1111         u16 type;
1112
1113         event = cpu_map_data__alloc(map, &size, &type, &max);
1114         if (!event)
1115                 return NULL;
1116
1117         event->header.type = PERF_RECORD_CPU_MAP;
1118         event->header.size = size;
1119         event->data.type   = type;
1120
1121         cpu_map_data__synthesize(&event->data, map, type, max);
1122         return event;
1123 }
1124
1125 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1126                                    struct cpu_map *map,
1127                                    perf_event__handler_t process,
1128                                    struct machine *machine)
1129 {
1130         struct cpu_map_event *event;
1131         int err;
1132
1133         event = cpu_map_event__new(map);
1134         if (!event)
1135                 return -ENOMEM;
1136
1137         err = process(tool, (union perf_event *) event, NULL, machine);
1138
1139         free(event);
1140         return err;
1141 }
1142
/*
 * Synthesize a PERF_RECORD_STAT_CONFIG event carrying one tag/value pair
 * per PERF_STAT_CONFIG_TERM__* and feed it to process().  Returns the
 * callback's result, or -ENOMEM.
 */
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	/* header plus one tag/val pair per config term */
	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

/* append one tag/value term and advance the cursor 'i' */
#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	/* catches a new PERF_STAT_CONFIG_TERM__* without a matching ADD() */
	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}
1180
1181 int perf_event__synthesize_stat(struct perf_tool *tool,
1182                                 u32 cpu, u32 thread, u64 id,
1183                                 struct perf_counts_values *count,
1184                                 perf_event__handler_t process,
1185                                 struct machine *machine)
1186 {
1187         struct stat_event event;
1188
1189         event.header.type = PERF_RECORD_STAT;
1190         event.header.size = sizeof(event);
1191         event.header.misc = 0;
1192
1193         event.id        = id;
1194         event.cpu       = cpu;
1195         event.thread    = thread;
1196         event.val       = count->val;
1197         event.ena       = count->ena;
1198         event.run       = count->run;
1199
1200         return process(tool, (union perf_event *) &event, NULL, machine);
1201 }
1202
1203 int perf_event__synthesize_stat_round(struct perf_tool *tool,
1204                                       u64 evtime, u64 type,
1205                                       perf_event__handler_t process,
1206                                       struct machine *machine)
1207 {
1208         struct stat_round_event event;
1209
1210         event.header.type = PERF_RECORD_STAT_ROUND;
1211         event.header.size = sizeof(event);
1212         event.header.misc = 0;
1213
1214         event.time = evtime;
1215         event.type = type;
1216
1217         return process(tool, (union perf_event *) &event, NULL, machine);
1218 }
1219
/*
 * Decode a PERF_RECORD_STAT_CONFIG event back into @config, warning on any
 * tag this binary does not know about.
 */
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
/* map one PERF_STAT_CONFIG_TERM__* tag onto its perf_stat_config field */
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,	scale)
		CASE(INTERVAL,	interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}
1243
1244 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
1245 {
1246         const char *s;
1247
1248         if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
1249                 s = " exec";
1250         else
1251                 s = "";
1252
1253         return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1254 }
1255
1256 size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
1257 {
1258         size_t ret = 0;
1259         struct perf_ns_link_info *ns_link_info;
1260         u32 nr_namespaces, idx;
1261
1262         ns_link_info = event->namespaces.link_info;
1263         nr_namespaces = event->namespaces.nr_namespaces;
1264
1265         ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
1266                        event->namespaces.pid,
1267                        event->namespaces.tid,
1268                        nr_namespaces);
1269
1270         for (idx = 0; idx < nr_namespaces; idx++) {
1271                 if (idx && (idx % 4 == 0))
1272                         ret += fprintf(fp, "\n\t\t ");
1273
1274                 ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
1275                                 perf_ns__name(idx), (u64)ns_link_info[idx].dev,
1276                                 (u64)ns_link_info[idx].ino,
1277                                 ((idx + 1) != nr_namespaces) ? ", " : "]\n");
1278         }
1279
1280         return ret;
1281 }
1282
/* perf_tool callback: forward PERF_RECORD_COMM to the machine layer. */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}
1290
/* perf_tool callback: forward PERF_RECORD_NAMESPACES to the machine layer. */
int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}
1298
/* perf_tool callback: forward PERF_RECORD_LOST to the machine layer. */
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}
1306
/* perf_tool callback: forward PERF_RECORD_AUX to the machine layer. */
int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}
1314
/* perf_tool callback: forward PERF_RECORD_ITRACE_START to the machine layer. */
int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}
1322
/* perf_tool callback: forward PERF_RECORD_LOST_SAMPLES to the machine layer. */
int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}
1330
/* perf_tool callback: forward PERF_RECORD_SWITCH{,_CPU_WIDE} to the machine layer. */
int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}
1338
/* perf_tool callback: forward PERF_RECORD_KSYMBOL to the machine layer. */
int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine)
{
	return machine__process_ksymbol(machine, event, sample);
}
1346
/* perf_tool callback: forward PERF_RECORD_BPF_EVENT to the machine layer. */
int perf_event__process_bpf_event(struct perf_tool *tool __maybe_unused,
				  union perf_event *event,
				  struct perf_sample *sample __maybe_unused,
				  struct machine *machine)
{
	return machine__process_bpf_event(machine, event, sample);
}
1354
/*
 * Print a PERF_RECORD_MMAP: pid/tid, [start(len) @ pgoff], 'r' for data
 * mappings (MISC_MMAP_DATA) or 'x' otherwise, and the file name.
 */
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}
1363
/*
 * Print a PERF_RECORD_MMAP2: like MMAP plus maj:min device, inode and
 * generation, rwx protection bits and shared/private flag.
 */
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}
1378
1379 size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
1380 {
1381         struct thread_map *threads = thread_map__new_event(&event->thread_map);
1382         size_t ret;
1383
1384         ret = fprintf(fp, " nr: ");
1385
1386         if (threads)
1387                 ret += thread_map__fprintf(threads, fp);
1388         else
1389                 ret += fprintf(fp, "failed to get threads from event\n");
1390
1391         thread_map__put(threads);
1392         return ret;
1393 }
1394
1395 size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
1396 {
1397         struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
1398         size_t ret;
1399
1400         ret = fprintf(fp, ": ");
1401
1402         if (cpus)
1403                 ret += cpu_map__fprintf(cpus, fp);
1404         else
1405                 ret += fprintf(fp, "failed to get cpumap from event\n");
1406
1407         cpu_map__put(cpus);
1408         return ret;
1409 }
1410
/* perf_tool callback: forward PERF_RECORD_MMAP to the machine layer. */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}
1418
/* perf_tool callback: forward PERF_RECORD_MMAP2 to the machine layer. */
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}
1426
/* Print a PERF_RECORD_FORK/EXIT: (pid:tid):(ppid:ptid). */
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}
1433
/* perf_tool callback: forward PERF_RECORD_FORK to the machine layer. */
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}
1441
/* perf_tool callback: forward PERF_RECORD_EXIT to the machine layer. */
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}
1449
/*
 * Print a PERF_RECORD_AUX: offset/size/flags, with T/O/P letters for the
 * TRUNCATED, OVERWRITE and PARTIAL flag bits.
 */
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
}
1459
/* Print a PERF_RECORD_ITRACE_START: the traced pid/tid. */
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}
1465
1466 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1467 {
1468         bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1469         const char *in_out = !out ? "IN         " :
1470                 !(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
1471                                     "OUT        " : "OUT preempt";
1472
1473         if (event->header.type == PERF_RECORD_SWITCH)
1474                 return fprintf(fp, " %s\n", in_out);
1475
1476         return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
1477                        in_out, out ? "next" : "prev",
1478                        event->context_switch.next_prev_pid,
1479                        event->context_switch.next_prev_tid);
1480 }
1481
/* Print a PERF_RECORD_LOST: the number of lost records. */
static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
}
1486
/* Print a PERF_RECORD_KSYMBOL: address, length, type, flags and name. */
size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " ksymbol event with addr %" PRIx64 " len %u type %u flags 0x%x name %s\n",
		       event->ksymbol_event.addr, event->ksymbol_event.len,
		       event->ksymbol_event.ksym_type,
		       event->ksymbol_event.flags, event->ksymbol_event.name);
}
1494
/* Print a PERF_RECORD_BPF_EVENT: type, flags and program id. */
size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " bpf event with type %u, flags %u, id %u\n",
		       event->bpf_event.type, event->bpf_event.flags,
		       event->bpf_event.id);
}
1501
/*
 * Print "PERF_RECORD_<name>" followed by a per-type detail line, dispatching
 * to the matching perf_event__fprintf_*() helper; unknown types get just a
 * newline.  Returns the number of characters printed.
 */
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	case PERF_RECORD_LOST:
		ret += perf_event__fprintf_lost(event, fp);
		break;
	case PERF_RECORD_KSYMBOL:
		ret += perf_event__fprintf_ksymbol(event, fp);
		break;
	case PERF_RECORD_BPF_EVENT:
		ret += perf_event__fprintf_bpf_event(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}
1549
/* Generic perf_tool callback: let the machine layer dispatch any event type. */
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}
1557
/*
 * Resolve @addr for @thread according to @cpumode (host/guest, kernel/user),
 * filling @al and returning the containing map (NULL if none).  For the
 * 'H' fallback case, al->filtered records why the sample is unusable.
 */
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
			     struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return NULL;
	}

	/* pick the map groups and al->level letter matching the cpumode */
	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		/* unhandled combination: mark filtered and give up */
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return NULL;
	}

	al->map = map_groups__find(mg, al->addr);
	if (al->map != NULL) {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		/* translate to the map-relative address */
		al->addr = al->map->map_ip(al->map, al->addr);
	}

	return al->map;
}
1617
1618 /*
1619  * For branch stacks or branch samples, the sample cpumode might not be correct
1620  * because it applies only to the sample 'ip' and not necessary to 'addr' or
1621  * branch stack addresses. If possible, use a fallback to deal with those cases.
1622  */
1623 struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
1624                                 struct addr_location *al)
1625 {
1626         struct map *map = thread__find_map(thread, cpumode, addr, al);
1627         struct machine *machine = thread->mg->machine;
1628         u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
1629
1630         if (map || addr_cpumode == cpumode)
1631                 return map;
1632
1633         return thread__find_map(thread, addr_cpumode, addr, al);
1634 }
1635
1636 struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
1637                                    u64 addr, struct addr_location *al)
1638 {
1639         al->sym = NULL;
1640         if (thread__find_map(thread, cpumode, addr, al))
1641                 al->sym = map__find_symbol(al->map, al->addr);
1642         return al->sym;
1643 }
1644
1645 struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
1646                                       u64 addr, struct addr_location *al)
1647 {
1648         al->sym = NULL;
1649         if (thread__find_map_fb(thread, cpumode, addr, al))
1650                 al->sym = map__find_symbol(al->map, al->addr);
1651         return al->sym;
1652 }
1653
1654 /*
1655  * Callers need to drop the reference to al->thread, obtained in
1656  * machine__findnew_thread()
1657  */
1658 int machine__resolve(struct machine *machine, struct addr_location *al,
1659                      struct perf_sample *sample)
1660 {
1661         struct thread *thread = machine__findnew_thread(machine, sample->pid,
1662                                                         sample->tid);
1663
1664         if (thread == NULL)
1665                 return -1;
1666
1667         dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
1668         thread__find_map(thread, sample->cpumode, sample->ip, al);
1669         dump_printf(" ...... dso: %s\n",
1670                     al->map ? al->map->dso->long_name :
1671                         al->level == 'H' ? "[hypervisor]" : "<not found>");
1672
1673         if (thread__is_filtered(thread))
1674                 al->filtered |= (1 << HIST_FILTER__THREAD);
1675
1676         al->sym = NULL;
1677         al->cpu = sample->cpu;
1678         al->socket = -1;
1679         al->srcline = NULL;
1680
1681         if (al->cpu >= 0) {
1682                 struct perf_env *env = machine->env;
1683
1684                 if (env && env->cpu)
1685                         al->socket = env->cpu[al->cpu].socket_id;
1686         }
1687
1688         if (al->map) {
1689                 struct dso *dso = al->map->dso;
1690
1691                 if (symbol_conf.dso_list &&
1692                     (!dso || !(strlist__has_entry(symbol_conf.dso_list,
1693                                                   dso->short_name) ||
1694                                (dso->short_name != dso->long_name &&
1695                                 strlist__has_entry(symbol_conf.dso_list,
1696                                                    dso->long_name))))) {
1697                         al->filtered |= (1 << HIST_FILTER__DSO);
1698                 }
1699
1700                 al->sym = map__find_symbol(al->map, al->addr);
1701         }
1702
1703         if (symbol_conf.sym_list &&
1704                 (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
1705                                                 al->sym->name))) {
1706                 al->filtered |= (1 << HIST_FILTER__SYMBOL);
1707         }
1708
1709         return 0;
1710 }
1711
1712 /*
1713  * The preprocess_sample method will return with reference counts for the
1714  * in it, when done using (and perhaps getting ref counts if needing to
1715  * keep a pointer to one of those entries) it must be paired with
1716  * addr_location__put(), so that the refcounts can be decremented.
1717  */
void addr_location__put(struct addr_location *al)
{
	/*
	 * thread__zput() puts the thread reference and zeroes al->thread,
	 * so a second put on the same addr_location is harmless.
	 */
	thread__zput(al->thread);
}
1722
/*
 * Intel BTS is configured as a hardware branch-instructions event with a
 * sample period of 1.
 *
 * Note: this used a bitwise AND against PERF_COUNT_HW_BRANCH_INSTRUCTIONS
 * (== 4), which also matched any other hardware config with bit 2 set,
 * e.g. PERF_COUNT_HW_BRANCH_MISSES (5) and PERF_COUNT_HW_BUS_CYCLES (6).
 * Compare the config for equality instead.
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}
1729
/*
 * Events whose sample->addr can meaningfully be resolved to a symbol:
 * software page-fault events and Intel BTS branch events.
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	bool is_page_fault = attr->type == PERF_TYPE_SOFTWARE &&
			     (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
			      attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
			      attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ);

	return is_page_fault || is_bts_event(attr);
}
1743
1744 void thread__resolve(struct thread *thread, struct addr_location *al,
1745                      struct perf_sample *sample)
1746 {
1747         thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
1748
1749         al->cpu = sample->cpu;
1750         al->sym = NULL;
1751
1752         if (al->map)
1753                 al->sym = map__find_symbol(al->map, al->addr);
1754 }