linux-2.6-microblaze.git: tools/perf/util/synthetic-events.c
1 // SPDX-License-Identifier: GPL-2.0-only 
2
3 #include "util/cgroup.h"
4 #include "util/data.h"
5 #include "util/debug.h"
6 #include "util/dso.h"
7 #include "util/event.h"
8 #include "util/evlist.h"
9 #include "util/machine.h"
10 #include "util/map.h"
11 #include "util/map_symbol.h"
12 #include "util/branch.h"
13 #include "util/memswap.h"
14 #include "util/namespaces.h"
15 #include "util/session.h"
16 #include "util/stat.h"
17 #include "util/symbol.h"
18 #include "util/synthetic-events.h"
19 #include "util/target.h"
20 #include "util/time-utils.h"
21 #include <linux/bitops.h>
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/zalloc.h>
25 #include <linux/perf_event.h>
26 #include <asm/bug.h>
27 #include <perf/evsel.h>
28 #include <perf/cpumap.h>
29 #include <internal/lib.h> // page_size
30 #include <internal/threadmap.h>
31 #include <perf/threadmap.h>
32 #include <symbol/kallsyms.h>
33 #include <dirent.h>
34 #include <errno.h>
35 #include <inttypes.h>
36 #include <stdio.h>
37 #include <string.h>
38 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
39 #include <api/fs/fs.h>
40 #include <api/io.h>
41 #include <sys/types.h>
42 #include <sys/stat.h>
43 #include <fcntl.h>
44 #include <unistd.h>
45
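/*
 * Per-process budget, in milliseconds, for parsing /proc/<pid>/task/<tid>/maps
 * when synthesizing MMAP2 events.  Once exceeded, the remaining maps of that
 * task are skipped and the last synthesized event is flagged with
 * PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT.  Can be raised with
 * --proc-map-timeout.
 */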
46 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
47
48 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
49
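/*
 * Feed a synthesized (i.e. not kernel-generated) event to the tool's handler.
 * Since there is no real sample attached, a mostly-empty dummy sample is
 * passed along; only the cpumode is derived from the event header.
 */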
50 int perf_tool__process_synth_event(struct perf_tool *tool,
51                                    union perf_event *event,
52                                    struct machine *machine,
53                                    perf_event__handler_t process)
54 {
55         struct perf_sample synth_sample = {
56                 .pid       = -1,
57                 .tid       = -1,
58                 .time      = -1,
59                 .stream_id = -1,
60                 .cpu       = -1,
61                 .period    = 1,
62                 .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
63         };
64
65         return process(tool, event, &synth_sample, machine);
66 }
67
68 /*
69  * Assumes that the first 4095 bytes of /proc/pid/status contain
70  * the comm, tgid and ppid.
71  */
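/*
 * Illustrative excerpt of the /proc/<pid>/status fields consumed below
 * (the values are made up):
 *
 *   Name:   cat
 *   Tgid:   4242
 *   PPid:   1
 *   VmPeak:     8868 kB
 *   Threads:    1
 *
 * Kernel threads have no user address space and therefore no VmPeak: line,
 * so the presence of Threads: without a preceding VmPeak: is used to flag
 * them as kernel threads.
 */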
72 static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
73                                     pid_t *tgid, pid_t *ppid, bool *kernel)
74 {
75         char bf[4096];
76         int fd;
77         size_t size = 0;
78         ssize_t n;
79         char *name, *tgids, *ppids, *vmpeak, *threads;
80
81         *tgid = -1;
82         *ppid = -1;
83
84         if (pid)
85                 snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
86         else
87                 snprintf(bf, sizeof(bf), "/proc/%d/status", tid);
88
89         fd = open(bf, O_RDONLY);
90         if (fd < 0) {
91                 pr_debug("couldn't open %s\n", bf);
92                 return -1;
93         }
94
95         n = read(fd, bf, sizeof(bf) - 1);
96         close(fd);
97         if (n <= 0) {
98                 pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
99                            tid);
100                 return -1;
101         }
102         bf[n] = '\0';
103
104         name = strstr(bf, "Name:");
105         tgids = strstr(name ?: bf, "Tgid:");
106         ppids = strstr(tgids ?: bf, "PPid:");
107         vmpeak = strstr(ppids ?: bf, "VmPeak:");
108
109         if (vmpeak)
110                 threads = NULL;
111         else
112                 threads = strstr(ppids ?: bf, "Threads:");
113
114         if (name) {
115                 char *nl;
116
117                 name = skip_spaces(name + 5);  /* strlen("Name:") */
118                 nl = strchr(name, '\n');
119                 if (nl)
120                         *nl = '\0';
121
122                 size = strlen(name);
123                 if (size >= len)
124                         size = len - 1;
125                 memcpy(comm, name, size);
126                 comm[size] = '\0';
127         } else {
128                 pr_debug("Name: string not found for pid %d\n", tid);
129         }
130
131         if (tgids) {
132                 tgids += 5;  /* strlen("Tgid:") */
133                 *tgid = atoi(tgids);
134         } else {
135                 pr_debug("Tgid: string not found for pid %d\n", tid);
136         }
137
138         if (ppids) {
139                 ppids += 5;  /* strlen("PPid:") */
140                 *ppid = atoi(ppids);
141         } else {
142                 pr_debug("PPid: string not found for pid %d\n", tid);
143         }
144
145         if (!vmpeak && threads)
146                 *kernel = true;
147         else
148                 *kernel = false;
149
150         return 0;
151 }
152
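/*
 * Fill in a PERF_RECORD_COMM for tid.  The record is variable sized: the
 * unused tail of the fixed comm[] buffer is trimmed (keeping u64 alignment)
 * and machine->id_hdr_size bytes are reserved for the sample ID fields that
 * may be appended by the session layer.
 */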
153 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
154                                     struct machine *machine,
155                                     pid_t *tgid, pid_t *ppid, bool *kernel)
156 {
157         size_t size;
158
159         *ppid = -1;
160
161         memset(&event->comm, 0, sizeof(event->comm));
162
163         if (machine__is_host(machine)) {
164                 if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
165                                              sizeof(event->comm.comm),
166                                              tgid, ppid, kernel) != 0) {
167                         return -1;
168                 }
169         } else {
170                 *tgid = machine->pid;
171         }
172
173         if (*tgid < 0)
174                 return -1;
175
176         event->comm.pid = *tgid;
177         event->comm.header.type = PERF_RECORD_COMM;
178
179         size = strlen(event->comm.comm) + 1;
180         size = PERF_ALIGN(size, sizeof(u64));
181         memset(event->comm.comm + size, 0, machine->id_hdr_size);
182         event->comm.header.size = (sizeof(event->comm) -
183                                 (sizeof(event->comm.comm) - size) +
184                                 machine->id_hdr_size);
185         event->comm.tid = tid;
186
187         return 0;
188 }
189
190 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
191                                          union perf_event *event, pid_t pid,
192                                          perf_event__handler_t process,
193                                          struct machine *machine)
194 {
195         pid_t tgid, ppid;
196         bool kernel_thread;
197
198         if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
199                                      &kernel_thread) != 0)
200                 return -1;
201
202         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
203                 return -1;
204
205         return tgid;
206 }
207
208 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
209                                          struct perf_ns_link_info *ns_link_info)
210 {
211         struct stat64 st;
212         char proc_ns[128];
213
214         sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
215         if (stat64(proc_ns, &st) == 0) {
216                 ns_link_info->dev = st.st_dev;
217                 ns_link_info->ino = st.st_ino;
218         }
219 }
220
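/*
 * Synthesize a PERF_RECORD_NAMESPACES event carrying, for each namespace
 * type, the device and inode numbers of the /proc/<pid>/ns/<name> link,
 * which together identify the namespace instance the task belongs to.
 */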
221 int perf_event__synthesize_namespaces(struct perf_tool *tool,
222                                       union perf_event *event,
223                                       pid_t pid, pid_t tgid,
224                                       perf_event__handler_t process,
225                                       struct machine *machine)
226 {
227         u32 idx;
228         struct perf_ns_link_info *ns_link_info;
229
230         if (!tool || !tool->namespace_events)
231                 return 0;
232
233         memset(&event->namespaces, 0, (sizeof(event->namespaces) +
234                (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
235                machine->id_hdr_size));
236
237         event->namespaces.pid = tgid;
238         event->namespaces.tid = pid;
239
240         event->namespaces.nr_namespaces = NR_NAMESPACES;
241
242         ns_link_info = event->namespaces.link_info;
243
244         for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
245                 perf_event__get_ns_link_info(pid, perf_ns__name(idx),
246                                              &ns_link_info[idx]);
247
248         event->namespaces.header.type = PERF_RECORD_NAMESPACES;
249
250         event->namespaces.header.size = (sizeof(event->namespaces) +
251                         (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
252                         machine->id_hdr_size);
253
254         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
255                 return -1;
256
257         return 0;
258 }
259
260 static int perf_event__synthesize_fork(struct perf_tool *tool,
261                                        union perf_event *event,
262                                        pid_t pid, pid_t tgid, pid_t ppid,
263                                        perf_event__handler_t process,
264                                        struct machine *machine)
265 {
266         memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
267
268         /*
269          * For the main thread, set the parent to the ppid from the status
270          * file. For other threads, set the parent pid to the main thread,
271          * i.e. assume the main thread spawns all threads in a process.
272          */
273         if (tgid == pid) {
274                 event->fork.ppid = ppid;
275                 event->fork.ptid = ppid;
276         } else {
277                 event->fork.ppid = tgid;
278                 event->fork.ptid = tgid;
279         }
280         event->fork.pid  = tgid;
281         event->fork.tid  = pid;
282         event->fork.header.type = PERF_RECORD_FORK;
283         event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
284
285         event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
286
287         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
288                 return -1;
289
290         return 0;
291 }
292
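/*
 * Parse one line of /proc/<pid>/maps, e.g.:
 *
 *   00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat
 *
 * Returns false on malformed input or read error.  On success the pathname
 * (possibly empty) is NUL terminated and truncated to fit pathname_size.
 */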
293 static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
294                                 u32 *prot, u32 *flags, __u64 *offset,
295                                 u32 *maj, u32 *min,
296                                 __u64 *inode,
297                                 ssize_t pathname_size, char *pathname)
298 {
299         __u64 temp;
300         int ch;
301         char *start_pathname = pathname;
302
303         if (io__get_hex(io, start) != '-')
304                 return false;
305         if (io__get_hex(io, end) != ' ')
306                 return false;
307
308         /* map protection and flags bits */
309         *prot = 0;
310         ch = io__get_char(io);
311         if (ch == 'r')
312                 *prot |= PROT_READ;
313         else if (ch != '-')
314                 return false;
315         ch = io__get_char(io);
316         if (ch == 'w')
317                 *prot |= PROT_WRITE;
318         else if (ch != '-')
319                 return false;
320         ch = io__get_char(io);
321         if (ch == 'x')
322                 *prot |= PROT_EXEC;
323         else if (ch != '-')
324                 return false;
325         ch = io__get_char(io);
326         if (ch == 's')
327                 *flags = MAP_SHARED;
328         else if (ch == 'p')
329                 *flags = MAP_PRIVATE;
330         else
331                 return false;
332         if (io__get_char(io) != ' ')
333                 return false;
334
335         if (io__get_hex(io, offset) != ' ')
336                 return false;
337
338         if (io__get_hex(io, &temp) != ':')
339                 return false;
340         *maj = temp;
341         if (io__get_hex(io, &temp) != ' ')
342                 return false;
343         *min = temp;
344
345         ch = io__get_dec(io, inode);
346         if (ch != ' ') {
347                 *pathname = '\0';
348                 return ch == '\n';
349         }
350         do {
351                 ch = io__get_char(io);
352         } while (ch == ' ');
353         while (true) {
354                 if (ch < 0)
355                         return false;
356                 if (ch == '\0' || ch == '\n' ||
357                     (pathname + 1 - start_pathname) >= pathname_size) {
358                         *pathname = '\0';
359                         return true;
360                 }
361                 *pathname++ = ch;
362                 ch = io__get_char(io);
363         }
364 }
365
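/*
 * Try to attach a build ID to an MMAP2 record: for the kernel it is read
 * from /sys/kernel/notes, otherwise from the mapped file itself.  On success
 * PERF_RECORD_MISC_MMAP_BUILD_ID is set, which tells consumers that the
 * record carries the build ID in place of the maj/min/ino fields.
 */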
366 static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
367                                              bool is_kernel)
368 {
369         struct build_id bid;
370         int rc;
371
372         if (is_kernel)
373                 rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
374         else
375                 rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
376
377         if (rc == 0) {
378                 memcpy(event->build_id, bid.data, sizeof(bid.data));
379                 event->build_id_size = (u8) bid.size;
380                 event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
381                 event->__reserved_1 = 0;
382                 event->__reserved_2 = 0;
383         } else {
384                 if (event->filename[0] == '/') {
385                         pr_debug2("Failed to read build ID for %s\n",
386                                   event->filename);
387                 }
388         }
389 }
390
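/*
 * Synthesize one PERF_RECORD_MMAP2 per mapping found in
 * /proc/<pid>/task/<pid>/maps.  Anonymous and hugetlbfs-backed mappings get
 * the "//anon" pseudo name, and non-executable mappings are only emitted
 * when mmap_data is set (readable ones, flagged as MMAP_DATA).
 */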
391 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
392                                        union perf_event *event,
393                                        pid_t pid, pid_t tgid,
394                                        perf_event__handler_t process,
395                                        struct machine *machine,
396                                        bool mmap_data)
397 {
398         unsigned long long t;
399         char bf[BUFSIZ];
400         struct io io;
401         bool truncation = false;
402         unsigned long long timeout = proc_map_timeout * 1000000ULL;
403         int rc = 0;
404         const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
405         int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
406
407         if (machine__is_default_guest(machine))
408                 return 0;
409
410         snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
411                 machine->root_dir, pid, pid);
412
413         io.fd = open(bf, O_RDONLY, 0);
414         if (io.fd < 0) {
415                 /*
416                  * We raced with a task exiting - just return:
417                  */
418                 pr_debug("couldn't open %s\n", bf);
419                 return -1;
420         }
421         io__init(&io, io.fd, bf, sizeof(bf));
422
423         event->header.type = PERF_RECORD_MMAP2;
424         t = rdclock();
425
426         while (!io.eof) {
427                 static const char anonstr[] = "//anon";
428                 size_t size, aligned_size;
429
430                 /* ensure null termination since stack will be reused. */
431                 event->mmap2.filename[0] = '\0';
432
433                 /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
434                 if (!read_proc_maps_line(&io,
435                                         &event->mmap2.start,
436                                         &event->mmap2.len,
437                                         &event->mmap2.prot,
438                                         &event->mmap2.flags,
439                                         &event->mmap2.pgoff,
440                                         &event->mmap2.maj,
441                                         &event->mmap2.min,
442                                         &event->mmap2.ino,
443                                         sizeof(event->mmap2.filename),
444                                         event->mmap2.filename))
445                         continue;
446
447                 if ((rdclock() - t) > timeout) {
448                         pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
449                                    "You may want to increase "
450                                    "the time limit by --proc-map-timeout\n",
451                                    machine->root_dir, pid, pid);
452                         truncation = true;
453                         goto out;
454                 }
455
456                 event->mmap2.ino_generation = 0;
457
458                 /*
459                  * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
460                  */
461                 if (machine__is_host(machine))
462                         event->header.misc = PERF_RECORD_MISC_USER;
463                 else
464                         event->header.misc = PERF_RECORD_MISC_GUEST_USER;
465
466                 if ((event->mmap2.prot & PROT_EXEC) == 0) {
467                         if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
468                                 continue;
469
470                         event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
471                 }
472
473 out:
474                 if (truncation)
475                         event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
476
477                 if (!strcmp(event->mmap2.filename, ""))
478                         strcpy(event->mmap2.filename, anonstr);
479
480                 if (hugetlbfs_mnt_len &&
481                     !strncmp(event->mmap2.filename, hugetlbfs_mnt,
482                              hugetlbfs_mnt_len)) {
483                         strcpy(event->mmap2.filename, anonstr);
484                         event->mmap2.flags |= MAP_HUGETLB;
485                 }
486
487                 size = strlen(event->mmap2.filename) + 1;
488                 aligned_size = PERF_ALIGN(size, sizeof(u64));
489                 event->mmap2.len -= event->mmap2.start;
490                 event->mmap2.header.size = (sizeof(event->mmap2) -
491                                         (sizeof(event->mmap2.filename) - aligned_size));
492                 memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
493                         (aligned_size - size));
494                 event->mmap2.header.size += machine->id_hdr_size;
495                 event->mmap2.pid = tgid;
496                 event->mmap2.tid = pid;
497
498                 if (symbol_conf.buildid_mmap2)
499                         perf_record_mmap2__read_build_id(&event->mmap2, false);
500
501                 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
502                         rc = -1;
503                         break;
504                 }
505
506                 if (truncation)
507                         break;
508         }
509
510         close(io.fd);
511         return rc;
512 }
513
514 #ifdef HAVE_FILE_HANDLE
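/*
 * Synthesize a PERF_RECORD_CGROUP for the cgroup directory at 'path'.  The
 * 64-bit cgroup id is obtained via name_to_handle_at() on the cgroup
 * directory (effectively the kernfs inode id); the recorded path is relative
 * to the cgroup mount point and padded to u64 alignment.
 */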
515 static int perf_event__synthesize_cgroup(struct perf_tool *tool,
516                                          union perf_event *event,
517                                          char *path, size_t mount_len,
518                                          perf_event__handler_t process,
519                                          struct machine *machine)
520 {
521         size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
522         size_t path_len = strlen(path) - mount_len + 1;
523         struct {
524                 struct file_handle fh;
525                 uint64_t cgroup_id;
526         } handle;
527         int mount_id;
528
529         while (path_len % sizeof(u64))
530                 path[mount_len + path_len++] = '\0';
531
532         memset(&event->cgroup, 0, event_size);
533
534         event->cgroup.header.type = PERF_RECORD_CGROUP;
535         event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
536
537         handle.fh.handle_bytes = sizeof(handle.cgroup_id);
538         if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
539                 pr_debug("stat failed: %s\n", path);
540                 return -1;
541         }
542
543         event->cgroup.id = handle.cgroup_id;
544         strncpy(event->cgroup.path, path + mount_len, path_len);
545         memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
546
547         if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
548                 pr_debug("process synth event failed\n");
549                 return -1;
550         }
551
552         return 0;
553 }
554
555 static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
556                                         union perf_event *event,
557                                         char *path, size_t mount_len,
558                                         perf_event__handler_t process,
559                                         struct machine *machine)
560 {
561         size_t pos = strlen(path);
562         DIR *d;
563         struct dirent *dent;
564         int ret = 0;
565
566         if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
567                                           process, machine) < 0)
568                 return -1;
569
570         d = opendir(path);
571         if (d == NULL) {
572                 pr_debug("failed to open directory: %s\n", path);
573                 return -1;
574         }
575
576         while ((dent = readdir(d)) != NULL) {
577                 if (dent->d_type != DT_DIR)
578                         continue;
579                 if (!strcmp(dent->d_name, ".") ||
580                     !strcmp(dent->d_name, ".."))
581                         continue;
582
583                 /* any sane path should be less than PATH_MAX */
584                 if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
585                         continue;
586
587                 if (path[pos - 1] != '/')
588                         strcat(path, "/");
589                 strcat(path, dent->d_name);
590
591                 ret = perf_event__walk_cgroup_tree(tool, event, path,
592                                                    mount_len, process, machine);
593                 if (ret < 0)
594                         break;
595
596                 path[pos] = '\0';
597         }
598
599         closedir(d);
600         return ret;
601 }
602
603 int perf_event__synthesize_cgroups(struct perf_tool *tool,
604                                    perf_event__handler_t process,
605                                    struct machine *machine)
606 {
607         union perf_event event;
608         char cgrp_root[PATH_MAX];
609         size_t mount_len;  /* length of mount point in the path */
610
611         if (!tool || !tool->cgroup_events)
612                 return 0;
613
614         if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
615                 pr_debug("cannot find cgroup mount point\n");
616                 return -1;
617         }
618
619         mount_len = strlen(cgrp_root);
620         /* make sure the path starts with a slash (after mount point) */
621         strcat(cgrp_root, "/");
622
623         if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
624                                          process, machine) < 0)
625                 return -1;
626
627         return 0;
628 }
629 #else
630 int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
631                                    perf_event__handler_t process __maybe_unused,
632                                    struct machine *machine __maybe_unused)
633 {
634         return -1;
635 }
636 #endif
637
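/*
 * Synthesize one MMAP or MMAP2 record (depending on buildid_mmap2) for each
 * kernel module map known to the machine, so that module addresses can be
 * resolved at report time.
 */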
638 int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
639                                    struct machine *machine)
640 {
641         int rc = 0;
642         struct map *pos;
643         struct maps *maps = machine__kernel_maps(machine);
644         union perf_event *event;
645         size_t size = symbol_conf.buildid_mmap2 ?
646                         sizeof(event->mmap2) : sizeof(event->mmap);
647
648         event = zalloc(size + machine->id_hdr_size);
649         if (event == NULL) {
650                 pr_debug("Not enough memory synthesizing mmap event "
651                          "for kernel modules\n");
652                 return -1;
653         }
654
655         /*
656          * kernel uses 0 for user space maps, see kernel/perf_event.c
657          * __perf_event_mmap
658          */
659         if (machine__is_host(machine))
660                 event->header.misc = PERF_RECORD_MISC_KERNEL;
661         else
662                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
663
664         maps__for_each_entry(maps, pos) {
665                 if (!__map__is_kmodule(pos))
666                         continue;
667
668                 if (symbol_conf.buildid_mmap2) {
669                         size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
670                         event->mmap2.header.type = PERF_RECORD_MMAP2;
671                         event->mmap2.header.size = (sizeof(event->mmap2) -
672                                                 (sizeof(event->mmap2.filename) - size));
673                         memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
674                         event->mmap2.header.size += machine->id_hdr_size;
675                         event->mmap2.start = pos->start;
676                         event->mmap2.len   = pos->end - pos->start;
677                         event->mmap2.pid   = machine->pid;
678
679                         memcpy(event->mmap2.filename, pos->dso->long_name,
680                                pos->dso->long_name_len + 1);
681
682                         perf_record_mmap2__read_build_id(&event->mmap2, false);
683                 } else {
684                         size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
685                         event->mmap.header.type = PERF_RECORD_MMAP;
686                         event->mmap.header.size = (sizeof(event->mmap) -
687                                                 (sizeof(event->mmap.filename) - size));
688                         memset(event->mmap.filename + size, 0, machine->id_hdr_size);
689                         event->mmap.header.size += machine->id_hdr_size;
690                         event->mmap.start = pos->start;
691                         event->mmap.len   = pos->end - pos->start;
692                         event->mmap.pid   = machine->pid;
693
694                         memcpy(event->mmap.filename, pos->dso->long_name,
695                                pos->dso->long_name_len + 1);
696                 }
697
698                 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
699                         rc = -1;
700                         break;
701                 }
702         }
703
704         free(event);
705         return rc;
706 }
707
708 static int filter_task(const struct dirent *dirent)
709 {
710         return isdigit(dirent->d_name[0]);
711 }
712
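/*
 * Synthesize the events describing one thread.  With full == 0 only the
 * given pid is handled (COMM + NAMESPACES, plus MMAP2s if it is the thread
 * group leader).  Otherwise every entry in /proc/<pid>/task/ gets COMM,
 * FORK and NAMESPACES records, and the maps of the group leader are
 * synthesized once, skipping kernel threads.
 */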
713 static int __event__synthesize_thread(union perf_event *comm_event,
714                                       union perf_event *mmap_event,
715                                       union perf_event *fork_event,
716                                       union perf_event *namespaces_event,
717                                       pid_t pid, int full, perf_event__handler_t process,
718                                       struct perf_tool *tool, struct machine *machine,
719                                       bool needs_mmap, bool mmap_data)
720 {
721         char filename[PATH_MAX];
722         struct dirent **dirent;
723         pid_t tgid, ppid;
724         int rc = 0;
725         int i, n;
726
727         /* special case: only send one comm event using passed in pid */
728         if (!full) {
729                 tgid = perf_event__synthesize_comm(tool, comm_event, pid,
730                                                    process, machine);
731
732                 if (tgid == -1)
733                         return -1;
734
735                 if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
736                                                       tgid, process, machine) < 0)
737                         return -1;
738
739                 /*
740                  * send mmap only for thread group leader
741                  * see thread__init_maps()
742                  */
743                 if (pid == tgid && needs_mmap &&
744                     perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
745                                                        process, machine, mmap_data))
746                         return -1;
747
748                 return 0;
749         }
750
751         if (machine__is_default_guest(machine))
752                 return 0;
753
754         snprintf(filename, sizeof(filename), "%s/proc/%d/task",
755                  machine->root_dir, pid);
756
757         n = scandir(filename, &dirent, filter_task, alphasort);
758         if (n < 0)
759                 return n;
760
761         for (i = 0; i < n; i++) {
762                 char *end;
763                 pid_t _pid;
764                 bool kernel_thread = false;
765
766                 _pid = strtol(dirent[i]->d_name, &end, 10);
767                 if (*end)
768                         continue;
769
770                 rc = -1;
771                 if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
772                                              &tgid, &ppid, &kernel_thread) != 0)
773                         break;
774
775                 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
776                                                 ppid, process, machine) < 0)
777                         break;
778
779                 if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
780                                                       tgid, process, machine) < 0)
781                         break;
782
783                 /*
784                  * Send the prepared comm event
785                  */
786                 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
787                         break;
788
789                 rc = 0;
790                 if (_pid == pid && !kernel_thread && needs_mmap) {
791                         /* process the parent's maps too */
792                         rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
793                                                 process, machine, mmap_data);
794                         if (rc)
795                                 break;
796                 }
797         }
798
799         for (i = 0; i < n; i++)
800                 zfree(&dirent[i]);
801         free(dirent);
802
803         return rc;
804 }
805
806 int perf_event__synthesize_thread_map(struct perf_tool *tool,
807                                       struct perf_thread_map *threads,
808                                       perf_event__handler_t process,
809                                       struct machine *machine,
810                                       bool needs_mmap, bool mmap_data)
811 {
812         union perf_event *comm_event, *mmap_event, *fork_event;
813         union perf_event *namespaces_event;
814         int err = -1, thread, j;
815
816         comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
817         if (comm_event == NULL)
818                 goto out;
819
820         mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
821         if (mmap_event == NULL)
822                 goto out_free_comm;
823
824         fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
825         if (fork_event == NULL)
826                 goto out_free_mmap;
827
828         namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
829                                   (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
830                                   machine->id_hdr_size);
831         if (namespaces_event == NULL)
832                 goto out_free_fork;
833
834         err = 0;
835         for (thread = 0; thread < threads->nr; ++thread) {
836                 if (__event__synthesize_thread(comm_event, mmap_event,
837                                                fork_event, namespaces_event,
838                                                perf_thread_map__pid(threads, thread), 0,
839                                                process, tool, machine,
840                                                needs_mmap, mmap_data)) {
841                         err = -1;
842                         break;
843                 }
844
845                 /*
846                  * comm.pid is set to thread group id by
847                  * perf_event__synthesize_comm
848                  */
849                 if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
850                         bool need_leader = true;
851
852                         /* is thread group leader in thread_map? */
853                         for (j = 0; j < threads->nr; ++j) {
854                                 if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
855                                         need_leader = false;
856                                         break;
857                                 }
858                         }
859
860                         /* if not, generate events for it */
861                         if (need_leader &&
862                             __event__synthesize_thread(comm_event, mmap_event,
863                                                        fork_event, namespaces_event,
864                                                        comm_event->comm.pid, 0,
865                                                        process, tool, machine,
866                                                        needs_mmap, mmap_data)) {
867                                 err = -1;
868                                 break;
869                         }
870                 }
871         }
872         free(namespaces_event);
873 out_free_fork:
874         free(fork_event);
875 out_free_mmap:
876         free(mmap_event);
877 out_free_comm:
878         free(comm_event);
879 out:
880         return err;
881 }
882
883 static int __perf_event__synthesize_threads(struct perf_tool *tool,
884                                             perf_event__handler_t process,
885                                             struct machine *machine,
886                                             bool needs_mmap,
887                                             bool mmap_data,
888                                             struct dirent **dirent,
889                                             int start,
890                                             int num)
891 {
892         union perf_event *comm_event, *mmap_event, *fork_event;
893         union perf_event *namespaces_event;
894         int err = -1;
895         char *end;
896         pid_t pid;
897         int i;
898
899         comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
900         if (comm_event == NULL)
901                 goto out;
902
903         mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
904         if (mmap_event == NULL)
905                 goto out_free_comm;
906
907         fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
908         if (fork_event == NULL)
909                 goto out_free_mmap;
910
911         namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
912                                   (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
913                                   machine->id_hdr_size);
914         if (namespaces_event == NULL)
915                 goto out_free_fork;
916
917         for (i = start; i < start + num; i++) {
918                 if (!isdigit(dirent[i]->d_name[0]))
919                         continue;
920
921                 pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
922                 /* only interested in proper numerical dirents */
923                 if (*end)
924                         continue;
925                 /*
926                  * We may race with exiting thread, so don't stop just because
927                  * one thread couldn't be synthesized.
928                  */
929                 __event__synthesize_thread(comm_event, mmap_event, fork_event,
930                                            namespaces_event, pid, 1, process,
931                                            tool, machine, needs_mmap, mmap_data);
932         }
933         err = 0;
934
935         free(namespaces_event);
936 out_free_fork:
937         free(fork_event);
938 out_free_mmap:
939         free(mmap_event);
940 out_free_comm:
941         free(comm_event);
942 out:
943         return err;
944 }
945
946 struct synthesize_threads_arg {
947         struct perf_tool *tool;
948         perf_event__handler_t process;
949         struct machine *machine;
950         bool needs_mmap;
951         bool mmap_data;
952         struct dirent **dirent;
953         int num;
954         int start;
955 };
956
957 static void *synthesize_threads_worker(void *arg)
958 {
959         struct synthesize_threads_arg *args = arg;
960
961         __perf_event__synthesize_threads(args->tool, args->process,
962                                          args->machine,
963                                          args->needs_mmap, args->mmap_data,
964                                          args->dirent,
965                                          args->start, args->num);
966         return NULL;
967 }
968
969 int perf_event__synthesize_threads(struct perf_tool *tool,
970                                    perf_event__handler_t process,
971                                    struct machine *machine,
972                                    bool needs_mmap, bool mmap_data,
973                                    unsigned int nr_threads_synthesize)
974 {
975         struct synthesize_threads_arg *args = NULL;
976         pthread_t *synthesize_threads = NULL;
977         char proc_path[PATH_MAX];
978         struct dirent **dirent;
979         int num_per_thread;
980         int m, n, i, j;
981         int thread_nr;
982         int base = 0;
983         int err = -1;
984
985
986         if (machine__is_default_guest(machine))
987                 return 0;
988
989         snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
990         n = scandir(proc_path, &dirent, filter_task, alphasort);
991         if (n < 0)
992                 return err;
993
994         if (nr_threads_synthesize == UINT_MAX)
995                 thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
996         else
997                 thread_nr = nr_threads_synthesize;
998
999         if (thread_nr <= 1) {
1000                 err = __perf_event__synthesize_threads(tool, process,
1001                                                        machine,
1002                                                        needs_mmap, mmap_data,
1003                                                        dirent, base, n);
1004                 goto free_dirent;
1005         }
1006         if (thread_nr > n)
1007                 thread_nr = n;
1008
1009         synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
1010         if (synthesize_threads == NULL)
1011                 goto free_dirent;
1012
1013         args = calloc(sizeof(*args), thread_nr);
1014         if (args == NULL)
1015                 goto free_threads;
1016
1017         num_per_thread = n / thread_nr;
1018         m = n % thread_nr;
1019         for (i = 0; i < thread_nr; i++) {
1020                 args[i].tool = tool;
1021                 args[i].process = process;
1022                 args[i].machine = machine;
1023                 args[i].needs_mmap = needs_mmap;
1024                 args[i].mmap_data = mmap_data;
1025                 args[i].dirent = dirent;
1026         }
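        /*
         * Distribute the n /proc entries over thread_nr workers: the first
         * m workers take num_per_thread + 1 entries each, the rest take
         * num_per_thread, so all entries are covered exactly once.
         */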
1027         for (i = 0; i < m; i++) {
1028                 args[i].num = num_per_thread + 1;
1029                 args[i].start = i * args[i].num;
1030         }
1031         if (i != 0)
1032                 base = args[i-1].start + args[i-1].num;
1033         for (j = i; j < thread_nr; j++) {
1034                 args[j].num = num_per_thread;
1035                 args[j].start = base + (j - i) * args[i].num;
1036         }
1037
1038         for (i = 0; i < thread_nr; i++) {
1039                 if (pthread_create(&synthesize_threads[i], NULL,
1040                                    synthesize_threads_worker, &args[i]))
1041                         goto out_join;
1042         }
1043         err = 0;
1044 out_join:
1045         for (i = 0; i < thread_nr; i++)
1046                 pthread_join(synthesize_threads[i], NULL);
1047         free(args);
1048 free_threads:
1049         free(synthesize_threads);
1050 free_dirent:
1051         for (i = 0; i < n; i++)
1052                 zfree(&dirent[i]);
1053         free(dirent);
1054
1055         return err;
1056 }
1057
1058 int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
1059                                               perf_event__handler_t process __maybe_unused,
1060                                               struct machine *machine __maybe_unused)
1061 {
1062         return 0;
1063 }
1064
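/*
 * Synthesize the MMAP/MMAP2 record for the kernel text mapping.  pgoff is
 * set to the address of the reference relocation symbol (e.g. _text), which
 * report-time code can compare against the symbol's address in kallsyms to
 * compensate for kernel relocation (KASLR) between record and report.
 */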
1065 static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
1066                                                 perf_event__handler_t process,
1067                                                 struct machine *machine)
1068 {
1069         union perf_event *event;
1070         size_t size = symbol_conf.buildid_mmap2 ?
1071                         sizeof(event->mmap2) : sizeof(event->mmap);
1072         struct map *map = machine__kernel_map(machine);
1073         struct kmap *kmap;
1074         int err;
1075
1076         if (map == NULL)
1077                 return -1;
1078
1079         kmap = map__kmap(map);
1080         if (!kmap->ref_reloc_sym)
1081                 return -1;
1082
1083         /*
1084          * We should get this from /sys/kernel/sections/.text, but until that is
1085          * available use this, and once it is, keep this as a fallback for older
1086          * kernels.
1087          */
1088         event = zalloc(size + machine->id_hdr_size);
1089         if (event == NULL) {
1090                 pr_debug("Not enough memory synthesizing mmap event "
1091                          "for the kernel map\n");
1092                 return -1;
1093         }
1094
1095         if (machine__is_host(machine)) {
1096                 /*
1097                  * kernel uses PERF_RECORD_MISC_USER for user space maps,
1098                  * see kernel/perf_event.c __perf_event_mmap
1099                  */
1100                 event->header.misc = PERF_RECORD_MISC_KERNEL;
1101         } else {
1102                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
1103         }
1104
1105         if (symbol_conf.buildid_mmap2) {
1106                 size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
1107                                 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1108                 size = PERF_ALIGN(size, sizeof(u64));
1109                 event->mmap2.header.type = PERF_RECORD_MMAP2;
1110                 event->mmap2.header.size = (sizeof(event->mmap2) -
1111                                 (sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
1112                 event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
1113                 event->mmap2.start = map->start;
1114                 event->mmap2.len   = map->end - event->mmap2.start;
1115                 event->mmap2.pid   = machine->pid;
1116
1117                 perf_record_mmap2__read_build_id(&event->mmap2, true);
1118         } else {
1119                 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
1120                                 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1121                 size = PERF_ALIGN(size, sizeof(u64));
1122                 event->mmap.header.type = PERF_RECORD_MMAP;
1123                 event->mmap.header.size = (sizeof(event->mmap) -
1124                                 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
1125                 event->mmap.pgoff = kmap->ref_reloc_sym->addr;
1126                 event->mmap.start = map->start;
1127                 event->mmap.len   = map->end - event->mmap.start;
1128                 event->mmap.pid   = machine->pid;
1129         }
1130
1131         err = perf_tool__process_synth_event(tool, event, machine, process);
1132         free(event);
1133
1134         return err;
1135 }
1136
1137 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
1138                                        perf_event__handler_t process,
1139                                        struct machine *machine)
1140 {
1141         int err;
1142
1143         err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
1144         if (err < 0)
1145                 return err;
1146
1147         return perf_event__synthesize_extra_kmaps(tool, process, machine);
1148 }
1149
1150 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
1151                                       struct perf_thread_map *threads,
1152                                       perf_event__handler_t process,
1153                                       struct machine *machine)
1154 {
1155         union perf_event *event;
1156         int i, err, size;
1157
1158         size  = sizeof(event->thread_map);
1159         size += threads->nr * sizeof(event->thread_map.entries[0]);
1160
1161         event = zalloc(size);
1162         if (!event)
1163                 return -ENOMEM;
1164
1165         event->header.type = PERF_RECORD_THREAD_MAP;
1166         event->header.size = size;
1167         event->thread_map.nr = threads->nr;
1168
1169         for (i = 0; i < threads->nr; i++) {
1170                 struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
1171                 char *comm = perf_thread_map__comm(threads, i);
1172
1173                 if (!comm)
1174                         comm = (char *) "";
1175
1176                 entry->pid = perf_thread_map__pid(threads, i);
1177                 strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1178         }
1179
1180         err = process(tool, event, NULL, machine);
1181
1182         free(event);
1183         return err;
1184 }
1185
1186 static void synthesize_cpus(struct cpu_map_entries *cpus,
1187                             struct perf_cpu_map *map)
1188 {
1189         int i, map_nr = perf_cpu_map__nr(map);
1190
1191         cpus->nr = map_nr;
1192
1193         for (i = 0; i < map_nr; i++)
1194                 cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
1195 }
1196
1197 static void synthesize_mask(struct perf_record_record_cpu_map *mask,
1198                             struct perf_cpu_map *map, int max)
1199 {
1200         int i;
1201
1202         mask->nr = BITS_TO_LONGS(max);
1203         mask->long_size = sizeof(long);
1204
1205         for (i = 0; i < perf_cpu_map__nr(map); i++)
1206                 set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
1207 }
1208
1209 static size_t cpus_size(struct perf_cpu_map *map)
1210 {
1211         return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
1212 }
1213
1214 static size_t mask_size(struct perf_cpu_map *map, int *max)
1215 {
1216         int i;
1217
1218         *max = 0;
1219
1220         for (i = 0; i < perf_cpu_map__nr(map); i++) {
1221                 /* bit position of the cpu is + 1 */
1222                 int bit = perf_cpu_map__cpu(map, i).cpu + 1;
1223
1224                 if (bit > *max)
1225                         *max = bit;
1226         }
1227
1228         return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
1229 }
1230
1231 void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
1232 {
1233         size_t size_cpus, size_mask;
1234         bool is_dummy = perf_cpu_map__empty(map);
1235
1236         /*
1237          * Both array and mask data have variable size based
1238          * on the number of cpus and their actual values.
1239          * The size of the 'struct perf_record_cpu_map_data' is:
1240          *
1241          *   array = size of 'struct cpu_map_entries' +
1242          *           number of cpus * sizeof(u16)
1243          *
1244          *   mask  = size of 'struct perf_record_record_cpu_map' +
1245          *           maximum cpu bit converted to size of longs
1246          *
1247          * and finally + the size of 'struct perf_record_cpu_map_data'.
1248          */
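        /*
         * Illustrative sizing (assuming 2-byte cpu entries and 8-byte longs):
         * a map of CPUs {0, 1} is only a few bytes as an array but needs at
         * least one full long plus its header as a mask, so the array form
         * wins; a map covering CPUs 0-127 takes 256+ bytes as an array but
         * only two longs as a mask.
         */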
1249         size_cpus = cpus_size(map);
1250         size_mask = mask_size(map, max);
1251
1252         if (is_dummy || (size_cpus < size_mask)) {
1253                 *size += size_cpus;
1254                 *type  = PERF_CPU_MAP__CPUS;
1255         } else {
1256                 *size += size_mask;
1257                 *type  = PERF_CPU_MAP__MASK;
1258         }
1259
1260         *size += sizeof(struct perf_record_cpu_map_data);
1261         *size = PERF_ALIGN(*size, sizeof(u64));
1262         return zalloc(*size);
1263 }
1264
1265 void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
1266                               u16 type, int max)
1267 {
1268         data->type = type;
1269
1270         switch (type) {
1271         case PERF_CPU_MAP__CPUS:
1272                 synthesize_cpus((struct cpu_map_entries *) data->data, map);
1273                 break;
1274         case PERF_CPU_MAP__MASK:
1275                 synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
1276         default:
1277                 break;
1278         }
1279 }
1280
1281 static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
1282 {
1283         size_t size = sizeof(struct perf_record_cpu_map);
1284         struct perf_record_cpu_map *event;
1285         int max;
1286         u16 type;
1287
1288         event = cpu_map_data__alloc(map, &size, &type, &max);
1289         if (!event)
1290                 return NULL;
1291
1292         event->header.type = PERF_RECORD_CPU_MAP;
1293         event->header.size = size;
1294         event->data.type   = type;
1295
1296         cpu_map_data__synthesize(&event->data, map, type, max);
1297         return event;
1298 }
1299
1300 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1301                                    struct perf_cpu_map *map,
1302                                    perf_event__handler_t process,
1303                                    struct machine *machine)
1304 {
1305         struct perf_record_cpu_map *event;
1306         int err;
1307
1308         event = cpu_map_event__new(map);
1309         if (!event)
1310                 return -ENOMEM;
1311
1312         err = process(tool, (union perf_event *) event, NULL, machine);
1313
1314         free(event);
1315         return err;
1316 }
1317
1318 int perf_event__synthesize_stat_config(struct perf_tool *tool,
1319                                        struct perf_stat_config *config,
1320                                        perf_event__handler_t process,
1321                                        struct machine *machine)
1322 {
1323         struct perf_record_stat_config *event;
1324         int size, i = 0, err;
1325
1326         size  = sizeof(*event);
1327         size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1328
1329         event = zalloc(size);
1330         if (!event)
1331                 return -ENOMEM;
1332
1333         event->header.type = PERF_RECORD_STAT_CONFIG;
1334         event->header.size = size;
1335         event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1336
1337 #define ADD(__term, __val)                                      \
1338         event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;   \
1339         event->data[i].val = __val;                             \
1340         i++;
1341
1342         ADD(AGGR_MODE,  config->aggr_mode)
1343         ADD(INTERVAL,   config->interval)
1344         ADD(SCALE,      config->scale)
1345
1346         WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1347                   "stat config terms unbalanced\n");
1348 #undef ADD
1349
1350         err = process(tool, (union perf_event *) event, NULL, machine);
1351
1352         free(event);
1353         return err;
1354 }
1355
1356 int perf_event__synthesize_stat(struct perf_tool *tool,
1357                                 struct perf_cpu cpu, u32 thread, u64 id,
1358                                 struct perf_counts_values *count,
1359                                 perf_event__handler_t process,
1360                                 struct machine *machine)
1361 {
1362         struct perf_record_stat event;
1363
1364         event.header.type = PERF_RECORD_STAT;
1365         event.header.size = sizeof(event);
1366         event.header.misc = 0;
1367
1368         event.id        = id;
1369         event.cpu       = cpu.cpu;
1370         event.thread    = thread;
1371         event.val       = count->val;
1372         event.ena       = count->ena;
1373         event.run       = count->run;
1374
1375         return process(tool, (union perf_event *) &event, NULL, machine);
1376 }
1377
1378 int perf_event__synthesize_stat_round(struct perf_tool *tool,
1379                                       u64 evtime, u64 type,
1380                                       perf_event__handler_t process,
1381                                       struct machine *machine)
1382 {
1383         struct perf_record_stat_round event;
1384
1385         event.header.type = PERF_RECORD_STAT_ROUND;
1386         event.header.size = sizeof(event);
1387         event.header.misc = 0;
1388
1389         event.time = evtime;
1390         event.type = type;
1391
1392         return process(tool, (union perf_event *) &event, NULL, machine);
1393 }
1394
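/*
 * Compute the size, in bytes, that a synthesized PERF_RECORD_SAMPLE will
 * occupy for the given sample_type and read_format.  This must mirror the
 * layout emitted by perf_event__synthesize_sample() below.
 */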
1395 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1396 {
1397         size_t sz, result = sizeof(struct perf_record_sample);
1398
1399         if (type & PERF_SAMPLE_IDENTIFIER)
1400                 result += sizeof(u64);
1401
1402         if (type & PERF_SAMPLE_IP)
1403                 result += sizeof(u64);
1404
1405         if (type & PERF_SAMPLE_TID)
1406                 result += sizeof(u64);
1407
1408         if (type & PERF_SAMPLE_TIME)
1409                 result += sizeof(u64);
1410
1411         if (type & PERF_SAMPLE_ADDR)
1412                 result += sizeof(u64);
1413
1414         if (type & PERF_SAMPLE_ID)
1415                 result += sizeof(u64);
1416
1417         if (type & PERF_SAMPLE_STREAM_ID)
1418                 result += sizeof(u64);
1419
1420         if (type & PERF_SAMPLE_CPU)
1421                 result += sizeof(u64);
1422
1423         if (type & PERF_SAMPLE_PERIOD)
1424                 result += sizeof(u64);
1425
1426         if (type & PERF_SAMPLE_READ) {
1427                 result += sizeof(u64);
1428                 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1429                         result += sizeof(u64);
1430                 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1431                         result += sizeof(u64);
1432                 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1433                 if (read_format & PERF_FORMAT_GROUP) {
1434                         sz = sample->read.group.nr *
1435                              sizeof(struct sample_read_value);
1436                         result += sz;
1437                 } else {
1438                         result += sizeof(u64);
1439                 }
1440         }
1441
1442         if (type & PERF_SAMPLE_CALLCHAIN) {
1443                 sz = (sample->callchain->nr + 1) * sizeof(u64);
1444                 result += sz;
1445         }
1446
1447         if (type & PERF_SAMPLE_RAW) {
1448                 result += sizeof(u32);
1449                 result += sample->raw_size;
1450         }
1451
1452         if (type & PERF_SAMPLE_BRANCH_STACK) {
1453                 sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1454                 /* nr, hw_idx */
1455                 sz += 2 * sizeof(u64);
1456                 result += sz;
1457         }
1458
1459         if (type & PERF_SAMPLE_REGS_USER) {
1460                 if (sample->user_regs.abi) {
1461                         result += sizeof(u64);
1462                         sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1463                         result += sz;
1464                 } else {
1465                         result += sizeof(u64);
1466                 }
1467         }
1468
1469         if (type & PERF_SAMPLE_STACK_USER) {
1470                 sz = sample->user_stack.size;
1471                 result += sizeof(u64);
1472                 if (sz) {
1473                         result += sz;
1474                         result += sizeof(u64);
1475                 }
1476         }
1477
1478         if (type & PERF_SAMPLE_WEIGHT_TYPE)
1479                 result += sizeof(u64);
1480
1481         if (type & PERF_SAMPLE_DATA_SRC)
1482                 result += sizeof(u64);
1483
1484         if (type & PERF_SAMPLE_TRANSACTION)
1485                 result += sizeof(u64);
1486
1487         if (type & PERF_SAMPLE_REGS_INTR) {
1488                 if (sample->intr_regs.abi) {
1489                         result += sizeof(u64);
1490                         sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1491                         result += sz;
1492                 } else {
1493                         result += sizeof(u64);
1494                 }
1495         }
1496
1497         if (type & PERF_SAMPLE_PHYS_ADDR)
1498                 result += sizeof(u64);
1499
1500         if (type & PERF_SAMPLE_CGROUP)
1501                 result += sizeof(u64);
1502
1503         if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
1504                 result += sizeof(u64);
1505
1506         if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
1507                 result += sizeof(u64);
1508
1509         if (type & PERF_SAMPLE_AUX) {
1510                 result += sizeof(u64);
1511                 result += sample->aux_sample.size;
1512         }
1513
1514         return result;
1515 }
1516
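/*
 * Weak default for PERF_SAMPLE_WEIGHT_TYPE: store only the plain 64-bit
 * weight. Architectures may override this to encode additional weight fields.
 */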
1517 void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
1518                                                __u64 *array, u64 type __maybe_unused)
1519 {
1520         *array = data->weight;
1521 }
1522
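/*
 * Serialize an already parsed perf_sample back into the raw sample array of
 * @event, using the same sample_type/read_format layout accounted for by
 * perf_event__sample_event_size().
 */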
1523 int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1524                                   const struct perf_sample *sample)
1525 {
1526         __u64 *array;
1527         size_t sz;
1528         /*
1529          * used for cross-endian analysis. See git commit 65014ab3
1530          * for why this goofiness is needed.
1531          */
1532         union u64_swap u;
1533
1534         array = event->sample.array;
1535
1536         if (type & PERF_SAMPLE_IDENTIFIER) {
1537                 *array = sample->id;
1538                 array++;
1539         }
1540
1541         if (type & PERF_SAMPLE_IP) {
1542                 *array = sample->ip;
1543                 array++;
1544         }
1545
1546         if (type & PERF_SAMPLE_TID) {
1547                 u.val32[0] = sample->pid;
1548                 u.val32[1] = sample->tid;
1549                 *array = u.val64;
1550                 array++;
1551         }
1552
1553         if (type & PERF_SAMPLE_TIME) {
1554                 *array = sample->time;
1555                 array++;
1556         }
1557
1558         if (type & PERF_SAMPLE_ADDR) {
1559                 *array = sample->addr;
1560                 array++;
1561         }
1562
1563         if (type & PERF_SAMPLE_ID) {
1564                 *array = sample->id;
1565                 array++;
1566         }
1567
1568         if (type & PERF_SAMPLE_STREAM_ID) {
1569                 *array = sample->stream_id;
1570                 array++;
1571         }
1572
1573         if (type & PERF_SAMPLE_CPU) {
1574                 u.val32[0] = sample->cpu;
1575                 u.val32[1] = 0;
1576                 *array = u.val64;
1577                 array++;
1578         }
1579
1580         if (type & PERF_SAMPLE_PERIOD) {
1581                 *array = sample->period;
1582                 array++;
1583         }
1584
1585         if (type & PERF_SAMPLE_READ) {
1586                 if (read_format & PERF_FORMAT_GROUP)
1587                         *array = sample->read.group.nr;
1588                 else
1589                         *array = sample->read.one.value;
1590                 array++;
1591
1592                 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1593                         *array = sample->read.time_enabled;
1594                         array++;
1595                 }
1596
1597                 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1598                         *array = sample->read.time_running;
1599                         array++;
1600                 }
1601
1602                 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1603                 if (read_format & PERF_FORMAT_GROUP) {
1604                         sz = sample->read.group.nr *
1605                              sizeof(struct sample_read_value);
1606                         memcpy(array, sample->read.group.values, sz);
1607                         array = (void *)array + sz;
1608                 } else {
1609                         *array = sample->read.one.id;
1610                         array++;
1611                 }
1612         }
1613
1614         if (type & PERF_SAMPLE_CALLCHAIN) {
1615                 sz = (sample->callchain->nr + 1) * sizeof(u64);
1616                 memcpy(array, sample->callchain, sz);
1617                 array = (void *)array + sz;
1618         }
1619
1620         if (type & PERF_SAMPLE_RAW) {
1621                 u.val32[0] = sample->raw_size;
1622                 *array = u.val64;
1623                 array = (void *)array + sizeof(u32);
1624
1625                 memcpy(array, sample->raw_data, sample->raw_size);
1626                 array = (void *)array + sample->raw_size;
1627         }
1628
1629         if (type & PERF_SAMPLE_BRANCH_STACK) {
1630                 sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1631                 /* nr, hw_idx */
1632                 sz += 2 * sizeof(u64);
1633                 memcpy(array, sample->branch_stack, sz);
1634                 array = (void *)array + sz;
1635         }
1636
1637         if (type & PERF_SAMPLE_REGS_USER) {
1638                 if (sample->user_regs.abi) {
1639                         *array++ = sample->user_regs.abi;
1640                         sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1641                         memcpy(array, sample->user_regs.regs, sz);
1642                         array = (void *)array + sz;
1643                 } else {
1644                         *array++ = 0;
1645                 }
1646         }
1647
1648         if (type & PERF_SAMPLE_STACK_USER) {
1649                 sz = sample->user_stack.size;
1650                 *array++ = sz;
1651                 if (sz) {
1652                         memcpy(array, sample->user_stack.data, sz);
1653                         array = (void *)array + sz;
1654                         *array++ = sz;
1655                 }
1656         }
1657
1658         if (type & PERF_SAMPLE_WEIGHT_TYPE) {
1659                 arch_perf_synthesize_sample_weight(sample, array, type);
1660                 array++;
1661         }
1662
1663         if (type & PERF_SAMPLE_DATA_SRC) {
1664                 *array = sample->data_src;
1665                 array++;
1666         }
1667
1668         if (type & PERF_SAMPLE_TRANSACTION) {
1669                 *array = sample->transaction;
1670                 array++;
1671         }
1672
1673         if (type & PERF_SAMPLE_REGS_INTR) {
1674                 if (sample->intr_regs.abi) {
1675                         *array++ = sample->intr_regs.abi;
1676                         sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1677                         memcpy(array, sample->intr_regs.regs, sz);
1678                         array = (void *)array + sz;
1679                 } else {
1680                         *array++ = 0;
1681                 }
1682         }
1683
1684         if (type & PERF_SAMPLE_PHYS_ADDR) {
1685                 *array = sample->phys_addr;
1686                 array++;
1687         }
1688
1689         if (type & PERF_SAMPLE_CGROUP) {
1690                 *array = sample->cgroup;
1691                 array++;
1692         }
1693
1694         if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
1695                 *array = sample->data_page_size;
1696                 array++;
1697         }
1698
1699         if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
1700                 *array = sample->code_page_size;
1701                 array++;
1702         }
1703
1704         if (type & PERF_SAMPLE_AUX) {
1705                 sz = sample->aux_sample.size;
1706                 *array++ = sz;
1707                 memcpy(array, sample->aux_sample.data, sz);
1708                 array = (void *)array + sz;
1709         }
1710
1711         return 0;
1712 }
1713
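/*
 * Emit one or more PERF_RECORD_ID_INDEX events mapping every sample id in
 * @evlist to the idx/cpu/tid of its perf_sample_id. The index is split into
 * multiple events when the entries would overflow the 16-bit header size.
 */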
1714 int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1715                                     struct evlist *evlist, struct machine *machine)
1716 {
1717         union perf_event *ev;
1718         struct evsel *evsel;
1719         size_t nr = 0, i = 0, sz, max_nr, n;
1720         int err;
1721
1722         pr_debug2("Synthesizing id index\n");
1723
1724         max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
1725                  sizeof(struct id_index_entry);
1726
1727         evlist__for_each_entry(evlist, evsel)
1728                 nr += evsel->core.ids;
1729
1730         n = nr > max_nr ? max_nr : nr;
1731         sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
1732         ev = zalloc(sz);
1733         if (!ev)
1734                 return -ENOMEM;
1735
1736         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1737         ev->id_index.header.size = sz;
1738         ev->id_index.nr = n;
1739
1740         evlist__for_each_entry(evlist, evsel) {
1741                 u32 j;
1742
1743                 for (j = 0; j < evsel->core.ids; j++) {
1744                         struct id_index_entry *e;
1745                         struct perf_sample_id *sid;
1746
1747                         if (i >= n) {
1748                                 err = process(tool, ev, NULL, machine);
1749                                 if (err)
1750                                         goto out_err;
1751                                 nr -= n;
1752                                 i = 0;
1753                         }
1754
1755                         e = &ev->id_index.entries[i++];
1756
1757                         e->id = evsel->core.id[j];
1758
1759                         sid = evlist__id2sid(evlist, e->id);
1760                         if (!sid) {
1761                                 free(ev);
1762                                 return -ENOENT;
1763                         }
1764
1765                         e->idx = sid->idx;
1766                         e->cpu = sid->cpu.cpu;
1767                         e->tid = sid->tid;
1768                 }
1769         }
1770
1771         sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
1772         ev->id_index.header.size = sz;
1773         ev->id_index.nr = nr;
1774
1775         err = process(tool, ev, NULL, machine);
1776 out_err:
1777         free(ev);
1778
1779         return err;
1780 }
1781
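/*
 * Synthesize the existing threads for @machine: use the explicit thread map
 * when the target names tasks, walk the whole system when it names CPUs, and
 * do nothing when a command to run was specified instead.
 */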
1782 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1783                                   struct target *target, struct perf_thread_map *threads,
1784                                   perf_event__handler_t process, bool needs_mmap,
1785                                   bool data_mmap, unsigned int nr_threads_synthesize)
1786 {
1787         /*
1788          * When perf runs in non-root PID namespace, and the namespace's proc FS
1789          * is not mounted, nsinfo__is_in_root_namespace() returns false.
1790          * In this case, the proc FS comes from the parent namespace, so the
1791          * perf tool would wrongly gather process info from its parent PID
1792          * namespace.
1793          *
1794          * To avoid the confusion of running in a child PID namespace while
1795          * synthesizing thread info from the parent PID namespace, return
1796          * failure with a warning.
1797          */
1798         if (!nsinfo__is_in_root_namespace()) {
1799                 pr_err("Perf runs in a non-root PID namespace but it tries to ");
1800                 pr_err("gather process info from its parent PID namespace.\n");
1801                 pr_err("Please mount the proc file system properly, e.g. ");
1802                 pr_err("add the option '--mount-proc' to the unshare command.\n");
1803                 return -EPERM;
1804         }
1805
1806         if (target__has_task(target))
1807                 return perf_event__synthesize_thread_map(tool, threads, process, machine,
1808                                                          needs_mmap, data_mmap);
1809         else if (target__has_cpu(target))
1810                 return perf_event__synthesize_threads(tool, process, machine,
1811                                                       needs_mmap, data_mmap,
1812                                                       nr_threads_synthesize);
1813         /* command specified */
1814         return 0;
1815 }
1816
1817 int machine__synthesize_threads(struct machine *machine, struct target *target,
1818                                 struct perf_thread_map *threads, bool needs_mmap,
1819                                 bool data_mmap, unsigned int nr_threads_synthesize)
1820 {
1821         return __machine__synthesize_threads(machine, NULL, target, threads,
1822                                              perf_event__process, needs_mmap,
1823                                              data_mmap, nr_threads_synthesize);
1824 }
1825
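/*
 * Allocate a zeroed PERF_RECORD_EVENT_UPDATE event with room for @size bytes
 * of payload, rounded up to a u64 boundary.
 */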
1826 static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1827 {
1828         struct perf_record_event_update *ev;
1829
1830         size += sizeof(*ev);
1831         size  = PERF_ALIGN(size, sizeof(u64));
1832
1833         ev = zalloc(size);
1834         if (ev) {
1835                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
1836                 ev->header.size = (u16)size;
1837                 ev->type        = type;
1838                 ev->id          = id;
1839         }
1840         return ev;
1841 }
1842
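/*
 * The following helpers emit PERF_RECORD_EVENT_UPDATE events describing an
 * evsel's unit string, scale factor, name and own cpu map respectively, keyed
 * by the evsel's first sample id.
 */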
1843 int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1844                                              perf_event__handler_t process)
1845 {
1846         size_t size = strlen(evsel->unit);
1847         struct perf_record_event_update *ev;
1848         int err;
1849
1850         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1851         if (ev == NULL)
1852                 return -ENOMEM;
1853
1854         strlcpy(ev->data, evsel->unit, size + 1);
1855         err = process(tool, (union perf_event *)ev, NULL, NULL);
1856         free(ev);
1857         return err;
1858 }
1859
1860 int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1861                                               perf_event__handler_t process)
1862 {
1863         struct perf_record_event_update *ev;
1864         struct perf_record_event_update_scale *ev_data;
1865         int err;
1866
1867         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1868         if (ev == NULL)
1869                 return -ENOMEM;
1870
1871         ev_data = (struct perf_record_event_update_scale *)ev->data;
1872         ev_data->scale = evsel->scale;
1873         err = process(tool, (union perf_event *)ev, NULL, NULL);
1874         free(ev);
1875         return err;
1876 }
1877
1878 int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1879                                              perf_event__handler_t process)
1880 {
1881         struct perf_record_event_update *ev;
1882         size_t len = strlen(evsel->name);
1883         int err;
1884
1885         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1886         if (ev == NULL)
1887                 return -ENOMEM;
1888
1889         strlcpy(ev->data, evsel->name, len + 1);
1890         err = process(tool, (union perf_event *)ev, NULL, NULL);
1891         free(ev);
1892         return err;
1893 }
1894
1895 int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
1896                                              perf_event__handler_t process)
1897 {
1898         size_t size = sizeof(struct perf_record_event_update);
1899         struct perf_record_event_update *ev;
1900         int max, err;
1901         u16 type;
1902
1903         if (!evsel->core.own_cpus)
1904                 return 0;
1905
1906         ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
1907         if (!ev)
1908                 return -ENOMEM;
1909
1910         ev->header.type = PERF_RECORD_EVENT_UPDATE;
1911         ev->header.size = (u16)size;
1912         ev->type        = PERF_EVENT_UPDATE__CPUS;
1913         ev->id          = evsel->core.id[0];
1914
1915         cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
1916                                  evsel->core.own_cpus, type, max);
1917
1918         err = process(tool, (union perf_event *)ev, NULL, NULL);
1919         free(ev);
1920         return err;
1921 }
1922
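/*
 * Emit a PERF_RECORD_HEADER_ATTR event for every evsel in @evlist.
 */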
1923 int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
1924                                  perf_event__handler_t process)
1925 {
1926         struct evsel *evsel;
1927         int err = 0;
1928
1929         evlist__for_each_entry(evlist, evsel) {
1930                 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
1931                                                   evsel->core.id, process);
1932                 if (err) {
1933                         pr_debug("failed to create perf header attribute\n");
1934                         return err;
1935                 }
1936         }
1937
1938         return err;
1939 }
1940
1941 static bool has_unit(struct evsel *evsel)
1942 {
1943         return evsel->unit && *evsel->unit;
1944 }
1945
1946 static bool has_scale(struct evsel *evsel)
1947 {
1948         return evsel->scale != 1;
1949 }
1950
1951 int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
1952                                       perf_event__handler_t process, bool is_pipe)
1953 {
1954         struct evsel *evsel;
1955         int err;
1956
1957         /*
1958          * Synthesize the other event details not carried within the
1959          * attr event - unit, scale, name.
1960          */
1961         evlist__for_each_entry(evsel_list, evsel) {
1962                 if (!evsel->supported)
1963                         continue;
1964
1965                 /*
1966                  * Synthesize unit and scale only if they are defined.
1967                  */
1968                 if (has_unit(evsel)) {
1969                         err = perf_event__synthesize_event_update_unit(tool, evsel, process);
1970                         if (err < 0) {
1971                                 pr_err("Couldn't synthesize evsel unit.\n");
1972                                 return err;
1973                         }
1974                 }
1975
1976                 if (has_scale(evsel)) {
1977                         err = perf_event__synthesize_event_update_scale(tool, evsel, process);
1978                         if (err < 0) {
1979                                 pr_err("Couldn't synthesize evsel scale.\n");
1980                                 return err;
1981                         }
1982                 }
1983
1984                 if (evsel->core.own_cpus) {
1985                         err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
1986                         if (err < 0) {
1987                                 pr_err("Couldn't synthesize evsel cpus.\n");
1988                                 return err;
1989                         }
1990                 }
1991
1992                 /*
1993                  * Name is needed only for pipe output,
1994                  * perf.data carries event names.
1995                  */
1996                 if (is_pipe) {
1997                         err = perf_event__synthesize_event_update_name(tool, evsel, process);
1998                         if (err < 0) {
1999                                 pr_err("Couldn't synthesize evsel name.\n");
2000                                 return err;
2001                         }
2002                 }
2003         }
2004         return 0;
2005 }
2006
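/*
 * Build a PERF_RECORD_HEADER_ATTR event carrying @attr followed by its @ids
 * sample ids. Fails with -E2BIG if the payload does not fit into the 16-bit
 * header size field.
 */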
2007 int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
2008                                 u32 ids, u64 *id, perf_event__handler_t process)
2009 {
2010         union perf_event *ev;
2011         size_t size;
2012         int err;
2013
2014         size = sizeof(struct perf_event_attr);
2015         size = PERF_ALIGN(size, sizeof(u64));
2016         size += sizeof(struct perf_event_header);
2017         size += ids * sizeof(u64);
2018
2019         ev = zalloc(size);
2020
2021         if (ev == NULL)
2022                 return -ENOMEM;
2023
2024         ev->attr.attr = *attr;
2025         memcpy(ev->attr.id, id, ids * sizeof(u64));
2026
2027         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2028         ev->attr.header.size = (u16)size;
2029
2030         if (ev->attr.header.size == size)
2031                 err = process(tool, ev, NULL, NULL);
2032         else
2033                 err = -E2BIG;
2034
2035         free(ev);
2036
2037         return err;
2038 }
2039
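/*
 * Stream the tracepoint format data for @evlist down the pipe behind @fd,
 * preceded by a PERF_RECORD_HEADER_TRACING_DATA event announcing its
 * u64-aligned size. Returns that aligned size on success.
 */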
2040 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
2041                                         perf_event__handler_t process)
2042 {
2043         union perf_event ev;
2044         struct tracing_data *tdata;
2045         ssize_t size = 0, aligned_size = 0, padding;
2046         struct feat_fd ff;
2047
2048         /*
2049          * We are going to store the size of the data followed
2050          * by the data contents. Since fd refers to a pipe,
2051          * we cannot seek back to store the size of the data once
2052          * we know it. Instead we:
2053          *
2054          * - write the tracing data to the temp file
2055          * - get/write the data size to pipe
2056          * - write the tracing data from the temp file
2057          *   to the pipe
2058          */
2059         tdata = tracing_data_get(&evlist->core.entries, fd, true);
2060         if (!tdata)
2061                 return -1;
2062
2063         memset(&ev, 0, sizeof(ev));
2064
2065         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2066         size = tdata->size;
2067         aligned_size = PERF_ALIGN(size, sizeof(u64));
2068         padding = aligned_size - size;
2069         ev.tracing_data.header.size = sizeof(ev.tracing_data);
2070         ev.tracing_data.size = aligned_size;
2071
2072         process(tool, &ev, NULL, NULL);
2073
2074         /*
2075          * The put function will copy all the tracing data
2076          * stored in temp file to the pipe.
2077          */
2078         tracing_data_put(tdata);
2079
2080         ff = (struct feat_fd){ .fd = fd };
2081         if (write_padded(&ff, NULL, 0, padding))
2082                 return -1;
2083
2084         return aligned_size;
2085 }
2086
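/*
 * Emit a PERF_RECORD_HEADER_BUILD_ID event for @pos, if it was hit, carrying
 * its build id and long file name.
 */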
2087 int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
2088                                     perf_event__handler_t process, struct machine *machine)
2089 {
2090         union perf_event ev;
2091         size_t len;
2092
2093         if (!pos->hit)
2094                 return 0;
2095
2096         memset(&ev, 0, sizeof(ev));
2097
2098         len = pos->long_name_len + 1;
2099         len = PERF_ALIGN(len, NAME_ALIGN);
2100         memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
2101         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2102         ev.build_id.header.misc = misc;
2103         ev.build_id.pid = machine->pid;
2104         ev.build_id.header.size = sizeof(ev.build_id) + len;
2105         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2106
2107         return process(tool, &ev, NULL, machine);
2108 }
2109
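/*
 * Synthesize the preamble a stat session needs: attr events (when @attrs is
 * set), the extra attr details, the thread map, the cpu map and the stat
 * config.
 */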
2110 int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
2111                                        struct evlist *evlist, perf_event__handler_t process, bool attrs)
2112 {
2113         int err;
2114
2115         if (attrs) {
2116                 err = perf_event__synthesize_attrs(tool, evlist, process);
2117                 if (err < 0) {
2118                         pr_err("Couldn't synthesize attrs.\n");
2119                         return err;
2120                 }
2121         }
2122
2123         err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
        if (err < 0) {
                pr_err("Couldn't synthesize extra attributes.\n");
                return err;
        }

2124         err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
2125         if (err < 0) {
2126                 pr_err("Couldn't synthesize thread map.\n");
2127                 return err;
2128         }
2129
2130         err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
2131         if (err < 0) {
2132                 pr_err("Couldn't synthesize cpu map.\n");
2133                 return err;
2134         }
2135
2136         err = perf_event__synthesize_stat_config(tool, config, process, NULL);
2137         if (err < 0) {
2138                 pr_err("Couldn't synthesize stat config.\n");
2139                 return err;
2140         }
2141
2142         return 0;
2143 }
2144
2145 extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
2146
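/*
 * For each feature bit set in the session header that has a synthesize op,
 * write its payload and wrap it in a PERF_RECORD_HEADER_FEATURE event,
 * finishing with a HEADER_LAST_FEATURE mark.
 */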
2147 int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
2148                                     struct evlist *evlist, perf_event__handler_t process)
2149 {
2150         struct perf_header *header = &session->header;
2151         struct perf_record_header_feature *fe;
2152         struct feat_fd ff;
2153         size_t sz, sz_hdr;
2154         int feat, ret;
2155
2156         sz_hdr = sizeof(fe->header);
2157         sz = sizeof(union perf_event);
2158         /* get a nice alignment */
2159         sz = PERF_ALIGN(sz, page_size);
2160
2161         memset(&ff, 0, sizeof(ff));
2162
2163         ff.buf = malloc(sz);
2164         if (!ff.buf)
2165                 return -ENOMEM;
2166
2167         ff.size = sz - sz_hdr;
2168         ff.ph = &session->header;
2169
2170         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2171                 if (!feat_ops[feat].synthesize) {
2172                         pr_debug("No record header feature for feature %d\n", feat);
2173                         continue;
2174                 }
2175
2176                 ff.offset = sizeof(*fe);
2177
2178                 ret = feat_ops[feat].write(&ff, evlist);
2179                 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
2180                         pr_debug("Error writing feature\n");
2181                         continue;
2182                 }
2183                 /* ff.buf may have changed due to realloc in do_write() */
2184                 fe = ff.buf;
2185                 memset(fe, 0, sizeof(*fe));
2186
2187                 fe->feat_id = feat;
2188                 fe->header.type = PERF_RECORD_HEADER_FEATURE;
2189                 fe->header.size = ff.offset;
2190
2191                 ret = process(tool, ff.buf, NULL, NULL);
2192                 if (ret) {
2193                         free(ff.buf);
2194                         return ret;
2195                 }
2196         }
2197
2198         /* Send HEADER_LAST_FEATURE mark. */
2199         fe = ff.buf;
2200         fe->feat_id     = HEADER_LAST_FEATURE;
2201         fe->header.type = PERF_RECORD_HEADER_FEATURE;
2202         fe->header.size = sizeof(*fe);
2203
2204         ret = process(tool, ff.buf, NULL, NULL);
2205
2206         free(ff.buf);
2207         return ret;
2208 }
2209
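/*
 * Synthesize everything a pipe-mode session needs up front: the attr events
 * first, then the header features and, when tracepoints are in use, the
 * tracing data.
 */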
2210 int perf_event__synthesize_for_pipe(struct perf_tool *tool,
2211                                     struct perf_session *session,
2212                                     struct perf_data *data,
2213                                     perf_event__handler_t process)
2214 {
2215         int err;
2216         int ret = 0;
2217         struct evlist *evlist = session->evlist;
2218
2219         /*
2220          * We need to synthesize events first, because some
2221          * features work on top of them (on the report side).
2222          */
2223         err = perf_event__synthesize_attrs(tool, evlist, process);
2224         if (err < 0) {
2225                 pr_err("Couldn't synthesize attrs.\n");
2226                 return err;
2227         }
2228         ret += err;
2229
2230         err = perf_event__synthesize_features(tool, session, evlist, process);
2231         if (err < 0) {
2232                 pr_err("Couldn't synthesize features.\n");
2233                 return err;
2234         }
2235         ret += err;
2236
2237         if (have_tracepoints(&evlist->core.entries)) {
2238                 int fd = perf_data__fd(data);
2239
2240                 /*
2241                  * FIXME err <= 0 here actually means that
2242          * there were no tracepoints, so it's not really
2243          * an error, just that we don't need to
2244          * synthesize anything.  We really have to
2245          * return this more properly and also
2246          * propagate the errors that currently call die().
2247                  */
2248                 err = perf_event__synthesize_tracing_data(tool, fd, evlist,
2249                                                           process);
2250                 if (err <= 0) {
2251                         pr_err("Couldn't record tracing data.\n");
2252                         return err;
2253                 }
2254                 ret += err;
2255         }
2256
2257         return ret;
2258 }
2259
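/*
 * Parse a comma separated list of synthesis options ("task", "mmap", "cgroup",
 * "all", "no"/"none") into a PERF_SYNTH_* bit mask; "mmap" implies "task".
 * Returns -1 on an unknown token.
 */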
2260 int parse_synth_opt(char *synth)
2261 {
2262         char *p, *q;
2263         int ret = 0;
2264
2265         if (synth == NULL)
2266                 return -1;
2267
2268         for (q = synth; (p = strsep(&q, ",")); p = q) {
2269                 if (!strcasecmp(p, "no") || !strcasecmp(p, "none"))
2270                         return 0;
2271
2272                 if (!strcasecmp(p, "all"))
2273                         return PERF_SYNTH_ALL;
2274
2275                 if (!strcasecmp(p, "task"))
2276                         ret |= PERF_SYNTH_TASK;
2277                 else if (!strcasecmp(p, "mmap"))
2278                         ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP;
2279                 else if (!strcasecmp(p, "cgroup"))
2280                         ret |= PERF_SYNTH_CGROUP;
2281                 else
2282                         return -1;
2283         }
2284
2285         return ret;
2286 }