1 // SPDX-License-Identifier: GPL-2.0-only 
2
3 #include "util/debug.h"
4 #include "util/dso.h"
5 #include "util/event.h"
6 #include "util/evlist.h"
7 #include "util/machine.h"
8 #include "util/map.h"
9 #include "util/map_symbol.h"
10 #include "util/branch.h"
11 #include "util/memswap.h"
12 #include "util/namespaces.h"
13 #include "util/session.h"
14 #include "util/stat.h"
15 #include "util/symbol.h"
16 #include "util/synthetic-events.h"
17 #include "util/target.h"
18 #include "util/time-utils.h"
19 #include "util/cgroup.h"
20 #include <linux/bitops.h>
21 #include <linux/kernel.h>
22 #include <linux/string.h>
23 #include <linux/zalloc.h>
24 #include <linux/perf_event.h>
25 #include <asm/bug.h>
26 #include <perf/evsel.h>
27 #include <internal/cpumap.h>
28 #include <perf/cpumap.h>
29 #include <internal/lib.h> // page_size
30 #include <internal/threadmap.h>
31 #include <perf/threadmap.h>
32 #include <symbol/kallsyms.h>
33 #include <dirent.h>
34 #include <errno.h>
35 #include <inttypes.h>
36 #include <stdio.h>
37 #include <string.h>
38 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
39 #include <api/fs/fs.h>
40 #include <api/io.h>
41 #include <sys/types.h>
42 #include <sys/stat.h>
43 #include <fcntl.h>
44 #include <unistd.h>
45
46 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
47
48 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
49
50 int perf_tool__process_synth_event(struct perf_tool *tool,
51                                    union perf_event *event,
52                                    struct machine *machine,
53                                    perf_event__handler_t process)
54 {
55         struct perf_sample synth_sample = {
56                 .pid       = -1,
57                 .tid       = -1,
58                 .time      = -1,
59                 .stream_id = -1,
60                 .cpu       = -1,
61                 .period    = 1,
62                 .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
63         };
64
65         return process(tool, event, &synth_sample, machine);
66 };
67
68 /*
69  * Assumes that the first 4095 bytes of /proc/pid/status contains
70  * the comm, tgid and ppid.
71  */
72 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
73                                     pid_t *tgid, pid_t *ppid)
74 {
75         char bf[4096];
76         int fd;
77         size_t size = 0;
78         ssize_t n;
79         char *name, *tgids, *ppids;
80
81         *tgid = -1;
82         *ppid = -1;
83
84         snprintf(bf, sizeof(bf), "/proc/%d/status", pid);
85
86         fd = open(bf, O_RDONLY);
87         if (fd < 0) {
88                 pr_debug("couldn't open %s\n", bf);
89                 return -1;
90         }
91
92         n = read(fd, bf, sizeof(bf) - 1);
93         close(fd);
94         if (n <= 0) {
95                 pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
96                            pid);
97                 return -1;
98         }
99         bf[n] = '\0';
100
101         name = strstr(bf, "Name:");
102         tgids = strstr(bf, "Tgid:");
103         ppids = strstr(bf, "PPid:");
104
105         if (name) {
106                 char *nl;
107
108                 name = skip_spaces(name + 5);  /* strlen("Name:") */
109                 nl = strchr(name, '\n');
110                 if (nl)
111                         *nl = '\0';
112
113                 size = strlen(name);
114                 if (size >= len)
115                         size = len - 1;
116                 memcpy(comm, name, size);
117                 comm[size] = '\0';
118         } else {
119                 pr_debug("Name: string not found for pid %d\n", pid);
120         }
121
122         if (tgids) {
123                 tgids += 5;  /* strlen("Tgid:") */
124                 *tgid = atoi(tgids);
125         } else {
126                 pr_debug("Tgid: string not found for pid %d\n", pid);
127         }
128
129         if (ppids) {
130                 ppids += 5;  /* strlen("PPid:") */
131                 *ppid = atoi(ppids);
132         } else {
133                 pr_debug("PPid: string not found for pid %d\n", pid);
134         }
135
136         return 0;
137 }
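/*
 * Illustration only (not part of the original file): the fields that
 * perf_event__get_comm_ids() above picks out of /proc/<pid>/status look
 * roughly like
 *
 *   Name:   cat
 *   Tgid:   4242
 *   PPid:   4200
 *
 * For such a (hypothetical) process the helper would fill in comm = "cat",
 * *tgid = 4242 and *ppid = 4200.  Only the field names come from procfs;
 * the values are made up for the example.
 */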
138
139 static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
140                                     struct machine *machine,
141                                     pid_t *tgid, pid_t *ppid)
142 {
143         size_t size;
144
145         *ppid = -1;
146
147         memset(&event->comm, 0, sizeof(event->comm));
148
149         if (machine__is_host(machine)) {
150                 if (perf_event__get_comm_ids(pid, event->comm.comm,
151                                              sizeof(event->comm.comm),
152                                              tgid, ppid) != 0) {
153                         return -1;
154                 }
155         } else {
156                 *tgid = machine->pid;
157         }
158
159         if (*tgid < 0)
160                 return -1;
161
162         event->comm.pid = *tgid;
163         event->comm.header.type = PERF_RECORD_COMM;
164
165         size = strlen(event->comm.comm) + 1;
166         size = PERF_ALIGN(size, sizeof(u64));
167         memset(event->comm.comm + size, 0, machine->id_hdr_size);
168         event->comm.header.size = (sizeof(event->comm) -
169                                 (sizeof(event->comm.comm) - size) +
170                                 machine->id_hdr_size);
171         event->comm.tid = pid;
172
173         return 0;
174 }
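/*
 * Worked example for the size arithmetic above, assuming the 16-byte
 * comm[] array of 'struct perf_record_comm': for comm = "cat",
 * strlen() + 1 = 4, which PERF_ALIGN() rounds up to 8, so
 *
 *   header.size = sizeof(event->comm) - (16 - 8) + machine->id_hdr_size
 *               = 24 + machine->id_hdr_size
 *
 * i.e. 8 bytes of event header, 8 bytes of pid/tid and only the 8 aligned
 * comm bytes; the unused tail of comm[] is not emitted.
 */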
175
176 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
177                                          union perf_event *event, pid_t pid,
178                                          perf_event__handler_t process,
179                                          struct machine *machine)
180 {
181         pid_t tgid, ppid;
182
183         if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
184                 return -1;
185
186         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
187                 return -1;
188
189         return tgid;
190 }
191
192 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
193                                          struct perf_ns_link_info *ns_link_info)
194 {
195         struct stat64 st;
196         char proc_ns[128];
197
198         sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
199         if (stat64(proc_ns, &st) == 0) {
200                 ns_link_info->dev = st.st_dev;
201                 ns_link_info->ino = st.st_ino;
202         }
203 }
204
205 int perf_event__synthesize_namespaces(struct perf_tool *tool,
206                                       union perf_event *event,
207                                       pid_t pid, pid_t tgid,
208                                       perf_event__handler_t process,
209                                       struct machine *machine)
210 {
211         u32 idx;
212         struct perf_ns_link_info *ns_link_info;
213
214         if (!tool || !tool->namespace_events)
215                 return 0;
216
217         memset(&event->namespaces, 0, (sizeof(event->namespaces) +
218                (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
219                machine->id_hdr_size));
220
221         event->namespaces.pid = tgid;
222         event->namespaces.tid = pid;
223
224         event->namespaces.nr_namespaces = NR_NAMESPACES;
225
226         ns_link_info = event->namespaces.link_info;
227
228         for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
229                 perf_event__get_ns_link_info(pid, perf_ns__name(idx),
230                                              &ns_link_info[idx]);
231
232         event->namespaces.header.type = PERF_RECORD_NAMESPACES;
233
234         event->namespaces.header.size = (sizeof(event->namespaces) +
235                         (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
236                         machine->id_hdr_size);
237
238         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
239                 return -1;
240
241         return 0;
242 }
243
244 static int perf_event__synthesize_fork(struct perf_tool *tool,
245                                        union perf_event *event,
246                                        pid_t pid, pid_t tgid, pid_t ppid,
247                                        perf_event__handler_t process,
248                                        struct machine *machine)
249 {
250         memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
251
252         /*
253          * For the main thread, set the parent to the ppid from the status
254          * file. For other threads, set the parent pid to the main thread,
255          * i.e., assume the main thread spawns all threads in a process.
256          */
257         if (tgid == pid) {
258                 event->fork.ppid = ppid;
259                 event->fork.ptid = ppid;
260         } else {
261                 event->fork.ppid = tgid;
262                 event->fork.ptid = tgid;
263         }
264         event->fork.pid  = tgid;
265         event->fork.tid  = pid;
266         event->fork.header.type = PERF_RECORD_FORK;
267         event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
268
269         event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
270
271         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
272                 return -1;
273
274         return 0;
275 }
276
277 static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
278                                 u32 *prot, u32 *flags, __u64 *offset,
279                                 u32 *maj, u32 *min,
280                                 __u64 *inode,
281                                 ssize_t pathname_size, char *pathname)
282 {
283         __u64 temp;
284         int ch;
285         char *start_pathname = pathname;
286
287         if (io__get_hex(io, start) != '-')
288                 return false;
289         if (io__get_hex(io, end) != ' ')
290                 return false;
291
292         /* map protection and flags bits */
293         *prot = 0;
294         ch = io__get_char(io);
295         if (ch == 'r')
296                 *prot |= PROT_READ;
297         else if (ch != '-')
298                 return false;
299         ch = io__get_char(io);
300         if (ch == 'w')
301                 *prot |= PROT_WRITE;
302         else if (ch != '-')
303                 return false;
304         ch = io__get_char(io);
305         if (ch == 'x')
306                 *prot |= PROT_EXEC;
307         else if (ch != '-')
308                 return false;
309         ch = io__get_char(io);
310         if (ch == 's')
311                 *flags = MAP_SHARED;
312         else if (ch == 'p')
313                 *flags = MAP_PRIVATE;
314         else
315                 return false;
316         if (io__get_char(io) != ' ')
317                 return false;
318
319         if (io__get_hex(io, offset) != ' ')
320                 return false;
321
322         if (io__get_hex(io, &temp) != ':')
323                 return false;
324         *maj = temp;
325         if (io__get_hex(io, &temp) != ' ')
326                 return false;
327         *min = temp;
328
329         ch = io__get_dec(io, inode);
330         if (ch != ' ') {
331                 *pathname = '\0';
332                 return ch == '\n';
333         }
334         do {
335                 ch = io__get_char(io);
336         } while (ch == ' ');
337         while (true) {
338                 if (ch < 0)
339                         return false;
340                 if (ch == '\0' || ch == '\n' ||
341                     (pathname + 1 - start_pathname) >= pathname_size) {
342                         *pathname = '\0';
343                         return true;
344                 }
345                 *pathname++ = ch;
346                 ch = io__get_char(io);
347         }
348 }
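/*
 * Worked example for the parser above, using the sample line quoted in
 * perf_event__synthesize_mmap_events() below:
 *
 *   00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat
 *
 * would yield *start = 0x400000, *end = 0x40c000,
 * *prot = PROT_READ | PROT_EXEC, *flags = MAP_PRIVATE, *offset = 0,
 * *maj = 0xfd, *min = 0x01, *inode = 41038 and pathname = "/bin/cat".
 */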
349
350 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
351                                        union perf_event *event,
352                                        pid_t pid, pid_t tgid,
353                                        perf_event__handler_t process,
354                                        struct machine *machine,
355                                        bool mmap_data)
356 {
357         unsigned long long t;
358         char bf[BUFSIZ];
359         struct io io;
360         bool truncation = false;
361         unsigned long long timeout = proc_map_timeout * 1000000ULL;
362         int rc = 0;
363         const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
364         int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
365
366         if (machine__is_default_guest(machine))
367                 return 0;
368
369         snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
370                 machine->root_dir, pid, pid);
371
372         io.fd = open(bf, O_RDONLY, 0);
373         if (io.fd < 0) {
374                 /*
375                  * We raced with a task exiting - just return:
376                  */
377                 pr_debug("couldn't open %s\n", bf);
378                 return -1;
379         }
380         io__init(&io, io.fd, bf, sizeof(bf));
381
382         event->header.type = PERF_RECORD_MMAP2;
383         t = rdclock();
384
385         while (!io.eof) {
386                 static const char anonstr[] = "//anon";
387                 size_t size;
388
389                 /* ensure null termination since stack will be reused. */
390                 event->mmap2.filename[0] = '\0';
391
392                 /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
393                 if (!read_proc_maps_line(&io,
394                                         &event->mmap2.start,
395                                         &event->mmap2.len,
396                                         &event->mmap2.prot,
397                                         &event->mmap2.flags,
398                                         &event->mmap2.pgoff,
399                                         &event->mmap2.maj,
400                                         &event->mmap2.min,
401                                         &event->mmap2.ino,
402                                         sizeof(event->mmap2.filename),
403                                         event->mmap2.filename))
404                         continue;
405
406                 if ((rdclock() - t) > timeout) {
407                         pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
408                                    "You may want to increase "
409                                    "the time limit by --proc-map-timeout\n",
410                                    machine->root_dir, pid, pid);
411                         truncation = true;
412                         goto out;
413                 }
414
415                 event->mmap2.ino_generation = 0;
416
417                 /*
418                  * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
419                  */
420                 if (machine__is_host(machine))
421                         event->header.misc = PERF_RECORD_MISC_USER;
422                 else
423                         event->header.misc = PERF_RECORD_MISC_GUEST_USER;
424
425                 if ((event->mmap2.prot & PROT_EXEC) == 0) {
426                         if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
427                                 continue;
428
429                         event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
430                 }
431
432 out:
433                 if (truncation)
434                         event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
435
436                 if (!strcmp(event->mmap2.filename, ""))
437                         strcpy(event->mmap2.filename, anonstr);
438
439                 if (hugetlbfs_mnt_len &&
440                     !strncmp(event->mmap2.filename, hugetlbfs_mnt,
441                              hugetlbfs_mnt_len)) {
442                         strcpy(event->mmap2.filename, anonstr);
443                         event->mmap2.flags |= MAP_HUGETLB;
444                 }
445
446                 size = strlen(event->mmap2.filename) + 1;
447                 size = PERF_ALIGN(size, sizeof(u64));
448                 event->mmap2.len -= event->mmap2.start;
449                 event->mmap2.header.size = (sizeof(event->mmap2) -
450                                         (sizeof(event->mmap2.filename) - size));
451                 memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
452                 event->mmap2.header.size += machine->id_hdr_size;
453                 event->mmap2.pid = tgid;
454                 event->mmap2.tid = pid;
455
456                 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
457                         rc = -1;
458                         break;
459                 }
460
461                 if (truncation)
462                         break;
463         }
464
465         close(io.fd);
466         return rc;
467 }
468
469 #ifdef HAVE_FILE_HANDLE
470 static int perf_event__synthesize_cgroup(struct perf_tool *tool,
471                                          union perf_event *event,
472                                          char *path, size_t mount_len,
473                                          perf_event__handler_t process,
474                                          struct machine *machine)
475 {
476         size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
477         size_t path_len = strlen(path) - mount_len + 1;
478         struct {
479                 struct file_handle fh;
480                 uint64_t cgroup_id;
481         } handle;
482         int mount_id;
483
484         while (path_len % sizeof(u64))
485                 path[mount_len + path_len++] = '\0';
486
487         memset(&event->cgroup, 0, event_size);
488
489         event->cgroup.header.type = PERF_RECORD_CGROUP;
490         event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
491
492         handle.fh.handle_bytes = sizeof(handle.cgroup_id);
493         if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
494                 pr_debug("stat failed: %s\n", path);
495                 return -1;
496         }
497
498         event->cgroup.id = handle.cgroup_id;
499         strncpy(event->cgroup.path, path + mount_len, path_len);
500         memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
501
502         if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
503                 pr_debug("process synth event failed\n");
504                 return -1;
505         }
506
507         return 0;
508 }
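/*
 * Sketch of the record layout built above, with an illustrative mount point
 * (the real one comes from cgroupfs_find_mountpoint()): for
 * path = "/sys/fs/cgroup/perf_event/foo" and mount_len = 25, the relative
 * part "/foo" is 5 bytes including the NUL and gets padded to 8, so
 * header.size = event_size + 8 + machine->id_hdr_size, while the 64-bit
 * cgroup id is taken from the file handle returned by name_to_handle_at().
 */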
509
510 static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
511                                         union perf_event *event,
512                                         char *path, size_t mount_len,
513                                         perf_event__handler_t process,
514                                         struct machine *machine)
515 {
516         size_t pos = strlen(path);
517         DIR *d;
518         struct dirent *dent;
519         int ret = 0;
520
521         if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
522                                           process, machine) < 0)
523                 return -1;
524
525         d = opendir(path);
526         if (d == NULL) {
527                 pr_debug("failed to open directory: %s\n", path);
528                 return -1;
529         }
530
531         while ((dent = readdir(d)) != NULL) {
532                 if (dent->d_type != DT_DIR)
533                         continue;
534                 if (!strcmp(dent->d_name, ".") ||
535                     !strcmp(dent->d_name, ".."))
536                         continue;
537
538                 /* any sane path should be less than PATH_MAX */
539                 if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
540                         continue;
541
542                 if (path[pos - 1] != '/')
543                         strcat(path, "/");
544                 strcat(path, dent->d_name);
545
546                 ret = perf_event__walk_cgroup_tree(tool, event, path,
547                                                    mount_len, process, machine);
548                 if (ret < 0)
549                         break;
550
551                 path[pos] = '\0';
552         }
553
554         closedir(d);
555         return ret;
556 }
557
558 int perf_event__synthesize_cgroups(struct perf_tool *tool,
559                                    perf_event__handler_t process,
560                                    struct machine *machine)
561 {
562         union perf_event event;
563         char cgrp_root[PATH_MAX];
564         size_t mount_len;  /* length of mount point in the path */
565
566         if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
567                 pr_debug("cannot find cgroup mount point\n");
568                 return -1;
569         }
570
571         mount_len = strlen(cgrp_root);
572         /* make sure the path starts with a slash (after mount point) */
573         strcat(cgrp_root, "/");
574
575         if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
576                                          process, machine) < 0)
577                 return -1;
578
579         return 0;
580 }
581 #else
582 int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
583                                    perf_event__handler_t process __maybe_unused,
584                                    struct machine *machine __maybe_unused)
585 {
586         return -1;
587 }
588 #endif
589
590 int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
591                                    struct machine *machine)
592 {
593         int rc = 0;
594         struct map *pos;
595         struct maps *maps = machine__kernel_maps(machine);
596         union perf_event *event = zalloc((sizeof(event->mmap) +
597                                           machine->id_hdr_size));
598         if (event == NULL) {
599                 pr_debug("Not enough memory synthesizing mmap event "
600                          "for kernel modules\n");
601                 return -1;
602         }
603
604         event->header.type = PERF_RECORD_MMAP;
605
606         /*
607          * kernel uses 0 for user space maps, see kernel/perf_event.c
608          * __perf_event_mmap
609          */
610         if (machine__is_host(machine))
611                 event->header.misc = PERF_RECORD_MISC_KERNEL;
612         else
613                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
614
615         maps__for_each_entry(maps, pos) {
616                 size_t size;
617
618                 if (!__map__is_kmodule(pos))
619                         continue;
620
621                 size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
622                 event->mmap.header.type = PERF_RECORD_MMAP;
623                 event->mmap.header.size = (sizeof(event->mmap) -
624                                         (sizeof(event->mmap.filename) - size));
625                 memset(event->mmap.filename + size, 0, machine->id_hdr_size);
626                 event->mmap.header.size += machine->id_hdr_size;
627                 event->mmap.start = pos->start;
628                 event->mmap.len   = pos->end - pos->start;
629                 event->mmap.pid   = machine->pid;
630
631                 memcpy(event->mmap.filename, pos->dso->long_name,
632                        pos->dso->long_name_len + 1);
633                 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
634                         rc = -1;
635                         break;
636                 }
637         }
638
639         free(event);
640         return rc;
641 }
642
643 static int __event__synthesize_thread(union perf_event *comm_event,
644                                       union perf_event *mmap_event,
645                                       union perf_event *fork_event,
646                                       union perf_event *namespaces_event,
647                                       pid_t pid, int full, perf_event__handler_t process,
648                                       struct perf_tool *tool, struct machine *machine, bool mmap_data)
649 {
650         char filename[PATH_MAX];
651         DIR *tasks;
652         struct dirent *dirent;
653         pid_t tgid, ppid;
654         int rc = 0;
655
656         /* special case: only send one comm event using passed in pid */
657         if (!full) {
658                 tgid = perf_event__synthesize_comm(tool, comm_event, pid,
659                                                    process, machine);
660
661                 if (tgid == -1)
662                         return -1;
663
664                 if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
665                                                       tgid, process, machine) < 0)
666                         return -1;
667
668                 /*
669                  * send mmap only for thread group leader
670                  * see thread__init_maps()
671                  */
672                 if (pid == tgid &&
673                     perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
674                                                        process, machine, mmap_data))
675                         return -1;
676
677                 return 0;
678         }
679
680         if (machine__is_default_guest(machine))
681                 return 0;
682
683         snprintf(filename, sizeof(filename), "%s/proc/%d/task",
684                  machine->root_dir, pid);
685
686         tasks = opendir(filename);
687         if (tasks == NULL) {
688                 pr_debug("couldn't open %s\n", filename);
689                 return 0;
690         }
691
692         while ((dirent = readdir(tasks)) != NULL) {
693                 char *end;
694                 pid_t _pid;
695
696                 _pid = strtol(dirent->d_name, &end, 10);
697                 if (*end)
698                         continue;
699
700                 rc = -1;
701                 if (perf_event__prepare_comm(comm_event, _pid, machine,
702                                              &tgid, &ppid) != 0)
703                         break;
704
705                 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
706                                                 ppid, process, machine) < 0)
707                         break;
708
709                 if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
710                                                       tgid, process, machine) < 0)
711                         break;
712
713                 /*
714                  * Send the prepared comm event
715                  */
716                 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
717                         break;
718
719                 rc = 0;
720                 if (_pid == pid) {
721                         /* process the parent's maps too */
722                         rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
723                                                 process, machine, mmap_data);
724                         if (rc)
725                                 break;
726                 }
727         }
728
729         closedir(tasks);
730         return rc;
731 }
732
733 int perf_event__synthesize_thread_map(struct perf_tool *tool,
734                                       struct perf_thread_map *threads,
735                                       perf_event__handler_t process,
736                                       struct machine *machine,
737                                       bool mmap_data)
738 {
739         union perf_event *comm_event, *mmap_event, *fork_event;
740         union perf_event *namespaces_event;
741         int err = -1, thread, j;
742
743         comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
744         if (comm_event == NULL)
745                 goto out;
746
747         mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
748         if (mmap_event == NULL)
749                 goto out_free_comm;
750
751         fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
752         if (fork_event == NULL)
753                 goto out_free_mmap;
754
755         namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
756                                   (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
757                                   machine->id_hdr_size);
758         if (namespaces_event == NULL)
759                 goto out_free_fork;
760
761         err = 0;
762         for (thread = 0; thread < threads->nr; ++thread) {
763                 if (__event__synthesize_thread(comm_event, mmap_event,
764                                                fork_event, namespaces_event,
765                                                perf_thread_map__pid(threads, thread), 0,
766                                                process, tool, machine,
767                                                mmap_data)) {
768                         err = -1;
769                         break;
770                 }
771
772                 /*
773                  * comm.pid is set to thread group id by
774                  * perf_event__synthesize_comm
775                  */
776                 if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
777                         bool need_leader = true;
778
779                         /* is thread group leader in thread_map? */
780                         for (j = 0; j < threads->nr; ++j) {
781                                 if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
782                                         need_leader = false;
783                                         break;
784                                 }
785                         }
786
787                         /* if not, generate events for it */
788                         if (need_leader &&
789                             __event__synthesize_thread(comm_event, mmap_event,
790                                                        fork_event, namespaces_event,
791                                                        comm_event->comm.pid, 0,
792                                                        process, tool, machine,
793                                                        mmap_data)) {
794                                 err = -1;
795                                 break;
796                         }
797                 }
798         }
799         free(namespaces_event);
800 out_free_fork:
801         free(fork_event);
802 out_free_mmap:
803         free(mmap_event);
804 out_free_comm:
805         free(comm_event);
806 out:
807         return err;
808 }
809
810 static int __perf_event__synthesize_threads(struct perf_tool *tool,
811                                             perf_event__handler_t process,
812                                             struct machine *machine,
813                                             bool mmap_data,
814                                             struct dirent **dirent,
815                                             int start,
816                                             int num)
817 {
818         union perf_event *comm_event, *mmap_event, *fork_event;
819         union perf_event *namespaces_event;
820         int err = -1;
821         char *end;
822         pid_t pid;
823         int i;
824
825         comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
826         if (comm_event == NULL)
827                 goto out;
828
829         mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
830         if (mmap_event == NULL)
831                 goto out_free_comm;
832
833         fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
834         if (fork_event == NULL)
835                 goto out_free_mmap;
836
837         namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
838                                   (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
839                                   machine->id_hdr_size);
840         if (namespaces_event == NULL)
841                 goto out_free_fork;
842
843         for (i = start; i < start + num; i++) {
844                 if (!isdigit(dirent[i]->d_name[0]))
845                         continue;
846
847                 pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
848                 /* only interested in proper numerical dirents */
849                 if (*end)
850                         continue;
851                 /*
852                  * We may race with an exiting thread, so don't stop just because
853                  * one thread couldn't be synthesized.
854                  */
855                 __event__synthesize_thread(comm_event, mmap_event, fork_event,
856                                            namespaces_event, pid, 1, process,
857                                            tool, machine, mmap_data);
858         }
859         err = 0;
860
861         free(namespaces_event);
862 out_free_fork:
863         free(fork_event);
864 out_free_mmap:
865         free(mmap_event);
866 out_free_comm:
867         free(comm_event);
868 out:
869         return err;
870 }
871
872 struct synthesize_threads_arg {
873         struct perf_tool *tool;
874         perf_event__handler_t process;
875         struct machine *machine;
876         bool mmap_data;
877         struct dirent **dirent;
878         int num;
879         int start;
880 };
881
882 static void *synthesize_threads_worker(void *arg)
883 {
884         struct synthesize_threads_arg *args = arg;
885
886         __perf_event__synthesize_threads(args->tool, args->process,
887                                          args->machine, args->mmap_data,
888                                          args->dirent,
889                                          args->start, args->num);
890         return NULL;
891 }
892
893 int perf_event__synthesize_threads(struct perf_tool *tool,
894                                    perf_event__handler_t process,
895                                    struct machine *machine,
896                                    bool mmap_data,
897                                    unsigned int nr_threads_synthesize)
898 {
899         struct synthesize_threads_arg *args = NULL;
900         pthread_t *synthesize_threads = NULL;
901         char proc_path[PATH_MAX];
902         struct dirent **dirent;
903         int num_per_thread;
904         int m, n, i, j;
905         int thread_nr;
906         int base = 0;
907         int err = -1;
908
909
910         if (machine__is_default_guest(machine))
911                 return 0;
912
913         snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
914         n = scandir(proc_path, &dirent, 0, alphasort);
915         if (n < 0)
916                 return err;
917
918         if (nr_threads_synthesize == UINT_MAX)
919                 thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
920         else
921                 thread_nr = nr_threads_synthesize;
922
923         if (thread_nr <= 1) {
924                 err = __perf_event__synthesize_threads(tool, process,
925                                                        machine, mmap_data,
926                                                        dirent, base, n);
927                 goto free_dirent;
928         }
929         if (thread_nr > n)
930                 thread_nr = n;
931
932         synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
933         if (synthesize_threads == NULL)
934                 goto free_dirent;
935
936         args = calloc(sizeof(*args), thread_nr);
937         if (args == NULL)
938                 goto free_threads;
939
940         num_per_thread = n / thread_nr;
941         m = n % thread_nr;
942         for (i = 0; i < thread_nr; i++) {
943                 args[i].tool = tool;
944                 args[i].process = process;
945                 args[i].machine = machine;
946                 args[i].mmap_data = mmap_data;
947                 args[i].dirent = dirent;
948         }
949         for (i = 0; i < m; i++) {
950                 args[i].num = num_per_thread + 1;
951                 args[i].start = i * args[i].num;
952         }
953         if (i != 0)
954                 base = args[i-1].start + args[i-1].num;
955         for (j = i; j < thread_nr; j++) {
956                 args[j].num = num_per_thread;
957                 args[j].start = base + (j - i) * args[i].num;
958         }
959
960         for (i = 0; i < thread_nr; i++) {
961                 if (pthread_create(&synthesize_threads[i], NULL,
962                                    synthesize_threads_worker, &args[i]))
963                         goto out_join;
964         }
965         err = 0;
966 out_join:
967         for (i = 0; i < thread_nr; i++)
968                 pthread_join(synthesize_threads[i], NULL);
969         free(args);
970 free_threads:
971         free(synthesize_threads);
972 free_dirent:
973         for (i = 0; i < n; i++)
974                 zfree(&dirent[i]);
975         free(dirent);
976
977         return err;
978 }
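/*
 * Work-split illustration for perf_event__synthesize_threads() above
 * (numbers are made up): with n = 10 /proc entries and thread_nr = 4,
 * num_per_thread = 2 and m = 2, so the first two workers handle 3 entries
 * each (start 0 and 3) and the remaining two handle 2 each (start 6 and 8).
 */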
979
980 int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
981                                               perf_event__handler_t process __maybe_unused,
982                                               struct machine *machine __maybe_unused)
983 {
984         return 0;
985 }
986
987 static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
988                                                 perf_event__handler_t process,
989                                                 struct machine *machine)
990 {
991         size_t size;
992         struct map *map = machine__kernel_map(machine);
993         struct kmap *kmap;
994         int err;
995         union perf_event *event;
996
997         if (map == NULL)
998                 return -1;
999
1000         kmap = map__kmap(map);
1001         if (!kmap->ref_reloc_sym)
1002                 return -1;
1003
1004         /*
1005          * We should get this from /sys/kernel/sections/.text, but till that is
1006          * available use this, and after it is use this as a fallback for older
1007          * kernels.
1008          */
1009         event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
1010         if (event == NULL) {
1011                 pr_debug("Not enough memory synthesizing mmap event "
1012                          "for the kernel map\n");
1013                 return -1;
1014         }
1015
1016         if (machine__is_host(machine)) {
1017                 /*
1018                  * kernel uses PERF_RECORD_MISC_USER for user space maps,
1019                  * see kernel/perf_event.c __perf_event_mmap
1020                  */
1021                 event->header.misc = PERF_RECORD_MISC_KERNEL;
1022         } else {
1023                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
1024         }
1025
1026         size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
1027                         "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1028         size = PERF_ALIGN(size, sizeof(u64));
1029         event->mmap.header.type = PERF_RECORD_MMAP;
1030         event->mmap.header.size = (sizeof(event->mmap) -
1031                         (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
1032         event->mmap.pgoff = kmap->ref_reloc_sym->addr;
1033         event->mmap.start = map->start;
1034         event->mmap.len   = map->end - event->mmap.start;
1035         event->mmap.pid   = machine->pid;
1036
1037         err = perf_tool__process_synth_event(tool, event, machine, process);
1038         free(event);
1039
1040         return err;
1041 }
1042
1043 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
1044                                        perf_event__handler_t process,
1045                                        struct machine *machine)
1046 {
1047         int err;
1048
1049         err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
1050         if (err < 0)
1051                 return err;
1052
1053         return perf_event__synthesize_extra_kmaps(tool, process, machine);
1054 }
1055
1056 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
1057                                       struct perf_thread_map *threads,
1058                                       perf_event__handler_t process,
1059                                       struct machine *machine)
1060 {
1061         union perf_event *event;
1062         int i, err, size;
1063
1064         size  = sizeof(event->thread_map);
1065         size += threads->nr * sizeof(event->thread_map.entries[0]);
1066
1067         event = zalloc(size);
1068         if (!event)
1069                 return -ENOMEM;
1070
1071         event->header.type = PERF_RECORD_THREAD_MAP;
1072         event->header.size = size;
1073         event->thread_map.nr = threads->nr;
1074
1075         for (i = 0; i < threads->nr; i++) {
1076                 struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
1077                 char *comm = perf_thread_map__comm(threads, i);
1078
1079                 if (!comm)
1080                         comm = (char *) "";
1081
1082                 entry->pid = perf_thread_map__pid(threads, i);
1083                 strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1084         }
1085
1086         err = process(tool, event, NULL, machine);
1087
1088         free(event);
1089         return err;
1090 }
1091
1092 static void synthesize_cpus(struct cpu_map_entries *cpus,
1093                             struct perf_cpu_map *map)
1094 {
1095         int i;
1096
1097         cpus->nr = map->nr;
1098
1099         for (i = 0; i < map->nr; i++)
1100                 cpus->cpu[i] = map->map[i];
1101 }
1102
1103 static void synthesize_mask(struct perf_record_record_cpu_map *mask,
1104                             struct perf_cpu_map *map, int max)
1105 {
1106         int i;
1107
1108         mask->nr = BITS_TO_LONGS(max);
1109         mask->long_size = sizeof(long);
1110
1111         for (i = 0; i < map->nr; i++)
1112                 set_bit(map->map[i], mask->mask);
1113 }
1114
1115 static size_t cpus_size(struct perf_cpu_map *map)
1116 {
1117         return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
1118 }
1119
1120 static size_t mask_size(struct perf_cpu_map *map, int *max)
1121 {
1122         int i;
1123
1124         *max = 0;
1125
1126         for (i = 0; i < map->nr; i++) {
1127                 /* bit position of the cpu is its number + 1 */
1128                 int bit = map->map[i] + 1;
1129
1130                 if (bit > *max)
1131                         *max = bit;
1132         }
1133
1134         return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
1135 }
1136
1137 void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
1138 {
1139         size_t size_cpus, size_mask;
1140         bool is_dummy = perf_cpu_map__empty(map);
1141
1142         /*
1143          * Both array and mask data have variable size based
1144          * on the number of cpus and their actual values.
1145          * The size of the 'struct perf_record_cpu_map_data' is:
1146          *
1147          *   array = size of 'struct cpu_map_entries' +
1148          *           number of cpus * sizeof(u16)
1149          *
1150          *   mask  = size of 'struct perf_record_record_cpu_map' +
1151          *           maximum cpu bit converted to size of longs
1152          *
1153          * and finally + the size of 'struct perf_record_cpu_map_data'.
1154          */
1155         size_cpus = cpus_size(map);
1156         size_mask = mask_size(map, max);
1157
1158         if (is_dummy || (size_cpus < size_mask)) {
1159                 *size += size_cpus;
1160                 *type  = PERF_CPU_MAP__CPUS;
1161         } else {
1162                 *size += size_mask;
1163                 *type  = PERF_CPU_MAP__MASK;
1164         }
1165
1166         *size += sizeof(struct perf_record_cpu_map_data);
1167         *size = PERF_ALIGN(*size, sizeof(u64));
1168         return zalloc(*size);
1169 }
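/*
 * Example of the array-vs-mask choice made above, assuming a 64-bit host:
 * a map holding only cpu 255 needs a single u16 in the array form but
 * BITS_TO_LONGS(256) = 4 longs in the mask form, so PERF_CPU_MAP__CPUS
 * wins; a dense map of cpus 0-255 needs 256 u16 entries as an array but
 * still only 4 longs as a mask, so PERF_CPU_MAP__MASK wins.
 */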
1170
1171 void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
1172                               u16 type, int max)
1173 {
1174         data->type = type;
1175
1176         switch (type) {
1177         case PERF_CPU_MAP__CPUS:
1178                 synthesize_cpus((struct cpu_map_entries *) data->data, map);
1179                 break;
1180         case PERF_CPU_MAP__MASK:
1181                 synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
1182         default:
1183                 break;
1184         }
1185 }
1186
1187 static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
1188 {
1189         size_t size = sizeof(struct perf_record_cpu_map);
1190         struct perf_record_cpu_map *event;
1191         int max;
1192         u16 type;
1193
1194         event = cpu_map_data__alloc(map, &size, &type, &max);
1195         if (!event)
1196                 return NULL;
1197
1198         event->header.type = PERF_RECORD_CPU_MAP;
1199         event->header.size = size;
1200         event->data.type   = type;
1201
1202         cpu_map_data__synthesize(&event->data, map, type, max);
1203         return event;
1204 }
1205
1206 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1207                                    struct perf_cpu_map *map,
1208                                    perf_event__handler_t process,
1209                                    struct machine *machine)
1210 {
1211         struct perf_record_cpu_map *event;
1212         int err;
1213
1214         event = cpu_map_event__new(map);
1215         if (!event)
1216                 return -ENOMEM;
1217
1218         err = process(tool, (union perf_event *) event, NULL, machine);
1219
1220         free(event);
1221         return err;
1222 }
1223
1224 int perf_event__synthesize_stat_config(struct perf_tool *tool,
1225                                        struct perf_stat_config *config,
1226                                        perf_event__handler_t process,
1227                                        struct machine *machine)
1228 {
1229         struct perf_record_stat_config *event;
1230         int size, i = 0, err;
1231
1232         size  = sizeof(*event);
1233         size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1234
1235         event = zalloc(size);
1236         if (!event)
1237                 return -ENOMEM;
1238
1239         event->header.type = PERF_RECORD_STAT_CONFIG;
1240         event->header.size = size;
1241         event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1242
1243 #define ADD(__term, __val)                                      \
1244         event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;   \
1245         event->data[i].val = __val;                             \
1246         i++;
1247
1248         ADD(AGGR_MODE,  config->aggr_mode)
1249         ADD(INTERVAL,   config->interval)
1250         ADD(SCALE,      config->scale)
1251
1252         WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1253                   "stat config terms unbalanced\n");
1254 #undef ADD
1255
1256         err = process(tool, (union perf_event *) event, NULL, machine);
1257
1258         free(event);
1259         return err;
1260 }
1261
1262 int perf_event__synthesize_stat(struct perf_tool *tool,
1263                                 u32 cpu, u32 thread, u64 id,
1264                                 struct perf_counts_values *count,
1265                                 perf_event__handler_t process,
1266                                 struct machine *machine)
1267 {
1268         struct perf_record_stat event;
1269
1270         event.header.type = PERF_RECORD_STAT;
1271         event.header.size = sizeof(event);
1272         event.header.misc = 0;
1273
1274         event.id        = id;
1275         event.cpu       = cpu;
1276         event.thread    = thread;
1277         event.val       = count->val;
1278         event.ena       = count->ena;
1279         event.run       = count->run;
1280
1281         return process(tool, (union perf_event *) &event, NULL, machine);
1282 }
1283
1284 int perf_event__synthesize_stat_round(struct perf_tool *tool,
1285                                       u64 evtime, u64 type,
1286                                       perf_event__handler_t process,
1287                                       struct machine *machine)
1288 {
1289         struct perf_record_stat_round event;
1290
1291         event.header.type = PERF_RECORD_STAT_ROUND;
1292         event.header.size = sizeof(event);
1293         event.header.misc = 0;
1294
1295         event.time = evtime;
1296         event.type = type;
1297
1298         return process(tool, (union perf_event *) &event, NULL, machine);
1299 }
1300
1301 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1302 {
1303         size_t sz, result = sizeof(struct perf_record_sample);
1304
1305         if (type & PERF_SAMPLE_IDENTIFIER)
1306                 result += sizeof(u64);
1307
1308         if (type & PERF_SAMPLE_IP)
1309                 result += sizeof(u64);
1310
1311         if (type & PERF_SAMPLE_TID)
1312                 result += sizeof(u64);
1313
1314         if (type & PERF_SAMPLE_TIME)
1315                 result += sizeof(u64);
1316
1317         if (type & PERF_SAMPLE_ADDR)
1318                 result += sizeof(u64);
1319
1320         if (type & PERF_SAMPLE_ID)
1321                 result += sizeof(u64);
1322
1323         if (type & PERF_SAMPLE_STREAM_ID)
1324                 result += sizeof(u64);
1325
1326         if (type & PERF_SAMPLE_CPU)
1327                 result += sizeof(u64);
1328
1329         if (type & PERF_SAMPLE_PERIOD)
1330                 result += sizeof(u64);
1331
1332         if (type & PERF_SAMPLE_READ) {
1333                 result += sizeof(u64);
1334                 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1335                         result += sizeof(u64);
1336                 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1337                         result += sizeof(u64);
1338                 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1339                 if (read_format & PERF_FORMAT_GROUP) {
1340                         sz = sample->read.group.nr *
1341                              sizeof(struct sample_read_value);
1342                         result += sz;
1343                 } else {
1344                         result += sizeof(u64);
1345                 }
1346         }
1347
1348         if (type & PERF_SAMPLE_CALLCHAIN) {
1349                 sz = (sample->callchain->nr + 1) * sizeof(u64);
1350                 result += sz;
1351         }
1352
1353         if (type & PERF_SAMPLE_RAW) {
1354                 result += sizeof(u32);
1355                 result += sample->raw_size;
1356         }
1357
1358         if (type & PERF_SAMPLE_BRANCH_STACK) {
1359                 sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1360                 /* nr, hw_idx */
1361                 sz += 2 * sizeof(u64);
1362                 result += sz;
1363         }
1364
1365         if (type & PERF_SAMPLE_REGS_USER) {
1366                 if (sample->user_regs.abi) {
1367                         result += sizeof(u64);
1368                         sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1369                         result += sz;
1370                 } else {
1371                         result += sizeof(u64);
1372                 }
1373         }
1374
1375         if (type & PERF_SAMPLE_STACK_USER) {
1376                 sz = sample->user_stack.size;
1377                 result += sizeof(u64);
1378                 if (sz) {
1379                         result += sz;
1380                         result += sizeof(u64);
1381                 }
1382         }
1383
1384         if (type & PERF_SAMPLE_WEIGHT)
1385                 result += sizeof(u64);
1386
1387         if (type & PERF_SAMPLE_DATA_SRC)
1388                 result += sizeof(u64);
1389
1390         if (type & PERF_SAMPLE_TRANSACTION)
1391                 result += sizeof(u64);
1392
1393         if (type & PERF_SAMPLE_REGS_INTR) {
1394                 if (sample->intr_regs.abi) {
1395                         result += sizeof(u64);
1396                         sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1397                         result += sz;
1398                 } else {
1399                         result += sizeof(u64);
1400                 }
1401         }
1402
1403         if (type & PERF_SAMPLE_PHYS_ADDR)
1404                 result += sizeof(u64);
1405
1406         if (type & PERF_SAMPLE_CGROUP)
1407                 result += sizeof(u64);
1408
1409         if (type & PERF_SAMPLE_AUX) {
1410                 result += sizeof(u64);
1411                 result += sample->aux_sample.size;
1412         }
1413
1414         return result;
1415 }
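/*
 * Quick size check for the function above (illustrative): with
 * type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME and no
 * variable-length data, the synthesized record occupies
 * sizeof(struct perf_record_sample) + 3 * sizeof(u64) bytes, one u64 per
 * selected fixed-size field.
 */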
1416
1417 int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1418                                   const struct perf_sample *sample)
1419 {
1420         __u64 *array;
1421         size_t sz;
1422         /*
1423          * used for cross-endian analysis. See git commit 65014ab3
1424          * for why this goofiness is needed.
1425          */
1426         union u64_swap u;
1427
1428         array = event->sample.array;
1429
1430         if (type & PERF_SAMPLE_IDENTIFIER) {
1431                 *array = sample->id;
1432                 array++;
1433         }
1434
1435         if (type & PERF_SAMPLE_IP) {
1436                 *array = sample->ip;
1437                 array++;
1438         }
1439
1440         if (type & PERF_SAMPLE_TID) {
1441                 u.val32[0] = sample->pid;
1442                 u.val32[1] = sample->tid;
1443                 *array = u.val64;
1444                 array++;
1445         }
1446
1447         if (type & PERF_SAMPLE_TIME) {
1448                 *array = sample->time;
1449                 array++;
1450         }
1451
1452         if (type & PERF_SAMPLE_ADDR) {
1453                 *array = sample->addr;
1454                 array++;
1455         }
1456
1457         if (type & PERF_SAMPLE_ID) {
1458                 *array = sample->id;
1459                 array++;
1460         }
1461
1462         if (type & PERF_SAMPLE_STREAM_ID) {
1463                 *array = sample->stream_id;
1464                 array++;
1465         }
1466
1467         if (type & PERF_SAMPLE_CPU) {
1468                 u.val32[0] = sample->cpu;
1469                 u.val32[1] = 0;
1470                 *array = u.val64;
1471                 array++;
1472         }
1473
1474         if (type & PERF_SAMPLE_PERIOD) {
1475                 *array = sample->period;
1476                 array++;
1477         }
1478
1479         if (type & PERF_SAMPLE_READ) {
1480                 if (read_format & PERF_FORMAT_GROUP)
1481                         *array = sample->read.group.nr;
1482                 else
1483                         *array = sample->read.one.value;
1484                 array++;
1485
1486                 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1487                         *array = sample->read.time_enabled;
1488                         array++;
1489                 }
1490
1491                 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1492                         *array = sample->read.time_running;
1493                         array++;
1494                 }
1495
1496                 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1497                 if (read_format & PERF_FORMAT_GROUP) {
1498                         sz = sample->read.group.nr *
1499                              sizeof(struct sample_read_value);
1500                         memcpy(array, sample->read.group.values, sz);
1501                         array = (void *)array + sz;
1502                 } else {
1503                         *array = sample->read.one.id;
1504                         array++;
1505                 }
1506         }
1507
1508         if (type & PERF_SAMPLE_CALLCHAIN) {
1509                 sz = (sample->callchain->nr + 1) * sizeof(u64);
1510                 memcpy(array, sample->callchain, sz);
1511                 array = (void *)array + sz;
1512         }
1513
1514         if (type & PERF_SAMPLE_RAW) {
1515                 u.val32[0] = sample->raw_size;
1516                 *array = u.val64;
1517                 array = (void *)array + sizeof(u32);
1518
1519                 memcpy(array, sample->raw_data, sample->raw_size);
1520                 array = (void *)array + sample->raw_size;
1521         }
1522
1523         if (type & PERF_SAMPLE_BRANCH_STACK) {
1524                 sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1525                 /* nr, hw_idx */
1526                 sz += 2 * sizeof(u64);
1527                 memcpy(array, sample->branch_stack, sz);
1528                 array = (void *)array + sz;
1529         }
1530
1531         if (type & PERF_SAMPLE_REGS_USER) {
1532                 if (sample->user_regs.abi) {
1533                         *array++ = sample->user_regs.abi;
1534                         sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1535                         memcpy(array, sample->user_regs.regs, sz);
1536                         array = (void *)array + sz;
1537                 } else {
1538                         *array++ = 0;
1539                 }
1540         }
1541
1542         if (type & PERF_SAMPLE_STACK_USER) {
1543                 sz = sample->user_stack.size;
1544                 *array++ = sz;
1545                 if (sz) {
1546                         memcpy(array, sample->user_stack.data, sz);
1547                         array = (void *)array + sz;
1548                         *array++ = sz;
1549                 }
1550         }
1551
1552         if (type & PERF_SAMPLE_WEIGHT) {
1553                 *array = sample->weight;
1554                 array++;
1555         }
1556
1557         if (type & PERF_SAMPLE_DATA_SRC) {
1558                 *array = sample->data_src;
1559                 array++;
1560         }
1561
1562         if (type & PERF_SAMPLE_TRANSACTION) {
1563                 *array = sample->transaction;
1564                 array++;
1565         }
1566
1567         if (type & PERF_SAMPLE_REGS_INTR) {
1568                 if (sample->intr_regs.abi) {
1569                         *array++ = sample->intr_regs.abi;
1570                         sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1571                         memcpy(array, sample->intr_regs.regs, sz);
1572                         array = (void *)array + sz;
1573                 } else {
1574                         *array++ = 0;
1575                 }
1576         }
1577
1578         if (type & PERF_SAMPLE_PHYS_ADDR) {
1579                 *array = sample->phys_addr;
1580                 array++;
1581         }
1582
1583         if (type & PERF_SAMPLE_CGROUP) {
1584                 *array = sample->cgroup;
1585                 array++;
1586         }
1587
1588         if (type & PERF_SAMPLE_AUX) {
1589                 sz = sample->aux_sample.size;
1590                 *array++ = sz;
1591                 memcpy(array, sample->aux_sample.data, sz);
1592                 array = (void *)array + sz;
1593         }
1594
1595         return 0;
1596 }
1597
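/*
 * Emit PERF_RECORD_ID_INDEX events mapping every sample id in the evlist
 * to its (idx, cpu, tid) triple.  Entries are chunked so a single event
 * never overflows the 16-bit header size; full chunks are flushed through
 * 'process' and the final, possibly shorter, chunk is sent last.
 */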
1598 int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1599                                     struct evlist *evlist, struct machine *machine)
1600 {
1601         union perf_event *ev;
1602         struct evsel *evsel;
1603         size_t nr = 0, i = 0, sz, max_nr, n;
1604         int err;
1605
1606         pr_debug2("Synthesizing id index\n");
1607
1608         max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
1609                  sizeof(struct id_index_entry);
1610
1611         evlist__for_each_entry(evlist, evsel)
1612                 nr += evsel->core.ids;
1613
1614         n = nr > max_nr ? max_nr : nr;
1615         sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
1616         ev = zalloc(sz);
1617         if (!ev)
1618                 return -ENOMEM;
1619
1620         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1621         ev->id_index.header.size = sz;
1622         ev->id_index.nr = n;
1623
1624         evlist__for_each_entry(evlist, evsel) {
1625                 u32 j;
1626
1627                 for (j = 0; j < evsel->core.ids; j++) {
1628                         struct id_index_entry *e;
1629                         struct perf_sample_id *sid;
1630
1631                         if (i >= n) {
1632                                 err = process(tool, ev, NULL, machine);
1633                                 if (err)
1634                                         goto out_err;
1635                                 nr -= n;
1636                                 i = 0;
1637                         }
1638
1639                         e = &ev->id_index.entries[i++];
1640
1641                         e->id = evsel->core.id[j];
1642
1643                         sid = perf_evlist__id2sid(evlist, e->id);
1644                         if (!sid) {
1645                                 free(ev);
1646                                 return -ENOENT;
1647                         }
1648
1649                         e->idx = sid->idx;
1650                         e->cpu = sid->cpu;
1651                         e->tid = sid->tid;
1652                 }
1653         }
1654
1655         sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
1656         ev->id_index.header.size = sz;
1657         ev->id_index.nr = nr;
1658
1659         err = process(tool, ev, NULL, machine);
1660 out_err:
1661         free(ev);
1662
1663         return err;
1664 }
1665
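/*
 * Synthesize the pre-existing threads for the monitored target: walk the
 * supplied thread map when specific tasks were requested, synthesize all
 * system threads when monitoring by CPU, and do nothing up front when only
 * a command to launch was specified.
 */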
1666 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1667                                   struct target *target, struct perf_thread_map *threads,
1668                                   perf_event__handler_t process, bool data_mmap,
1669                                   unsigned int nr_threads_synthesize)
1670 {
1671         if (target__has_task(target))
1672                 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1673         else if (target__has_cpu(target))
1674                 return perf_event__synthesize_threads(tool, process,
1675                                                       machine, data_mmap,
1676                                                       nr_threads_synthesize);
1677         /* command specified */
1678         return 0;
1679 }
1680
1681 int machine__synthesize_threads(struct machine *machine, struct target *target,
1682                                 struct perf_thread_map *threads, bool data_mmap,
1683                                 unsigned int nr_threads_synthesize)
1684 {
1685         return __machine__synthesize_threads(machine, NULL, target, threads,
1686                                              perf_event__process, data_mmap,
1687                                              nr_threads_synthesize);
1688 }
1689
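/*
 * Allocate a zeroed PERF_RECORD_EVENT_UPDATE event with room for 'size'
 * bytes of payload (rounded up to a u64 boundary) and pre-fill the header,
 * update type and event id.
 */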
1690 static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1691 {
1692         struct perf_record_event_update *ev;
1693
1694         size += sizeof(*ev);
1695         size  = PERF_ALIGN(size, sizeof(u64));
1696
1697         ev = zalloc(size);
1698         if (ev) {
1699                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
1700                 ev->header.size = (u16)size;
1701                 ev->type        = type;
1702                 ev->id          = id;
1703         }
1704         return ev;
1705 }
1706
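/*
 * The event_update synthesizers below emit one PERF_RECORD_EVENT_UPDATE
 * per property (unit string, scale factor, event name, cpu map), keyed by
 * the evsel's first sample id.
 */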
1707 int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1708                                              perf_event__handler_t process)
1709 {
1710         size_t size = strlen(evsel->unit);
1711         struct perf_record_event_update *ev;
1712         int err;
1713
1714         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1715         if (ev == NULL)
1716                 return -ENOMEM;
1717
1718         strlcpy(ev->data, evsel->unit, size + 1);
1719         err = process(tool, (union perf_event *)ev, NULL, NULL);
1720         free(ev);
1721         return err;
1722 }
1723
1724 int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1725                                               perf_event__handler_t process)
1726 {
1727         struct perf_record_event_update *ev;
1728         struct perf_record_event_update_scale *ev_data;
1729         int err;
1730
1731         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1732         if (ev == NULL)
1733                 return -ENOMEM;
1734
1735         ev_data = (struct perf_record_event_update_scale *)ev->data;
1736         ev_data->scale = evsel->scale;
1737         err = process(tool, (union perf_event *)ev, NULL, NULL);
1738         free(ev);
1739         return err;
1740 }
1741
1742 int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1743                                              perf_event__handler_t process)
1744 {
1745         struct perf_record_event_update *ev;
1746         size_t len = strlen(evsel->name);
1747         int err;
1748
1749         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1750         if (ev == NULL)
1751                 return -ENOMEM;
1752
1753         strlcpy(ev->data, evsel->name, len + 1);
1754         err = process(tool, (union perf_event *)ev, NULL, NULL);
1755         free(ev);
1756         return err;
1757 }
1758
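/*
 * Synthesize the evsel's own cpu map, if any, as an EVENT_UPDATE__CPUS
 * record, packing the cpu list with cpu_map_data__synthesize() in the
 * encoding chosen by cpu_map_data__alloc().
 */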
1759 int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
1760                                              perf_event__handler_t process)
1761 {
1762         size_t size = sizeof(struct perf_record_event_update);
1763         struct perf_record_event_update *ev;
1764         int max, err;
1765         u16 type;
1766
1767         if (!evsel->core.own_cpus)
1768                 return 0;
1769
1770         ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
1771         if (!ev)
1772                 return -ENOMEM;
1773
1774         ev->header.type = PERF_RECORD_EVENT_UPDATE;
1775         ev->header.size = (u16)size;
1776         ev->type        = PERF_EVENT_UPDATE__CPUS;
1777         ev->id          = evsel->core.id[0];
1778
1779         cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
1780                                  evsel->core.own_cpus, type, max);
1781
1782         err = process(tool, (union perf_event *)ev, NULL, NULL);
1783         free(ev);
1784         return err;
1785 }
1786
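/*
 * Emit one PERF_RECORD_HEADER_ATTR event per evsel in the evlist, each
 * carrying the perf_event_attr followed by its sample ids.
 */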
1787 int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
1788                                  perf_event__handler_t process)
1789 {
1790         struct evsel *evsel;
1791         int err = 0;
1792
1793         evlist__for_each_entry(evlist, evsel) {
1794                 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
1795                                                   evsel->core.id, process);
1796                 if (err) {
1797                         pr_debug("failed to create perf header attribute\n");
1798                         return err;
1799                 }
1800         }
1801
1802         return err;
1803 }
1804
1805 static bool has_unit(struct evsel *evsel)
1806 {
1807         return evsel->unit && *evsel->unit;
1808 }
1809
1810 static bool has_scale(struct evsel *evsel)
1811 {
1812         return evsel->scale != 1;
1813 }
1814
1815 int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
1816                                       perf_event__handler_t process, bool is_pipe)
1817 {
1818         struct evsel *evsel;
1819         int err;
1820
1821         /*
1822          * Synthesize other event details not carried within
1823          * the attr event - unit, scale, name.
1824          */
1825         evlist__for_each_entry(evsel_list, evsel) {
1826                 if (!evsel->supported)
1827                         continue;
1828
1829                 /*
1830                  * Synthesize unit and scale only if they are defined.
1831                  */
1832                 if (has_unit(evsel)) {
1833                         err = perf_event__synthesize_event_update_unit(tool, evsel, process);
1834                         if (err < 0) {
1835                                 pr_err("Couldn't synthesize evsel unit.\n");
1836                                 return err;
1837                         }
1838                 }
1839
1840                 if (has_scale(evsel)) {
1841                         err = perf_event__synthesize_event_update_scale(tool, evsel, process);
1842                         if (err < 0) {
1843                                 pr_err("Couldn't synthesize evsel scale.\n");
1844                                 return err;
1845                         }
1846                 }
1847
1848                 if (evsel->core.own_cpus) {
1849                         err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
1850                         if (err < 0) {
1851                                 pr_err("Couldn't synthesize evsel cpus.\n");
1852                                 return err;
1853                         }
1854                 }
1855
1856                 /*
1857                  * Name is needed only for pipe output;
1858                  * the perf.data file already carries event names.
1859                  */
1860                 if (is_pipe) {
1861                         err = perf_event__synthesize_event_update_name(tool, evsel, process);
1862                         if (err < 0) {
1863                                 pr_err("Couldn't synthesize evsel name.\n");
1864                                 return err;
1865                         }
1866                 }
1867         }
1868         return 0;
1869 }
1870
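/*
 * Build a PERF_RECORD_HEADER_ATTR event: the attr itself (padded to a u64
 * boundary) followed by the array of sample ids.  Returns -E2BIG if the
 * total size does not fit in the 16-bit header size field.
 */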
1871 int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
1872                                 u32 ids, u64 *id, perf_event__handler_t process)
1873 {
1874         union perf_event *ev;
1875         size_t size;
1876         int err;
1877
1878         size = sizeof(struct perf_event_attr);
1879         size = PERF_ALIGN(size, sizeof(u64));
1880         size += sizeof(struct perf_event_header);
1881         size += ids * sizeof(u64);
1882
1883         ev = zalloc(size);
1884
1885         if (ev == NULL)
1886                 return -ENOMEM;
1887
1888         ev->attr.attr = *attr;
1889         memcpy(ev->attr.id, id, ids * sizeof(u64));
1890
1891         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
1892         ev->attr.header.size = (u16)size;
1893
1894         if (ev->attr.header.size == size)
1895                 err = process(tool, ev, NULL, NULL);
1896         else
1897                 err = -E2BIG;
1898
1899         free(ev);
1900
1901         return err;
1902 }
1903
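/*
 * Announce the tracepoint metadata on a pipe output: emit a
 * PERF_RECORD_HEADER_TRACING_DATA event carrying the u64-aligned payload
 * size, stream the data itself, then pad up to the aligned size, which is
 * also the return value on success.
 */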
1904 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
1905                                         perf_event__handler_t process)
1906 {
1907         union perf_event ev;
1908         struct tracing_data *tdata;
1909         ssize_t size = 0, aligned_size = 0, padding;
1910         struct feat_fd ff;
1911
1912         /*
1913          * We are going to store the size of the data followed
1914          * by the data contents. Since the output fd is a pipe,
1915          * we cannot seek back to patch in the size once we
1916          * know it. Instead we:
1917          *
1918          * - write the tracing data to the temp file
1919          * - get/write the data size to pipe
1920          * - write the tracing data from the temp file
1921          *   to the pipe
1922          */
1923         tdata = tracing_data_get(&evlist->core.entries, fd, true);
1924         if (!tdata)
1925                 return -1;
1926
1927         memset(&ev, 0, sizeof(ev));
1928
1929         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
1930         size = tdata->size;
1931         aligned_size = PERF_ALIGN(size, sizeof(u64));
1932         padding = aligned_size - size;
1933         ev.tracing_data.header.size = sizeof(ev.tracing_data);
1934         ev.tracing_data.size = aligned_size;
1935
1936         process(tool, &ev, NULL, NULL);
1937
1938         /*
1939          * The put function will copy all the tracing data
1940          * stored in temp file to the pipe.
1941          */
1942         tracing_data_put(tdata);
1943
1944         ff = (struct feat_fd){ .fd = fd };
1945         if (write_padded(&ff, NULL, 0, padding))
1946                 return -1;
1947
1948         return aligned_size;
1949 }
1950
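/*
 * Synthesize a PERF_RECORD_HEADER_BUILD_ID event for a DSO that was hit by
 * samples, pairing its build id with the long file name padded out to
 * NAME_ALIGN.
 */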
1951 int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
1952                                     perf_event__handler_t process, struct machine *machine)
1953 {
1954         union perf_event ev;
1955         size_t len;
1956
1957         if (!pos->hit)
1958                 return 0;
1959
1960         memset(&ev, 0, sizeof(ev));
1961
1962         len = pos->long_name_len + 1;
1963         len = PERF_ALIGN(len, NAME_ALIGN);
1964         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
1965         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
1966         ev.build_id.header.misc = misc;
1967         ev.build_id.pid = machine->pid;
1968         ev.build_id.header.size = sizeof(ev.build_id) + len;
1969         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
1970
1971         return process(tool, &ev, NULL, machine);
1972 }
1973
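/*
 * Emit the preamble records a stat session needs: optionally the attr
 * events, then the extra attr details (unit/scale/name/cpus), the thread
 * map, the cpu map and finally the stat config.
 */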
1974 int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
1975                                        struct evlist *evlist, perf_event__handler_t process, bool attrs)
1976 {
1977         int err;
1978
1979         if (attrs) {
1980                 err = perf_event__synthesize_attrs(tool, evlist, process);
1981                 if (err < 0) {
1982                         pr_err("Couldn't synthesize attrs.\n");
1983                         return err;
1984                 }
1985         }
1986
1987         err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
             if (err < 0) {
                     pr_err("Couldn't synthesize extra attrs.\n");
                     return err;
             }

1988         err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
1989         if (err < 0) {
1990                 pr_err("Couldn't synthesize thread map.\n");
1991                 return err;
1992         }
1993
1994         err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
1995         if (err < 0) {
1996                 pr_err("Couldn't synthesize cpu map.\n");
1997                 return err;
1998         }
1999
2000         err = perf_event__synthesize_stat_config(tool, config, process, NULL);
2001         if (err < 0) {
2002                 pr_err("Couldn't synthesize config.\n");
2003                 return err;
2004         }
2005
2006         return 0;
2007 }
2008
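/*
 * Weak default that synthesizes nothing; architectures able to describe
 * their hardware clock (e.g. x86 TSC) override this to emit a time
 * conversion record.
 */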
2009 int __weak perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
2010                                        struct perf_tool *tool __maybe_unused,
2011                                        perf_event__handler_t process __maybe_unused,
2012                                        struct machine *machine __maybe_unused)
2013 {
2014         return 0;
2015 }
2016
2017 extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
2018
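/*
 * Replay each header feature present in the session as a
 * PERF_RECORD_HEADER_FEATURE event, skipping features that are not
 * synthesized, and terminate the stream with a HEADER_LAST_FEATURE marker.
 */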
2019 int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
2020                                     struct evlist *evlist, perf_event__handler_t process)
2021 {
2022         struct perf_header *header = &session->header;
2023         struct perf_record_header_feature *fe;
2024         struct feat_fd ff;
2025         size_t sz, sz_hdr;
2026         int feat, ret;
2027
2028         sz_hdr = sizeof(fe->header);
2029         sz = sizeof(union perf_event);
2030         /* get a nice alignment */
2031         sz = PERF_ALIGN(sz, page_size);
2032
2033         memset(&ff, 0, sizeof(ff));
2034
2035         ff.buf = malloc(sz);
2036         if (!ff.buf)
2037                 return -ENOMEM;
2038
2039         ff.size = sz - sz_hdr;
2040         ff.ph = &session->header;
2041
2042         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2043                 if (!feat_ops[feat].synthesize) {
2044                         pr_debug("No record header feature for header %d\n", feat);
2045                         continue;
2046                 }
2047
2048                 ff.offset = sizeof(*fe);
2049
2050                 ret = feat_ops[feat].write(&ff, evlist);
2051                 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
2052                         pr_debug("Error writing feature\n");
2053                         continue;
2054                 }
2055                 /* ff.buf may have changed due to realloc in do_write() */
2056                 fe = ff.buf;
2057                 memset(fe, 0, sizeof(*fe));
2058
2059                 fe->feat_id = feat;
2060                 fe->header.type = PERF_RECORD_HEADER_FEATURE;
2061                 fe->header.size = ff.offset;
2062
2063                 ret = process(tool, ff.buf, NULL, NULL);
2064                 if (ret) {
2065                         free(ff.buf);
2066                         return ret;
2067                 }
2068         }
2069
2070         /* Send HEADER_LAST_FEATURE mark. */
2071         fe = ff.buf;
2072         fe->feat_id     = HEADER_LAST_FEATURE;
2073         fe->header.type = PERF_RECORD_HEADER_FEATURE;
2074         fe->header.size = sizeof(*fe);
2075
2076         ret = process(tool, ff.buf, NULL, NULL);
2077
2078         free(ff.buf);
2079         return ret;
2080 }