[linux-2.6-microblaze.git] / tools/perf/util/synthetic-events.c
1 // SPDX-License-Identifier: GPL-2.0-only 
2
3 #include "util/debug.h"
4 #include "util/dso.h"
5 #include "util/event.h"
6 #include "util/evlist.h"
7 #include "util/machine.h"
8 #include "util/map.h"
9 #include "util/map_symbol.h"
10 #include "util/branch.h"
11 #include "util/memswap.h"
12 #include "util/namespaces.h"
13 #include "util/session.h"
14 #include "util/stat.h"
15 #include "util/symbol.h"
16 #include "util/synthetic-events.h"
17 #include "util/target.h"
18 #include "util/time-utils.h"
19 #include "util/cgroup.h"
20 #include <linux/bitops.h>
21 #include <linux/kernel.h>
22 #include <linux/string.h>
23 #include <linux/zalloc.h>
24 #include <linux/perf_event.h>
25 #include <asm/bug.h>
26 #include <perf/evsel.h>
27 #include <internal/cpumap.h>
28 #include <perf/cpumap.h>
29 #include <internal/lib.h> // page_size
30 #include <internal/threadmap.h>
31 #include <perf/threadmap.h>
32 #include <symbol/kallsyms.h>
33 #include <dirent.h>
34 #include <errno.h>
35 #include <inttypes.h>
36 #include <stdio.h>
37 #include <string.h>
38 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
39 #include <api/fs/fs.h>
40 #include <api/io.h>
41 #include <sys/types.h>
42 #include <sys/stat.h>
43 #include <fcntl.h>
44 #include <unistd.h>
45
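/* Timeout for parsing one task's /proc/<pid>/maps, in milliseconds. */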
46 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
47
48 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
49
50 int perf_tool__process_synth_event(struct perf_tool *tool,
51                                    union perf_event *event,
52                                    struct machine *machine,
53                                    perf_event__handler_t process)
54 {
55         struct perf_sample synth_sample = {
56                 .pid       = -1,
57                 .tid       = -1,
58                 .time      = -1,
59                 .stream_id = -1,
60                 .cpu       = -1,
61                 .period    = 1,
62                 .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
63         };
64
65         return process(tool, event, &synth_sample, machine);
66 }
67
68 /*
69  * Assumes that the first 4095 bytes of /proc/pid/status contain
70  * the comm, tgid and ppid.
71  */
72 static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
73                                     pid_t *tgid, pid_t *ppid, bool *kernel)
74 {
75         char bf[4096];
76         int fd;
77         size_t size = 0;
78         ssize_t n;
79         char *name, *tgids, *ppids, *vmpeak, *threads;
80
81         *tgid = -1;
82         *ppid = -1;
83
84         if (pid)
85                 snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
86         else
87                 snprintf(bf, sizeof(bf), "/proc/%d/status", tid);
88
89         fd = open(bf, O_RDONLY);
90         if (fd < 0) {
91                 pr_debug("couldn't open %s\n", bf);
92                 return -1;
93         }
94
95         n = read(fd, bf, sizeof(bf) - 1);
96         close(fd);
97         if (n <= 0) {
98                 pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
99                            tid);
100                 return -1;
101         }
102         bf[n] = '\0';
103
104         name = strstr(bf, "Name:");
105         tgids = strstr(name ?: bf, "Tgid:");
106         ppids = strstr(tgids ?: bf, "PPid:");
107         vmpeak = strstr(ppids ?: bf, "VmPeak:");
108
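        /*
         * Kernel threads have no user address space, so their status file has
         * no VmPeak: line but still has a Threads: line; that is how the
         * *kernel flag is inferred below.
         */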
109         if (vmpeak)
110                 threads = NULL;
111         else
112                 threads = strstr(ppids ?: bf, "Threads:");
113
114         if (name) {
115                 char *nl;
116
117                 name = skip_spaces(name + 5);  /* strlen("Name:") */
118                 nl = strchr(name, '\n');
119                 if (nl)
120                         *nl = '\0';
121
122                 size = strlen(name);
123                 if (size >= len)
124                         size = len - 1;
125                 memcpy(comm, name, size);
126                 comm[size] = '\0';
127         } else {
128                 pr_debug("Name: string not found for pid %d\n", tid);
129         }
130
131         if (tgids) {
132                 tgids += 5;  /* strlen("Tgid:") */
133                 *tgid = atoi(tgids);
134         } else {
135                 pr_debug("Tgid: string not found for pid %d\n", tid);
136         }
137
138         if (ppids) {
139                 ppids += 5;  /* strlen("PPid:") */
140                 *ppid = atoi(ppids);
141         } else {
142                 pr_debug("PPid: string not found for pid %d\n", tid);
143         }
144
145         if (!vmpeak && threads)
146                 *kernel = true;
147         else
148                 *kernel = false;
149
150         return 0;
151 }
152
153 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
154                                     struct machine *machine,
155                                     pid_t *tgid, pid_t *ppid, bool *kernel)
156 {
157         size_t size;
158
159         *ppid = -1;
160
161         memset(&event->comm, 0, sizeof(event->comm));
162
163         if (machine__is_host(machine)) {
164                 if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
165                                              sizeof(event->comm.comm),
166                                              tgid, ppid, kernel) != 0) {
167                         return -1;
168                 }
169         } else {
170                 *tgid = machine->pid;
171         }
172
173         if (*tgid < 0)
174                 return -1;
175
176         event->comm.pid = *tgid;
177         event->comm.header.type = PERF_RECORD_COMM;
178
179         size = strlen(event->comm.comm) + 1;
180         size = PERF_ALIGN(size, sizeof(u64));
181         memset(event->comm.comm + size, 0, machine->id_hdr_size);
182         event->comm.header.size = (sizeof(event->comm) -
183                                 (sizeof(event->comm.comm) - size) +
184                                 machine->id_hdr_size);
185         event->comm.tid = tid;
186
187         return 0;
188 }
189
190 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
191                                          union perf_event *event, pid_t pid,
192                                          perf_event__handler_t process,
193                                          struct machine *machine)
194 {
195         pid_t tgid, ppid;
196         bool kernel_thread;
197
198         if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
199                                      &kernel_thread) != 0)
200                 return -1;
201
202         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
203                 return -1;
204
205         return tgid;
206 }
207
208 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
209                                          struct perf_ns_link_info *ns_link_info)
210 {
211         struct stat64 st;
212         char proc_ns[128];
213
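        /* A namespace is identified by the device and inode of its /proc/<pid>/ns/<name> link. */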
214         sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
215         if (stat64(proc_ns, &st) == 0) {
216                 ns_link_info->dev = st.st_dev;
217                 ns_link_info->ino = st.st_ino;
218         }
219 }
220
221 int perf_event__synthesize_namespaces(struct perf_tool *tool,
222                                       union perf_event *event,
223                                       pid_t pid, pid_t tgid,
224                                       perf_event__handler_t process,
225                                       struct machine *machine)
226 {
227         u32 idx;
228         struct perf_ns_link_info *ns_link_info;
229
230         if (!tool || !tool->namespace_events)
231                 return 0;
232
233         memset(&event->namespaces, 0, (sizeof(event->namespaces) +
234                (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
235                machine->id_hdr_size));
236
237         event->namespaces.pid = tgid;
238         event->namespaces.tid = pid;
239
240         event->namespaces.nr_namespaces = NR_NAMESPACES;
241
242         ns_link_info = event->namespaces.link_info;
243
244         for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
245                 perf_event__get_ns_link_info(pid, perf_ns__name(idx),
246                                              &ns_link_info[idx]);
247
248         event->namespaces.header.type = PERF_RECORD_NAMESPACES;
249
250         event->namespaces.header.size = (sizeof(event->namespaces) +
251                         (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
252                         machine->id_hdr_size);
253
254         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
255                 return -1;
256
257         return 0;
258 }
259
260 static int perf_event__synthesize_fork(struct perf_tool *tool,
261                                        union perf_event *event,
262                                        pid_t pid, pid_t tgid, pid_t ppid,
263                                        perf_event__handler_t process,
264                                        struct machine *machine)
265 {
266         memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
267
268         /*
269          * For the main thread, set the parent to the ppid from the status
270          * file. For other threads, set the parent pid to the main thread,
271          * i.e. assume the main thread spawns all threads in a process.
272          */
273         if (tgid == pid) {
274                 event->fork.ppid = ppid;
275                 event->fork.ptid = ppid;
276         } else {
277                 event->fork.ppid = tgid;
278                 event->fork.ptid = tgid;
279         }
280         event->fork.pid  = tgid;
281         event->fork.tid  = pid;
282         event->fork.header.type = PERF_RECORD_FORK;
283         event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
284
285         event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
286
287         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
288                 return -1;
289
290         return 0;
291 }
292
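/*
 * Parse a single /proc/<pid>/maps line, e.g.:
 *   00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat
 * into its component fields. Returns false on malformed input or read errors.
 */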
293 static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
294                                 u32 *prot, u32 *flags, __u64 *offset,
295                                 u32 *maj, u32 *min,
296                                 __u64 *inode,
297                                 ssize_t pathname_size, char *pathname)
298 {
299         __u64 temp;
300         int ch;
301         char *start_pathname = pathname;
302
303         if (io__get_hex(io, start) != '-')
304                 return false;
305         if (io__get_hex(io, end) != ' ')
306                 return false;
307
308         /* map protection and flags bits */
309         *prot = 0;
310         ch = io__get_char(io);
311         if (ch == 'r')
312                 *prot |= PROT_READ;
313         else if (ch != '-')
314                 return false;
315         ch = io__get_char(io);
316         if (ch == 'w')
317                 *prot |= PROT_WRITE;
318         else if (ch != '-')
319                 return false;
320         ch = io__get_char(io);
321         if (ch == 'x')
322                 *prot |= PROT_EXEC;
323         else if (ch != '-')
324                 return false;
325         ch = io__get_char(io);
326         if (ch == 's')
327                 *flags = MAP_SHARED;
328         else if (ch == 'p')
329                 *flags = MAP_PRIVATE;
330         else
331                 return false;
332         if (io__get_char(io) != ' ')
333                 return false;
334
335         if (io__get_hex(io, offset) != ' ')
336                 return false;
337
338         if (io__get_hex(io, &temp) != ':')
339                 return false;
340         *maj = temp;
341         if (io__get_hex(io, &temp) != ' ')
342                 return false;
343         *min = temp;
344
345         ch = io__get_dec(io, inode);
346         if (ch != ' ') {
347                 *pathname = '\0';
348                 return ch == '\n';
349         }
350         do {
351                 ch = io__get_char(io);
352         } while (ch == ' ');
353         while (true) {
354                 if (ch < 0)
355                         return false;
356                 if (ch == '\0' || ch == '\n' ||
357                     (pathname + 1 - start_pathname) >= pathname_size) {
358                         *pathname = '\0';
359                         return true;
360                 }
361                 *pathname++ = ch;
362                 ch = io__get_char(io);
363         }
364 }
365
366 static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
367                                              bool is_kernel)
368 {
369         struct build_id bid;
370         int rc;
371
372         if (is_kernel)
373                 rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
374         else
375                 rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
376
377         if (rc == 0) {
378                 memcpy(event->build_id, bid.data, sizeof(bid.data));
379                 event->build_id_size = (u8) bid.size;
380                 event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
381                 event->__reserved_1 = 0;
382                 event->__reserved_2 = 0;
383         } else {
384                 if (event->filename[0] == '/') {
385                         pr_debug2("Failed to read build ID for %s\n",
386                                   event->filename);
387                 }
388         }
389 }
390
391 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
392                                        union perf_event *event,
393                                        pid_t pid, pid_t tgid,
394                                        perf_event__handler_t process,
395                                        struct machine *machine,
396                                        bool mmap_data)
397 {
398         unsigned long long t;
399         char bf[BUFSIZ];
400         struct io io;
401         bool truncation = false;
402         unsigned long long timeout = proc_map_timeout * 1000000ULL;
403         int rc = 0;
404         const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
405         int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
406
407         if (machine__is_default_guest(machine))
408                 return 0;
409
410         snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
411                 machine->root_dir, pid, pid);
412
413         io.fd = open(bf, O_RDONLY, 0);
414         if (io.fd < 0) {
415                 /*
416                  * We raced with a task exiting - just return:
417                  */
418                 pr_debug("couldn't open %s\n", bf);
419                 return -1;
420         }
421         io__init(&io, io.fd, bf, sizeof(bf));
422
423         event->header.type = PERF_RECORD_MMAP2;
424         t = rdclock();
425
426         while (!io.eof) {
427                 static const char anonstr[] = "//anon";
428                 size_t size;
429
430                 /* ensure null termination since stack will be reused. */
431                 event->mmap2.filename[0] = '\0';
432
433                 /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
434                 if (!read_proc_maps_line(&io,
435                                         &event->mmap2.start,
436                                         &event->mmap2.len,
437                                         &event->mmap2.prot,
438                                         &event->mmap2.flags,
439                                         &event->mmap2.pgoff,
440                                         &event->mmap2.maj,
441                                         &event->mmap2.min,
442                                         &event->mmap2.ino,
443                                         sizeof(event->mmap2.filename),
444                                         event->mmap2.filename))
445                         continue;
446
447                 if ((rdclock() - t) > timeout) {
448                         pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
449                                    "You may want to increase "
450                                    "the time limit with --proc-map-timeout\n",
451                                    machine->root_dir, pid, pid);
452                         truncation = true;
453                         goto out;
454                 }
455
456                 event->mmap2.ino_generation = 0;
457
458                 /*
459                  * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
460                  */
461                 if (machine__is_host(machine))
462                         event->header.misc = PERF_RECORD_MISC_USER;
463                 else
464                         event->header.misc = PERF_RECORD_MISC_GUEST_USER;
465
466                 if ((event->mmap2.prot & PROT_EXEC) == 0) {
467                         if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
468                                 continue;
469
470                         event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
471                 }
472
473 out:
474                 if (truncation)
475                         event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
476
477                 if (!strcmp(event->mmap2.filename, ""))
478                         strcpy(event->mmap2.filename, anonstr);
479
480                 if (hugetlbfs_mnt_len &&
481                     !strncmp(event->mmap2.filename, hugetlbfs_mnt,
482                              hugetlbfs_mnt_len)) {
483                         strcpy(event->mmap2.filename, anonstr);
484                         event->mmap2.flags |= MAP_HUGETLB;
485                 }
486
487                 size = strlen(event->mmap2.filename) + 1;
488                 size = PERF_ALIGN(size, sizeof(u64));
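                /*
                 * read_proc_maps_line() stored the map end address in .len;
                 * mmap.start aliases mmap2.start in the event union, so this
                 * turns .len into the actual mapping length.
                 */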
489                 event->mmap2.len -= event->mmap.start;
490                 event->mmap2.header.size = (sizeof(event->mmap2) -
491                                         (sizeof(event->mmap2.filename) - size));
492                 memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
493                 event->mmap2.header.size += machine->id_hdr_size;
494                 event->mmap2.pid = tgid;
495                 event->mmap2.tid = pid;
496
497                 if (symbol_conf.buildid_mmap2)
498                         perf_record_mmap2__read_build_id(&event->mmap2, false);
499
500                 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
501                         rc = -1;
502                         break;
503                 }
504
505                 if (truncation)
506                         break;
507         }
508
509         close(io.fd);
510         return rc;
511 }
512
513 #ifdef HAVE_FILE_HANDLE
514 static int perf_event__synthesize_cgroup(struct perf_tool *tool,
515                                          union perf_event *event,
516                                          char *path, size_t mount_len,
517                                          perf_event__handler_t process,
518                                          struct machine *machine)
519 {
520         size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
521         size_t path_len = strlen(path) - mount_len + 1;
522         struct {
523                 struct file_handle fh;
524                 uint64_t cgroup_id;
525         } handle;
526         int mount_id;
527
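        /* Pad the path with NULs up to the next u64 boundary so the record size stays 8-byte aligned. */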
528         while (path_len % sizeof(u64))
529                 path[mount_len + path_len++] = '\0';
530
531         memset(&event->cgroup, 0, event_size);
532
533         event->cgroup.header.type = PERF_RECORD_CGROUP;
534         event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
535
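        /*
         * cgroup v2 exposes the 64-bit cgroup id as the file handle of the
         * cgroup directory, so name_to_handle_at() is used to retrieve it.
         */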
536         handle.fh.handle_bytes = sizeof(handle.cgroup_id);
537         if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
538                 pr_debug("name_to_handle_at failed: %s\n", path);
539                 return -1;
540         }
541
542         event->cgroup.id = handle.cgroup_id;
543         strncpy(event->cgroup.path, path + mount_len, path_len);
544         memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
545
546         if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
547                 pr_debug("process synth event failed\n");
548                 return -1;
549         }
550
551         return 0;
552 }
553
554 static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
555                                         union perf_event *event,
556                                         char *path, size_t mount_len,
557                                         perf_event__handler_t process,
558                                         struct machine *machine)
559 {
560         size_t pos = strlen(path);
561         DIR *d;
562         struct dirent *dent;
563         int ret = 0;
564
565         if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
566                                           process, machine) < 0)
567                 return -1;
568
569         d = opendir(path);
570         if (d == NULL) {
571                 pr_debug("failed to open directory: %s\n", path);
572                 return -1;
573         }
574
575         while ((dent = readdir(d)) != NULL) {
576                 if (dent->d_type != DT_DIR)
577                         continue;
578                 if (!strcmp(dent->d_name, ".") ||
579                     !strcmp(dent->d_name, ".."))
580                         continue;
581
582                 /* any sane path should be less than PATH_MAX */
583                 if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
584                         continue;
585
586                 if (path[pos - 1] != '/')
587                         strcat(path, "/");
588                 strcat(path, dent->d_name);
589
590                 ret = perf_event__walk_cgroup_tree(tool, event, path,
591                                                    mount_len, process, machine);
592                 if (ret < 0)
593                         break;
594
595                 path[pos] = '\0';
596         }
597
598         closedir(d);
599         return ret;
600 }
601
602 int perf_event__synthesize_cgroups(struct perf_tool *tool,
603                                    perf_event__handler_t process,
604                                    struct machine *machine)
605 {
606         union perf_event event;
607         char cgrp_root[PATH_MAX];
608         size_t mount_len;  /* length of mount point in the path */
609
610         if (!tool || !tool->cgroup_events)
611                 return 0;
612
613         if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
614                 pr_debug("cannot find cgroup mount point\n");
615                 return -1;
616         }
617
618         mount_len = strlen(cgrp_root);
619         /* make sure the path starts with a slash (after mount point) */
620         strcat(cgrp_root, "/");
621
622         if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
623                                          process, machine) < 0)
624                 return -1;
625
626         return 0;
627 }
628 #else
629 int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
630                                    perf_event__handler_t process __maybe_unused,
631                                    struct machine *machine __maybe_unused)
632 {
633         return -1;
634 }
635 #endif
636
637 int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
638                                    struct machine *machine)
639 {
640         int rc = 0;
641         struct map *pos;
642         struct maps *maps = machine__kernel_maps(machine);
643         union perf_event *event;
644         size_t size = symbol_conf.buildid_mmap2 ?
645                         sizeof(event->mmap2) : sizeof(event->mmap);
646
647         event = zalloc(size + machine->id_hdr_size);
648         if (event == NULL) {
649                 pr_debug("Not enough memory synthesizing mmap event "
650                          "for kernel modules\n");
651                 return -1;
652         }
653
654         /*
655          * kernel uses 0 for user space maps, see kernel/perf_event.c
656          * __perf_event_mmap
657          */
658         if (machine__is_host(machine))
659                 event->header.misc = PERF_RECORD_MISC_KERNEL;
660         else
661                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
662
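        /* Walk the kernel maps and emit one synthetic mmap/mmap2 record per loaded module. */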
663         maps__for_each_entry(maps, pos) {
664                 if (!__map__is_kmodule(pos))
665                         continue;
666
667                 if (symbol_conf.buildid_mmap2) {
668                         size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
669                         event->mmap2.header.type = PERF_RECORD_MMAP2;
670                         event->mmap2.header.size = (sizeof(event->mmap2) -
671                                                 (sizeof(event->mmap2.filename) - size));
672                         memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
673                         event->mmap2.header.size += machine->id_hdr_size;
674                         event->mmap2.start = pos->start;
675                         event->mmap2.len   = pos->end - pos->start;
676                         event->mmap2.pid   = machine->pid;
677
678                         memcpy(event->mmap2.filename, pos->dso->long_name,
679                                pos->dso->long_name_len + 1);
680
681                         perf_record_mmap2__read_build_id(&event->mmap2, false);
682                 } else {
683                         size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
684                         event->mmap.header.type = PERF_RECORD_MMAP;
685                         event->mmap.header.size = (sizeof(event->mmap) -
686                                                 (sizeof(event->mmap.filename) - size));
687                         memset(event->mmap.filename + size, 0, machine->id_hdr_size);
688                         event->mmap.header.size += machine->id_hdr_size;
689                         event->mmap.start = pos->start;
690                         event->mmap.len   = pos->end - pos->start;
691                         event->mmap.pid   = machine->pid;
692
693                         memcpy(event->mmap.filename, pos->dso->long_name,
694                                pos->dso->long_name_len + 1);
695                 }
696
697                 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
698                         rc = -1;
699                         break;
700                 }
701         }
702
703         free(event);
704         return rc;
705 }
706
707 static int filter_task(const struct dirent *dirent)
708 {
709         return isdigit(dirent->d_name[0]);
710 }
711
712 static int __event__synthesize_thread(union perf_event *comm_event,
713                                       union perf_event *mmap_event,
714                                       union perf_event *fork_event,
715                                       union perf_event *namespaces_event,
716                                       pid_t pid, int full, perf_event__handler_t process,
717                                       struct perf_tool *tool, struct machine *machine, bool mmap_data)
718 {
719         char filename[PATH_MAX];
720         struct dirent **dirent;
721         pid_t tgid, ppid;
722         int rc = 0;
723         int i, n;
724
725         /* special case: only send one comm event using passed in pid */
726         if (!full) {
727                 tgid = perf_event__synthesize_comm(tool, comm_event, pid,
728                                                    process, machine);
729
730                 if (tgid == -1)
731                         return -1;
732
733                 if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
734                                                       tgid, process, machine) < 0)
735                         return -1;
736
737                 /*
738                  * send mmap only for thread group leader
739                  * see thread__init_maps()
740                  */
741                 if (pid == tgid &&
742                     perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
743                                                        process, machine, mmap_data))
744                         return -1;
745
746                 return 0;
747         }
748
749         if (machine__is_default_guest(machine))
750                 return 0;
751
752         snprintf(filename, sizeof(filename), "%s/proc/%d/task",
753                  machine->root_dir, pid);
754
755         n = scandir(filename, &dirent, filter_task, alphasort);
756         if (n < 0)
757                 return n;
758
759         for (i = 0; i < n; i++) {
760                 char *end;
761                 pid_t _pid;
762                 bool kernel_thread;
763
764                 _pid = strtol(dirent[i]->d_name, &end, 10);
765                 if (*end)
766                         continue;
767
768                 rc = -1;
769                 if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
770                                              &tgid, &ppid, &kernel_thread) != 0)
771                         break;
772
773                 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
774                                                 ppid, process, machine) < 0)
775                         break;
776
777                 if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
778                                                       tgid, process, machine) < 0)
779                         break;
780
781                 /*
782                  * Send the prepared comm event
783                  */
784                 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
785                         break;
786
787                 rc = 0;
788                 if (_pid == pid && !kernel_thread) {
789                         /* process the parent's maps too */
790                         rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
791                                                 process, machine, mmap_data);
792                         if (rc)
793                                 break;
794                 }
795         }
796
797         for (i = 0; i < n; i++)
798                 zfree(&dirent[i]);
799         free(dirent);
800
801         return rc;
802 }
803
804 int perf_event__synthesize_thread_map(struct perf_tool *tool,
805                                       struct perf_thread_map *threads,
806                                       perf_event__handler_t process,
807                                       struct machine *machine,
808                                       bool mmap_data)
809 {
810         union perf_event *comm_event, *mmap_event, *fork_event;
811         union perf_event *namespaces_event;
812         int err = -1, thread, j;
813
814         comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
815         if (comm_event == NULL)
816                 goto out;
817
818         mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
819         if (mmap_event == NULL)
820                 goto out_free_comm;
821
822         fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
823         if (fork_event == NULL)
824                 goto out_free_mmap;
825
826         namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
827                                   (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
828                                   machine->id_hdr_size);
829         if (namespaces_event == NULL)
830                 goto out_free_fork;
831
832         err = 0;
833         for (thread = 0; thread < threads->nr; ++thread) {
834                 if (__event__synthesize_thread(comm_event, mmap_event,
835                                                fork_event, namespaces_event,
836                                                perf_thread_map__pid(threads, thread), 0,
837                                                process, tool, machine,
838                                                mmap_data)) {
839                         err = -1;
840                         break;
841                 }
842
843                 /*
844                  * comm.pid is set to thread group id by
845                  * perf_event__synthesize_comm
846                  */
847                 if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
848                         bool need_leader = true;
849
850                         /* is thread group leader in thread_map? */
851                         for (j = 0; j < threads->nr; ++j) {
852                                 if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
853                                         need_leader = false;
854                                         break;
855                                 }
856                         }
857
858                         /* if not, generate events for it */
859                         if (need_leader &&
860                             __event__synthesize_thread(comm_event, mmap_event,
861                                                        fork_event, namespaces_event,
862                                                        comm_event->comm.pid, 0,
863                                                        process, tool, machine,
864                                                        mmap_data)) {
865                                 err = -1;
866                                 break;
867                         }
868                 }
869         }
870         free(namespaces_event);
871 out_free_fork:
872         free(fork_event);
873 out_free_mmap:
874         free(mmap_event);
875 out_free_comm:
876         free(comm_event);
877 out:
878         return err;
879 }
880
881 static int __perf_event__synthesize_threads(struct perf_tool *tool,
882                                             perf_event__handler_t process,
883                                             struct machine *machine,
884                                             bool mmap_data,
885                                             struct dirent **dirent,
886                                             int start,
887                                             int num)
888 {
889         union perf_event *comm_event, *mmap_event, *fork_event;
890         union perf_event *namespaces_event;
891         int err = -1;
892         char *end;
893         pid_t pid;
894         int i;
895
896         comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
897         if (comm_event == NULL)
898                 goto out;
899
900         mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
901         if (mmap_event == NULL)
902                 goto out_free_comm;
903
904         fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
905         if (fork_event == NULL)
906                 goto out_free_mmap;
907
908         namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
909                                   (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
910                                   machine->id_hdr_size);
911         if (namespaces_event == NULL)
912                 goto out_free_fork;
913
914         for (i = start; i < start + num; i++) {
915                 if (!isdigit(dirent[i]->d_name[0]))
916                         continue;
917
918                 pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
919                 /* only interested in proper numerical dirents */
920                 if (*end)
921                         continue;
922                 /*
923                  * We may race with exiting thread, so don't stop just because
924                  * one thread couldn't be synthesized.
925                  */
926                 __event__synthesize_thread(comm_event, mmap_event, fork_event,
927                                            namespaces_event, pid, 1, process,
928                                            tool, machine, mmap_data);
929         }
930         err = 0;
931
932         free(namespaces_event);
933 out_free_fork:
934         free(fork_event);
935 out_free_mmap:
936         free(mmap_event);
937 out_free_comm:
938         free(comm_event);
939 out:
940         return err;
941 }
942
943 struct synthesize_threads_arg {
944         struct perf_tool *tool;
945         perf_event__handler_t process;
946         struct machine *machine;
947         bool mmap_data;
948         struct dirent **dirent;
949         int num;
950         int start;
951 };
952
953 static void *synthesize_threads_worker(void *arg)
954 {
955         struct synthesize_threads_arg *args = arg;
956
957         __perf_event__synthesize_threads(args->tool, args->process,
958                                          args->machine, args->mmap_data,
959                                          args->dirent,
960                                          args->start, args->num);
961         return NULL;
962 }
963
964 int perf_event__synthesize_threads(struct perf_tool *tool,
965                                    perf_event__handler_t process,
966                                    struct machine *machine,
967                                    bool mmap_data,
968                                    unsigned int nr_threads_synthesize)
969 {
970         struct synthesize_threads_arg *args = NULL;
971         pthread_t *synthesize_threads = NULL;
972         char proc_path[PATH_MAX];
973         struct dirent **dirent;
974         int num_per_thread;
975         int m, n, i, j;
976         int thread_nr;
977         int base = 0;
978         int err = -1;
979
980
981         if (machine__is_default_guest(machine))
982                 return 0;
983
984         snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
985         n = scandir(proc_path, &dirent, filter_task, alphasort);
986         if (n < 0)
987                 return err;
988
989         if (nr_threads_synthesize == UINT_MAX)
990                 thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
991         else
992                 thread_nr = nr_threads_synthesize;
993
994         if (thread_nr <= 1) {
995                 err = __perf_event__synthesize_threads(tool, process,
996                                                        machine, mmap_data,
997                                                        dirent, base, n);
998                 goto free_dirent;
999         }
1000         if (thread_nr > n)
1001                 thread_nr = n;
1002
1003         synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
1004         if (synthesize_threads == NULL)
1005                 goto free_dirent;
1006
1007         args = calloc(thread_nr, sizeof(*args));
1008         if (args == NULL)
1009                 goto free_threads;
1010
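        /*
         * Split the /proc dirents across the worker threads: the first
         * n % thread_nr workers each take one extra entry to cover the
         * remainder.
         */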
1011         num_per_thread = n / thread_nr;
1012         m = n % thread_nr;
1013         for (i = 0; i < thread_nr; i++) {
1014                 args[i].tool = tool;
1015                 args[i].process = process;
1016                 args[i].machine = machine;
1017                 args[i].mmap_data = mmap_data;
1018                 args[i].dirent = dirent;
1019         }
1020         for (i = 0; i < m; i++) {
1021                 args[i].num = num_per_thread + 1;
1022                 args[i].start = i * args[i].num;
1023         }
1024         if (i != 0)
1025                 base = args[i-1].start + args[i-1].num;
1026         for (j = i; j < thread_nr; j++) {
1027                 args[j].num = num_per_thread;
1028                 args[j].start = base + (j - i) * args[i].num;
1029         }
1030
1031         for (i = 0; i < thread_nr; i++) {
1032                 if (pthread_create(&synthesize_threads[i], NULL,
1033                                    synthesize_threads_worker, &args[i]))
1034                         goto out_join;
1035         }
1036         err = 0;
1037 out_join:
1038         for (i = 0; i < thread_nr; i++)
1039                 pthread_join(synthesize_threads[i], NULL);
1040         free(args);
1041 free_threads:
1042         free(synthesize_threads);
1043 free_dirent:
1044         for (i = 0; i < n; i++)
1045                 zfree(&dirent[i]);
1046         free(dirent);
1047
1048         return err;
1049 }
1050
1051 int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
1052                                               perf_event__handler_t process __maybe_unused,
1053                                               struct machine *machine __maybe_unused)
1054 {
1055         return 0;
1056 }
1057
1058 static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
1059                                                 perf_event__handler_t process,
1060                                                 struct machine *machine)
1061 {
1062         union perf_event *event;
1063         size_t size = symbol_conf.buildid_mmap2 ?
1064                         sizeof(event->mmap2) : sizeof(event->mmap);
1065         struct map *map = machine__kernel_map(machine);
1066         struct kmap *kmap;
1067         int err;
1068
1069         if (map == NULL)
1070                 return -1;
1071
1072         kmap = map__kmap(map);
1073         if (!kmap->ref_reloc_sym)
1074                 return -1;
1075
1076         /*
1077          * We should get this from /sys/kernel/sections/.text, but until that
1078          * is available use this, and once it is, keep this as a fallback for
1079          * older kernels.
1080          */
1081         event = zalloc(size + machine->id_hdr_size);
1082         if (event == NULL) {
1083                 pr_debug("Not enough memory synthesizing mmap event "
1084                          "for the kernel map\n");
1085                 return -1;
1086         }
1087
1088         if (machine__is_host(machine)) {
1089                 /*
1090                  * kernel uses PERF_RECORD_MISC_USER for user space maps,
1091                  * see kernel/perf_event.c __perf_event_mmap
1092                  */
1093                 event->header.misc = PERF_RECORD_MISC_KERNEL;
1094         } else {
1095                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
1096         }
1097
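        /*
         * The ref_reloc_sym address (e.g. _text) is stored in pgoff so the
         * report side can detect and compensate for a relocated kernel.
         */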
1098         if (symbol_conf.buildid_mmap2) {
1099                 size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
1100                                 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1101                 size = PERF_ALIGN(size, sizeof(u64));
1102                 event->mmap2.header.type = PERF_RECORD_MMAP2;
1103                 event->mmap2.header.size = (sizeof(event->mmap2) -
1104                                 (sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
1105                 event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
1106                 event->mmap2.start = map->start;
1107                 event->mmap2.len   = map->end - event->mmap.start;
1108                 event->mmap2.pid   = machine->pid;
1109
1110                 perf_record_mmap2__read_build_id(&event->mmap2, true);
1111         } else {
1112                 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
1113                                 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1114                 size = PERF_ALIGN(size, sizeof(u64));
1115                 event->mmap.header.type = PERF_RECORD_MMAP;
1116                 event->mmap.header.size = (sizeof(event->mmap) -
1117                                 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
1118                 event->mmap.pgoff = kmap->ref_reloc_sym->addr;
1119                 event->mmap.start = map->start;
1120                 event->mmap.len   = map->end - event->mmap.start;
1121                 event->mmap.pid   = machine->pid;
1122         }
1123
1124         err = perf_tool__process_synth_event(tool, event, machine, process);
1125         free(event);
1126
1127         return err;
1128 }
1129
1130 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
1131                                        perf_event__handler_t process,
1132                                        struct machine *machine)
1133 {
1134         int err;
1135
1136         err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
1137         if (err < 0)
1138                 return err;
1139
1140         return perf_event__synthesize_extra_kmaps(tool, process, machine);
1141 }
1142
1143 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
1144                                       struct perf_thread_map *threads,
1145                                       perf_event__handler_t process,
1146                                       struct machine *machine)
1147 {
1148         union perf_event *event;
1149         int i, err, size;
1150
1151         size  = sizeof(event->thread_map);
1152         size += threads->nr * sizeof(event->thread_map.entries[0]);
1153
1154         event = zalloc(size);
1155         if (!event)
1156                 return -ENOMEM;
1157
1158         event->header.type = PERF_RECORD_THREAD_MAP;
1159         event->header.size = size;
1160         event->thread_map.nr = threads->nr;
1161
1162         for (i = 0; i < threads->nr; i++) {
1163                 struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
1164                 char *comm = perf_thread_map__comm(threads, i);
1165
1166                 if (!comm)
1167                         comm = (char *) "";
1168
1169                 entry->pid = perf_thread_map__pid(threads, i);
1170                 strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1171         }
1172
1173         err = process(tool, event, NULL, machine);
1174
1175         free(event);
1176         return err;
1177 }
1178
1179 static void synthesize_cpus(struct cpu_map_entries *cpus,
1180                             struct perf_cpu_map *map)
1181 {
1182         int i;
1183
1184         cpus->nr = map->nr;
1185
1186         for (i = 0; i < map->nr; i++)
1187                 cpus->cpu[i] = map->map[i];
1188 }
1189
1190 static void synthesize_mask(struct perf_record_record_cpu_map *mask,
1191                             struct perf_cpu_map *map, int max)
1192 {
1193         int i;
1194
1195         mask->nr = BITS_TO_LONGS(max);
1196         mask->long_size = sizeof(long);
1197
1198         for (i = 0; i < map->nr; i++)
1199                 set_bit(map->map[i], mask->mask);
1200 }
1201
1202 static size_t cpus_size(struct perf_cpu_map *map)
1203 {
1204         return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
1205 }
1206
1207 static size_t mask_size(struct perf_cpu_map *map, int *max)
1208 {
1209         int i;
1210
1211         *max = 0;
1212
1213         for (i = 0; i < map->nr; i++) {
1214                 /* bit position of the cpu is its number + 1 (cpu 0 still needs one bit) */
1215                 int bit = map->map[i] + 1;
1216
1217                 if (bit > *max)
1218                         *max = bit;
1219         }
1220
1221         return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
1222 }
1223
1224 void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
1225 {
1226         size_t size_cpus, size_mask;
1227         bool is_dummy = perf_cpu_map__empty(map);
1228
1229         /*
1230          * Both array and mask data have variable size based
1231          * on the number of cpus and their actual values.
1232          * The size of the 'struct perf_record_cpu_map_data' is:
1233          *
1234          *   array = size of 'struct cpu_map_entries' +
1235          *           number of cpus * sizeof(u64)
1236          *
1237          *   mask  = size of 'struct perf_record_record_cpu_map' +
1238          *           maximum cpu bit converted to size of longs
1239          *
1240          * and finally + the size of 'struct perf_record_cpu_map_data'.
1241          */
1242         size_cpus = cpus_size(map);
1243         size_mask = mask_size(map, max);
1244
1245         if (is_dummy || (size_cpus < size_mask)) {
1246                 *size += size_cpus;
1247                 *type  = PERF_CPU_MAP__CPUS;
1248         } else {
1249                 *size += size_mask;
1250                 *type  = PERF_CPU_MAP__MASK;
1251         }
1252
1253         *size += sizeof(struct perf_record_cpu_map_data);
1254         *size = PERF_ALIGN(*size, sizeof(u64));
1255         return zalloc(*size);
1256 }
1257
1258 void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
1259                               u16 type, int max)
1260 {
1261         data->type = type;
1262
1263         switch (type) {
1264         case PERF_CPU_MAP__CPUS:
1265                 synthesize_cpus((struct cpu_map_entries *) data->data, map);
1266                 break;
1267         case PERF_CPU_MAP__MASK:
1268                 synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
1269         default:
1270                 break;
1271         }
1272 }
1273
1274 static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
1275 {
1276         size_t size = sizeof(struct perf_record_cpu_map);
1277         struct perf_record_cpu_map *event;
1278         int max;
1279         u16 type;
1280
1281         event = cpu_map_data__alloc(map, &size, &type, &max);
1282         if (!event)
1283                 return NULL;
1284
1285         event->header.type = PERF_RECORD_CPU_MAP;
1286         event->header.size = size;
1287         event->data.type   = type;
1288
1289         cpu_map_data__synthesize(&event->data, map, type, max);
1290         return event;
1291 }
1292
1293 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1294                                    struct perf_cpu_map *map,
1295                                    perf_event__handler_t process,
1296                                    struct machine *machine)
1297 {
1298         struct perf_record_cpu_map *event;
1299         int err;
1300
1301         event = cpu_map_event__new(map);
1302         if (!event)
1303                 return -ENOMEM;
1304
1305         err = process(tool, (union perf_event *) event, NULL, machine);
1306
1307         free(event);
1308         return err;
1309 }
1310
1311 int perf_event__synthesize_stat_config(struct perf_tool *tool,
1312                                        struct perf_stat_config *config,
1313                                        perf_event__handler_t process,
1314                                        struct machine *machine)
1315 {
1316         struct perf_record_stat_config *event;
1317         int size, i = 0, err;
1318
1319         size  = sizeof(*event);
1320         size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1321
1322         event = zalloc(size);
1323         if (!event)
1324                 return -ENOMEM;
1325
1326         event->header.type = PERF_RECORD_STAT_CONFIG;
1327         event->header.size = size;
1328         event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1329
1330 #define ADD(__term, __val)                                      \
1331         event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;   \
1332         event->data[i].val = __val;                             \
1333         i++;
1334
1335         ADD(AGGR_MODE,  config->aggr_mode)
1336         ADD(INTERVAL,   config->interval)
1337         ADD(SCALE,      config->scale)
1338
1339         WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1340                   "stat config terms unbalanced\n");
1341 #undef ADD
1342
1343         err = process(tool, (union perf_event *) event, NULL, machine);
1344
1345         free(event);
1346         return err;
1347 }
1348
1349 int perf_event__synthesize_stat(struct perf_tool *tool,
1350                                 u32 cpu, u32 thread, u64 id,
1351                                 struct perf_counts_values *count,
1352                                 perf_event__handler_t process,
1353                                 struct machine *machine)
1354 {
1355         struct perf_record_stat event;
1356
1357         event.header.type = PERF_RECORD_STAT;
1358         event.header.size = sizeof(event);
1359         event.header.misc = 0;
1360
1361         event.id        = id;
1362         event.cpu       = cpu;
1363         event.thread    = thread;
1364         event.val       = count->val;
1365         event.ena       = count->ena;
1366         event.run       = count->run;
1367
1368         return process(tool, (union perf_event *) &event, NULL, machine);
1369 }
1370
1371 int perf_event__synthesize_stat_round(struct perf_tool *tool,
1372                                       u64 evtime, u64 type,
1373                                       perf_event__handler_t process,
1374                                       struct machine *machine)
1375 {
1376         struct perf_record_stat_round event;
1377
1378         event.header.type = PERF_RECORD_STAT_ROUND;
1379         event.header.size = sizeof(event);
1380         event.header.misc = 0;
1381
1382         event.time = evtime;
1383         event.type = type;
1384
1385         return process(tool, (union perf_event *) &event, NULL, machine);
1386 }
1387
1388 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1389 {
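        /*
         * Mirror the layout written by the kernel's perf_output_sample():
         * one u64 (or a variable-sized blob) per enabled PERF_SAMPLE_* bit,
         * in the same order, so the synthesized record matches what the
         * kernel would have produced.
         */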
1390         size_t sz, result = sizeof(struct perf_record_sample);
1391
1392         if (type & PERF_SAMPLE_IDENTIFIER)
1393                 result += sizeof(u64);
1394
1395         if (type & PERF_SAMPLE_IP)
1396                 result += sizeof(u64);
1397
1398         if (type & PERF_SAMPLE_TID)
1399                 result += sizeof(u64);
1400
1401         if (type & PERF_SAMPLE_TIME)
1402                 result += sizeof(u64);
1403
1404         if (type & PERF_SAMPLE_ADDR)
1405                 result += sizeof(u64);
1406
1407         if (type & PERF_SAMPLE_ID)
1408                 result += sizeof(u64);
1409
1410         if (type & PERF_SAMPLE_STREAM_ID)
1411                 result += sizeof(u64);
1412
1413         if (type & PERF_SAMPLE_CPU)
1414                 result += sizeof(u64);
1415
1416         if (type & PERF_SAMPLE_PERIOD)
1417                 result += sizeof(u64);
1418
1419         if (type & PERF_SAMPLE_READ) {
1420                 result += sizeof(u64);
1421                 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1422                         result += sizeof(u64);
1423                 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1424                         result += sizeof(u64);
1425                 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1426                 if (read_format & PERF_FORMAT_GROUP) {
1427                         sz = sample->read.group.nr *
1428                              sizeof(struct sample_read_value);
1429                         result += sz;
1430                 } else {
1431                         result += sizeof(u64);
1432                 }
1433         }
1434
1435         if (type & PERF_SAMPLE_CALLCHAIN) {
1436                 sz = (sample->callchain->nr + 1) * sizeof(u64);
1437                 result += sz;
1438         }
1439
1440         if (type & PERF_SAMPLE_RAW) {
1441                 result += sizeof(u32);
1442                 result += sample->raw_size;
1443         }
1444
1445         if (type & PERF_SAMPLE_BRANCH_STACK) {
1446                 sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1447                 /* nr, hw_idx */
1448                 sz += 2 * sizeof(u64);
1449                 result += sz;
1450         }
1451
1452         if (type & PERF_SAMPLE_REGS_USER) {
1453                 if (sample->user_regs.abi) {
1454                         result += sizeof(u64);
1455                         sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1456                         result += sz;
1457                 } else {
1458                         result += sizeof(u64);
1459                 }
1460         }
1461
1462         if (type & PERF_SAMPLE_STACK_USER) {
1463                 sz = sample->user_stack.size;
1464                 result += sizeof(u64);
1465                 if (sz) {
1466                         result += sz;
1467                         result += sizeof(u64);
1468                 }
1469         }
1470
1471         if (type & PERF_SAMPLE_WEIGHT_TYPE)
1472                 result += sizeof(u64);
1473
1474         if (type & PERF_SAMPLE_DATA_SRC)
1475                 result += sizeof(u64);
1476
1477         if (type & PERF_SAMPLE_TRANSACTION)
1478                 result += sizeof(u64);
1479
1480         if (type & PERF_SAMPLE_REGS_INTR) {
1481                 if (sample->intr_regs.abi) {
1482                         result += sizeof(u64);
1483                         sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1484                         result += sz;
1485                 } else {
1486                         result += sizeof(u64);
1487                 }
1488         }
1489
1490         if (type & PERF_SAMPLE_PHYS_ADDR)
1491                 result += sizeof(u64);
1492
1493         if (type & PERF_SAMPLE_CGROUP)
1494                 result += sizeof(u64);
1495
1496         if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
1497                 result += sizeof(u64);
1498
1499         if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
1500                 result += sizeof(u64);
1501
1502         if (type & PERF_SAMPLE_AUX) {
1503                 result += sizeof(u64);
1504                 result += sample->aux_sample.size;
1505         }
1506
1507         return result;
1508 }
1509
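/*
 * Serialize a parsed perf_sample back into the raw PERF_RECORD_SAMPLE
 * layout: each field selected by 'type' (and, for PERF_SAMPLE_READ, by
 * 'read_format') is appended to event->sample.array in kernel order.
 */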
1510 int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1511                                   const struct perf_sample *sample)
1512 {
1513         __u64 *array;
1514         size_t sz;
1515         /*
1516          * used for cross-endian analysis. See git commit 65014ab3
1517          * for why this goofiness is needed.
1518          */
1519         union u64_swap u;
1520
1521         array = event->sample.array;
1522
1523         if (type & PERF_SAMPLE_IDENTIFIER) {
1524                 *array = sample->id;
1525                 array++;
1526         }
1527
1528         if (type & PERF_SAMPLE_IP) {
1529                 *array = sample->ip;
1530                 array++;
1531         }
1532
1533         if (type & PERF_SAMPLE_TID) {
1534                 u.val32[0] = sample->pid;
1535                 u.val32[1] = sample->tid;
1536                 *array = u.val64;
1537                 array++;
1538         }
1539
1540         if (type & PERF_SAMPLE_TIME) {
1541                 *array = sample->time;
1542                 array++;
1543         }
1544
1545         if (type & PERF_SAMPLE_ADDR) {
1546                 *array = sample->addr;
1547                 array++;
1548         }
1549
1550         if (type & PERF_SAMPLE_ID) {
1551                 *array = sample->id;
1552                 array++;
1553         }
1554
1555         if (type & PERF_SAMPLE_STREAM_ID) {
1556                 *array = sample->stream_id;
1557                 array++;
1558         }
1559
1560         if (type & PERF_SAMPLE_CPU) {
1561                 u.val32[0] = sample->cpu;
1562                 u.val32[1] = 0;
1563                 *array = u.val64;
1564                 array++;
1565         }
1566
1567         if (type & PERF_SAMPLE_PERIOD) {
1568                 *array = sample->period;
1569                 array++;
1570         }
1571
1572         if (type & PERF_SAMPLE_READ) {
1573                 if (read_format & PERF_FORMAT_GROUP)
1574                         *array = sample->read.group.nr;
1575                 else
1576                         *array = sample->read.one.value;
1577                 array++;
1578
1579                 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1580                         *array = sample->read.time_enabled;
1581                         array++;
1582                 }
1583
1584                 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1585                         *array = sample->read.time_running;
1586                         array++;
1587                 }
1588
1589                 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1590                 if (read_format & PERF_FORMAT_GROUP) {
1591                         sz = sample->read.group.nr *
1592                              sizeof(struct sample_read_value);
1593                         memcpy(array, sample->read.group.values, sz);
1594                         array = (void *)array + sz;
1595                 } else {
1596                         *array = sample->read.one.id;
1597                         array++;
1598                 }
1599         }
1600
1601         if (type & PERF_SAMPLE_CALLCHAIN) {
1602                 sz = (sample->callchain->nr + 1) * sizeof(u64);
1603                 memcpy(array, sample->callchain, sz);
1604                 array = (void *)array + sz;
1605         }
1606
1607         if (type & PERF_SAMPLE_RAW) {
1608                 u.val32[0] = sample->raw_size;
1609                 *array = u.val64;
1610                 array = (void *)array + sizeof(u32);
1611
1612                 memcpy(array, sample->raw_data, sample->raw_size);
1613                 array = (void *)array + sample->raw_size;
1614         }
1615
1616         if (type & PERF_SAMPLE_BRANCH_STACK) {
1617                 sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1618                 /* nr, hw_idx */
1619                 sz += 2 * sizeof(u64);
1620                 memcpy(array, sample->branch_stack, sz);
1621                 array = (void *)array + sz;
1622         }
1623
1624         if (type & PERF_SAMPLE_REGS_USER) {
1625                 if (sample->user_regs.abi) {
1626                         *array++ = sample->user_regs.abi;
1627                         sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1628                         memcpy(array, sample->user_regs.regs, sz);
1629                         array = (void *)array + sz;
1630                 } else {
1631                         *array++ = 0;
1632                 }
1633         }
1634
1635         if (type & PERF_SAMPLE_STACK_USER) {
1636                 sz = sample->user_stack.size;
1637                 *array++ = sz;
1638                 if (sz) {
1639                         memcpy(array, sample->user_stack.data, sz);
1640                         array = (void *)array + sz;
1641                         *array++ = sz;
1642                 }
1643         }
1644
1645         if (type & PERF_SAMPLE_WEIGHT_TYPE) {
1646                 *array = sample->weight;
1647                 if (type & PERF_SAMPLE_WEIGHT_STRUCT)
1648                         *array &= 0xffffffff;
1649                 array++;
1650         }
1651
1652         if (type & PERF_SAMPLE_DATA_SRC) {
1653                 *array = sample->data_src;
1654                 array++;
1655         }
1656
1657         if (type & PERF_SAMPLE_TRANSACTION) {
1658                 *array = sample->transaction;
1659                 array++;
1660         }
1661
1662         if (type & PERF_SAMPLE_REGS_INTR) {
1663                 if (sample->intr_regs.abi) {
1664                         *array++ = sample->intr_regs.abi;
1665                         sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1666                         memcpy(array, sample->intr_regs.regs, sz);
1667                         array = (void *)array + sz;
1668                 } else {
1669                         *array++ = 0;
1670                 }
1671         }
1672
1673         if (type & PERF_SAMPLE_PHYS_ADDR) {
1674                 *array = sample->phys_addr;
1675                 array++;
1676         }
1677
1678         if (type & PERF_SAMPLE_CGROUP) {
1679                 *array = sample->cgroup;
1680                 array++;
1681         }
1682
1683         if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
1684                 *array = sample->data_page_size;
1685                 array++;
1686         }
1687
1688         if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
1689                 *array = sample->code_page_size;
1690                 array++;
1691         }
1692
1693         if (type & PERF_SAMPLE_AUX) {
1694                 sz = sample->aux_sample.size;
1695                 *array++ = sz;
1696                 memcpy(array, sample->aux_sample.data, sz);
1697                 array = (void *)array + sz;
1698         }
1699
1700         return 0;
1701 }
1702
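/*
 * Emit PERF_RECORD_ID_INDEX events mapping every sample id in the evlist
 * to its idx/cpu/tid.  header.size is a u16, so the entries are flushed
 * in chunks of at most max_nr whenever the current event fills up.
 */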
1703 int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1704                                     struct evlist *evlist, struct machine *machine)
1705 {
1706         union perf_event *ev;
1707         struct evsel *evsel;
1708         size_t nr = 0, i = 0, sz, max_nr, n;
1709         int err;
1710
1711         pr_debug2("Synthesizing id index\n");
1712
1713         max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
1714                  sizeof(struct id_index_entry);
1715
1716         evlist__for_each_entry(evlist, evsel)
1717                 nr += evsel->core.ids;
1718
1719         n = nr > max_nr ? max_nr : nr;
1720         sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
1721         ev = zalloc(sz);
1722         if (!ev)
1723                 return -ENOMEM;
1724
1725         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1726         ev->id_index.header.size = sz;
1727         ev->id_index.nr = n;
1728
1729         evlist__for_each_entry(evlist, evsel) {
1730                 u32 j;
1731
1732                 for (j = 0; j < evsel->core.ids; j++) {
1733                         struct id_index_entry *e;
1734                         struct perf_sample_id *sid;
1735
1736                         if (i >= n) {
1737                                 err = process(tool, ev, NULL, machine);
1738                                 if (err)
1739                                         goto out_err;
1740                                 nr -= n;
1741                                 i = 0;
1742                         }
1743
1744                         e = &ev->id_index.entries[i++];
1745
1746                         e->id = evsel->core.id[j];
1747
1748                         sid = evlist__id2sid(evlist, e->id);
1749                         if (!sid) {
1750                                 free(ev);
1751                                 return -ENOENT;
1752                         }
1753
1754                         e->idx = sid->idx;
1755                         e->cpu = sid->cpu;
1756                         e->tid = sid->tid;
1757                 }
1758         }
1759
1760         sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
1761         ev->id_index.header.size = sz;
1762         ev->id_index.nr = nr;
1763
1764         err = process(tool, ev, NULL, machine);
1765 out_err:
1766         free(ev);
1767
1768         return err;
1769 }
1770
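/*
 * Synthesize events for the threads that already exist when recording
 * starts: walk only the given thread map for a task target, walk all of
 * /proc for a CPU target, and do nothing for a command target since its
 * events will be generated once the workload is started.
 */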
1771 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1772                                   struct target *target, struct perf_thread_map *threads,
1773                                   perf_event__handler_t process, bool data_mmap,
1774                                   unsigned int nr_threads_synthesize)
1775 {
1776         if (target__has_task(target))
1777                 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1778         else if (target__has_cpu(target))
1779                 return perf_event__synthesize_threads(tool, process,
1780                                                       machine, data_mmap,
1781                                                       nr_threads_synthesize);
1782         /* command specified */
1783         return 0;
1784 }
1785
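/* Convenience wrapper using the default perf_event__process callback. */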
1786 int machine__synthesize_threads(struct machine *machine, struct target *target,
1787                                 struct perf_thread_map *threads, bool data_mmap,
1788                                 unsigned int nr_threads_synthesize)
1789 {
1790         return __machine__synthesize_threads(machine, NULL, target, threads,
1791                                              perf_event__process, data_mmap,
1792                                              nr_threads_synthesize);
1793 }
1794
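/*
 * Allocate a zeroed PERF_RECORD_EVENT_UPDATE with room for a payload of
 * 'size' bytes (rounded up to a u64 multiple), filling in the update type
 * and the event id; callers place their payload in ev->data.
 */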
1795 static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1796 {
1797         struct perf_record_event_update *ev;
1798
1799         size += sizeof(*ev);
1800         size  = PERF_ALIGN(size, sizeof(u64));
1801
1802         ev = zalloc(size);
1803         if (ev) {
1804                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
1805                 ev->header.size = (u16)size;
1806                 ev->type        = type;
1807                 ev->id          = id;
1808         }
1809         return ev;
1810 }
1811
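/*
 * The event update records below carry per-evsel details that the attr
 * record does not: the unit string, the scaling factor, the event name
 * and the owned cpu map, each keyed by the evsel's first id.
 */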
1812 int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1813                                              perf_event__handler_t process)
1814 {
1815         size_t size = strlen(evsel->unit);
1816         struct perf_record_event_update *ev;
1817         int err;
1818
1819         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1820         if (ev == NULL)
1821                 return -ENOMEM;
1822
1823         strlcpy(ev->data, evsel->unit, size + 1);
1824         err = process(tool, (union perf_event *)ev, NULL, NULL);
1825         free(ev);
1826         return err;
1827 }
1828
1829 int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1830                                               perf_event__handler_t process)
1831 {
1832         struct perf_record_event_update *ev;
1833         struct perf_record_event_update_scale *ev_data;
1834         int err;
1835
1836         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1837         if (ev == NULL)
1838                 return -ENOMEM;
1839
1840         ev_data = (struct perf_record_event_update_scale *)ev->data;
1841         ev_data->scale = evsel->scale;
1842         err = process(tool, (union perf_event *)ev, NULL, NULL);
1843         free(ev);
1844         return err;
1845 }
1846
1847 int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1848                                              perf_event__handler_t process)
1849 {
1850         struct perf_record_event_update *ev;
1851         size_t len = strlen(evsel->name);
1852         int err;
1853
1854         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1855         if (ev == NULL)
1856                 return -ENOMEM;
1857
1858         strlcpy(ev->data, evsel->name, len + 1);
1859         err = process(tool, (union perf_event *)ev, NULL, NULL);
1860         free(ev);
1861         return err;
1862 }
1863
1864 int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
1865                                              perf_event__handler_t process)
1866 {
1867         size_t size = sizeof(struct perf_record_event_update);
1868         struct perf_record_event_update *ev;
1869         int max, err;
1870         u16 type;
1871
1872         if (!evsel->core.own_cpus)
1873                 return 0;
1874
1875         ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
1876         if (!ev)
1877                 return -ENOMEM;
1878
1879         ev->header.type = PERF_RECORD_EVENT_UPDATE;
1880         ev->header.size = (u16)size;
1881         ev->type        = PERF_EVENT_UPDATE__CPUS;
1882         ev->id          = evsel->core.id[0];
1883
1884         cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
1885                                  evsel->core.own_cpus, type, max);
1886
1887         err = process(tool, (union perf_event *)ev, NULL, NULL);
1888         free(ev);
1889         return err;
1890 }
1891
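/*
 * Emit one PERF_RECORD_HEADER_ATTR per evsel in the evlist, each carrying
 * its perf_event_attr plus the sample ids allocated for it.
 */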
1892 int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
1893                                  perf_event__handler_t process)
1894 {
1895         struct evsel *evsel;
1896         int err = 0;
1897
1898         evlist__for_each_entry(evlist, evsel) {
1899                 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
1900                                                   evsel->core.id, process);
1901                 if (err) {
1902                         pr_debug("failed to create perf header attribute\n");
1903                         return err;
1904                 }
1905         }
1906
1907         return err;
1908 }
1909
1910 static bool has_unit(struct evsel *evsel)
1911 {
1912         return evsel->unit && *evsel->unit;
1913 }
1914
1915 static bool has_scale(struct evsel *evsel)
1916 {
1917         return evsel->scale != 1;
1918 }
1919
1920 int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
1921                                       perf_event__handler_t process, bool is_pipe)
1922 {
1923         struct evsel *evsel;
1924         int err;
1925
1926         /*
1927          * Synthesize other event details not carried within the
1928          * attr event: unit, scale, name.
1929          */
1930         evlist__for_each_entry(evsel_list, evsel) {
1931                 if (!evsel->supported)
1932                         continue;
1933
1934                 /*
1935                  * Synthesize unit and scale only if they are defined.
1936                  */
1937                 if (has_unit(evsel)) {
1938                         err = perf_event__synthesize_event_update_unit(tool, evsel, process);
1939                         if (err < 0) {
1940                                 pr_err("Couldn't synthesize evsel unit.\n");
1941                                 return err;
1942                         }
1943                 }
1944
1945                 if (has_scale(evsel)) {
1946                         err = perf_event__synthesize_event_update_scale(tool, evsel, process);
1947                         if (err < 0) {
1948                                 pr_err("Couldn't synthesize evsel scale.\n");
1949                                 return err;
1950                         }
1951                 }
1952
1953                 if (evsel->core.own_cpus) {
1954                         err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
1955                         if (err < 0) {
1956                                 pr_err("Couldn't synthesize evsel cpus.\n");
1957                                 return err;
1958                         }
1959                 }
1960
1961                 /*
1962                  * Name is needed only for pipe output,
1963                  * Name is needed only for pipe output, as
1964                  */
1965                 if (is_pipe) {
1966                         err = perf_event__synthesize_event_update_name(tool, evsel, process);
1967                         if (err < 0) {
1968                                 pr_err("Couldn't synthesize evsel name.\n");
1969                                 return err;
1970                         }
1971                 }
1972         }
1973         return 0;
1974 }
1975
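/*
 * Build a single PERF_RECORD_HEADER_ATTR: the perf_event_attr padded to a
 * u64 boundary followed by 'ids' sample ids.  The total size must fit the
 * u16 header.size field, otherwise -E2BIG is returned.
 */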
1976 int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
1977                                 u32 ids, u64 *id, perf_event__handler_t process)
1978 {
1979         union perf_event *ev;
1980         size_t size;
1981         int err;
1982
1983         size = sizeof(struct perf_event_attr);
1984         size = PERF_ALIGN(size, sizeof(u64));
1985         size += sizeof(struct perf_event_header);
1986         size += ids * sizeof(u64);
1987
1988         ev = zalloc(size);
1989
1990         if (ev == NULL)
1991                 return -ENOMEM;
1992
1993         ev->attr.attr = *attr;
1994         memcpy(ev->attr.id, id, ids * sizeof(u64));
1995
1996         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
1997         ev->attr.header.size = (u16)size;
1998
1999         if (ev->attr.header.size == size)
2000                 err = process(tool, ev, NULL, NULL);
2001         else
2002                 err = -E2BIG;
2003
2004         free(ev);
2005
2006         return err;
2007 }
2008
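/*
 * Emit a PERF_RECORD_HEADER_TRACING_DATA record followed by the tracing
 * data itself, padded to a u64 boundary, for pipe mode output.
 */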
2009 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
2010                                         perf_event__handler_t process)
2011 {
2012         union perf_event ev;
2013         struct tracing_data *tdata;
2014         ssize_t size = 0, aligned_size = 0, padding;
2015         struct feat_fd ff;
2016
2017         /*
2018          * We are going to store the size of the data followed
2019          * by the data contents. Since fd refers to a pipe,
2020          * we cannot seek back to store the size of the data once
2021          * we know it. Instead we:
2022          *
2023          * - write the tracing data to the temp file
2024          * - get/write the data size to pipe
2025          * - write the tracing data from the temp file
2026          *   to the pipe
2027          */
2028         tdata = tracing_data_get(&evlist->core.entries, fd, true);
2029         if (!tdata)
2030                 return -1;
2031
2032         memset(&ev, 0, sizeof(ev));
2033
2034         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2035         size = tdata->size;
2036         aligned_size = PERF_ALIGN(size, sizeof(u64));
2037         padding = aligned_size - size;
2038         ev.tracing_data.header.size = sizeof(ev.tracing_data);
2039         ev.tracing_data.size = aligned_size;
2040
2041         process(tool, &ev, NULL, NULL);
2042
2043         /*
2044          * The put function will copy all the tracing data
2045          * stored in temp file to the pipe.
2046          */
2047         tracing_data_put(tdata);
2048
2049         ff = (struct feat_fd){ .fd = fd };
2050         if (write_padded(&ff, NULL, 0, padding))
2051                 return -1;
2052
2053         return aligned_size;
2054 }
2055
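/*
 * Emit a PERF_RECORD_HEADER_BUILD_ID for a DSO that samples actually hit,
 * carrying its build id and its long name padded to NAME_ALIGN.
 */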
2056 int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
2057                                     perf_event__handler_t process, struct machine *machine)
2058 {
2059         union perf_event ev;
2060         size_t len;
2061
2062         if (!pos->hit)
2063                 return 0;
2064
2065         memset(&ev, 0, sizeof(ev));
2066
2067         len = pos->long_name_len + 1;
2068         len = PERF_ALIGN(len, NAME_ALIGN);
2069         memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
2070         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2071         ev.build_id.header.misc = misc;
2072         ev.build_id.pid = machine->pid;
2073         ev.build_id.header.size = sizeof(ev.build_id) + len;
2074         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2075
2076         return process(tool, &ev, NULL, machine);
2077 }
2078
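/*
 * Synthesize the metadata records that describe a counting session: the
 * attrs (when requested), the extra attr details, the thread and cpu maps
 * and the stat config, in that order.
 */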
2079 int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
2080                                        struct evlist *evlist, perf_event__handler_t process, bool attrs)
2081 {
2082         int err;
2083
2084         if (attrs) {
2085                 err = perf_event__synthesize_attrs(tool, evlist, process);
2086                 if (err < 0) {
2087                         pr_err("Couldn't synthesize attrs.\n");
2088                         return err;
2089                 }
2090         }
2091
2092         err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
             if (err < 0) {
                     pr_err("Couldn't synthesize extra attrs.\n");
                     return err;
             }

2093         err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
2094         if (err < 0) {
2095                 pr_err("Couldn't synthesize thread map.\n");
2096                 return err;
2097         }
2098
2099         err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
2100         if (err < 0) {
2101                 pr_err("Couldn't synthesize cpu map.\n");
2102                 return err;
2103         }
2104
2105         err = perf_event__synthesize_stat_config(tool, config, process, NULL);
2106         if (err < 0) {
2107                 pr_err("Couldn't synthesize config.\n");
2108                 return err;
2109         }
2110
2111         return 0;
2112 }
2113
2114 extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
2115
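/*
 * Emit each header feature set in the session as a separate
 * PERF_RECORD_HEADER_FEATURE record, using the feat_ops write hooks to
 * fill the payload, and terminate with a HEADER_LAST_FEATURE marker.
 */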
2116 int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
2117                                     struct evlist *evlist, perf_event__handler_t process)
2118 {
2119         struct perf_header *header = &session->header;
2120         struct perf_record_header_feature *fe;
2121         struct feat_fd ff;
2122         size_t sz, sz_hdr;
2123         int feat, ret;
2124
2125         sz_hdr = sizeof(fe->header);
2126         sz = sizeof(union perf_event);
2127         /* get a nice alignment */
2128         sz = PERF_ALIGN(sz, page_size);
2129
2130         memset(&ff, 0, sizeof(ff));
2131
2132         ff.buf = malloc(sz);
2133         if (!ff.buf)
2134                 return -ENOMEM;
2135
2136         ff.size = sz - sz_hdr;
2137         ff.ph = &session->header;
2138
2139         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2140                 if (!feat_ops[feat].synthesize) {
2141                         pr_debug("No synthesize callback for header feature %d\n", feat);
2142                         continue;
2143                 }
2144
2145                 ff.offset = sizeof(*fe);
2146
2147                 ret = feat_ops[feat].write(&ff, evlist);
2148                 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
2149                         pr_debug("Error writing feature\n");
2150                         continue;
2151                 }
2152                 /* ff.buf may have changed due to realloc in do_write() */
2153                 fe = ff.buf;
2154                 memset(fe, 0, sizeof(*fe));
2155
2156                 fe->feat_id = feat;
2157                 fe->header.type = PERF_RECORD_HEADER_FEATURE;
2158                 fe->header.size = ff.offset;
2159
2160                 ret = process(tool, ff.buf, NULL, NULL);
2161                 if (ret) {
2162                         free(ff.buf);
2163                         return ret;
2164                 }
2165         }
2166
2167         /* Send HEADER_LAST_FEATURE mark. */
2168         fe = ff.buf;
2169         fe->feat_id     = HEADER_LAST_FEATURE;
2170         fe->header.type = PERF_RECORD_HEADER_FEATURE;
2171         fe->header.size = sizeof(*fe);
2172
2173         ret = process(tool, ff.buf, NULL, NULL);
2174
2175         free(ff.buf);
2176         return ret;
2177 }