perf tools: Rename struct perf_data_file to perf_data
[linux-2.6-microblaze.git] / tools / perf / util / header.c
1 #include <errno.h>
2 #include <inttypes.h>
3 #include "util.h"
4 #include "string2.h"
5 #include <sys/param.h>
6 #include <sys/types.h>
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <linux/compiler.h>
12 #include <linux/list.h>
13 #include <linux/kernel.h>
14 #include <linux/bitops.h>
15 #include <linux/stringify.h>
16 #include <sys/stat.h>
17 #include <sys/types.h>
18 #include <sys/utsname.h>
19 #include <unistd.h>
20
21 #include "evlist.h"
22 #include "evsel.h"
23 #include "header.h"
24 #include "memswap.h"
25 #include "../perf.h"
26 #include "trace-event.h"
27 #include "session.h"
28 #include "symbol.h"
29 #include "debug.h"
30 #include "cpumap.h"
31 #include "pmu.h"
32 #include "vdso.h"
33 #include "strbuf.h"
34 #include "build-id.h"
35 #include "data.h"
36 #include <api/fs/fs.h>
37 #include "asm/bug.h"
38 #include "tool.h"
39
40 #include "sane_ctype.h"
41
42 /*
43  * magic2 = "PERFILE2"
44  * must be a numerical value to let the endianness
45  * determine the memory layout. That way we are able
46  * to detect endianness when reading the perf.data file
47  * back.
48  *
49  * we check for legacy (PERFFILE) format.
50  */
51 static const char *__perf_magic1 = "PERFFILE";
52 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
53 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
54
55 #define PERF_MAGIC      __perf_magic2
56
57 const char perf_version_string[] = PERF_VERSION;
58
59 struct perf_file_attr {
60         struct perf_event_attr  attr;
61         struct perf_file_section        ids;
62 };
63
64 struct feat_fd {
65         struct perf_header      *ph;
66         int                     fd;
67         void                    *buf;   /* Either buf != NULL or fd >= 0 */
68         ssize_t                 offset;
69         size_t                  size;
70         struct perf_evsel       *events;
71 };
72
73 void perf_header__set_feat(struct perf_header *header, int feat)
74 {
75         set_bit(feat, header->adds_features);
76 }
77
78 void perf_header__clear_feat(struct perf_header *header, int feat)
79 {
80         clear_bit(feat, header->adds_features);
81 }
82
83 bool perf_header__has_feat(const struct perf_header *header, int feat)
84 {
85         return test_bit(feat, header->adds_features);
86 }
87
88 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
89 {
90         ssize_t ret = writen(ff->fd, buf, size);
91
92         if (ret != (ssize_t)size)
93                 return ret < 0 ? (int)ret : -1;
94         return 0;
95 }
96
97 static int __do_write_buf(struct feat_fd *ff,  const void *buf, size_t size)
98 {
99         /* struct perf_event_header::size is u16 */
100         const size_t max_size = 0xffff - sizeof(struct perf_event_header);
101         size_t new_size = ff->size;
102         void *addr;
103
104         if (size + ff->offset > max_size)
105                 return -E2BIG;
106
107         while (size > (new_size - ff->offset))
108                 new_size <<= 1;
109         new_size = min(max_size, new_size);
110
111         if (ff->size < new_size) {
112                 addr = realloc(ff->buf, new_size);
113                 if (!addr)
114                         return -ENOMEM;
115                 ff->buf = addr;
116                 ff->size = new_size;
117         }
118
119         memcpy(ff->buf + ff->offset, buf, size);
120         ff->offset += size;
121
122         return 0;
123 }
124
125 /* Return: 0 if succeded, -ERR if failed. */
126 int do_write(struct feat_fd *ff, const void *buf, size_t size)
127 {
128         if (!ff->buf)
129                 return __do_write_fd(ff, buf, size);
130         return __do_write_buf(ff, buf, size);
131 }
132
133 /* Return: 0 if succeded, -ERR if failed. */
134 int write_padded(struct feat_fd *ff, const void *bf,
135                  size_t count, size_t count_aligned)
136 {
137         static const char zero_buf[NAME_ALIGN];
138         int err = do_write(ff, bf, count);
139
140         if (!err)
141                 err = do_write(ff, zero_buf, count_aligned - count);
142
143         return err;
144 }
145
146 #define string_size(str)                                                \
147         (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
148
149 /* Return: 0 if succeded, -ERR if failed. */
150 static int do_write_string(struct feat_fd *ff, const char *str)
151 {
152         u32 len, olen;
153         int ret;
154
155         olen = strlen(str) + 1;
156         len = PERF_ALIGN(olen, NAME_ALIGN);
157
158         /* write len, incl. \0 */
159         ret = do_write(ff, &len, sizeof(len));
160         if (ret < 0)
161                 return ret;
162
163         return write_padded(ff, str, olen, len);
164 }
165
166 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
167 {
168         ssize_t ret = readn(ff->fd, addr, size);
169
170         if (ret != size)
171                 return ret < 0 ? (int)ret : -1;
172         return 0;
173 }
174
175 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
176 {
177         if (size > (ssize_t)ff->size - ff->offset)
178                 return -1;
179
180         memcpy(addr, ff->buf + ff->offset, size);
181         ff->offset += size;
182
183         return 0;
184
185 }
186
187 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
188 {
189         if (!ff->buf)
190                 return __do_read_fd(ff, addr, size);
191         return __do_read_buf(ff, addr, size);
192 }
193
194 static int do_read_u32(struct feat_fd *ff, u32 *addr)
195 {
196         int ret;
197
198         ret = __do_read(ff, addr, sizeof(*addr));
199         if (ret)
200                 return ret;
201
202         if (ff->ph->needs_swap)
203                 *addr = bswap_32(*addr);
204         return 0;
205 }
206
207 static int do_read_u64(struct feat_fd *ff, u64 *addr)
208 {
209         int ret;
210
211         ret = __do_read(ff, addr, sizeof(*addr));
212         if (ret)
213                 return ret;
214
215         if (ff->ph->needs_swap)
216                 *addr = bswap_64(*addr);
217         return 0;
218 }
219
220 static char *do_read_string(struct feat_fd *ff)
221 {
222         u32 len;
223         char *buf;
224
225         if (do_read_u32(ff, &len))
226                 return NULL;
227
228         buf = malloc(len);
229         if (!buf)
230                 return NULL;
231
232         if (!__do_read(ff, buf, len)) {
233                 /*
234                  * strings are padded by zeroes
235                  * thus the actual strlen of buf
236                  * may be less than len
237                  */
238                 return buf;
239         }
240
241         free(buf);
242         return NULL;
243 }
244
245 static int write_tracing_data(struct feat_fd *ff,
246                               struct perf_evlist *evlist)
247 {
248         if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
249                 return -1;
250
251         return read_tracing_data(ff->fd, &evlist->entries);
252 }
253
254 static int write_build_id(struct feat_fd *ff,
255                           struct perf_evlist *evlist __maybe_unused)
256 {
257         struct perf_session *session;
258         int err;
259
260         session = container_of(ff->ph, struct perf_session, header);
261
262         if (!perf_session__read_build_ids(session, true))
263                 return -1;
264
265         if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
266                 return -1;
267
268         err = perf_session__write_buildid_table(session, ff);
269         if (err < 0) {
270                 pr_debug("failed to write buildid table\n");
271                 return err;
272         }
273         perf_session__cache_build_ids(session);
274
275         return 0;
276 }
277
278 static int write_hostname(struct feat_fd *ff,
279                           struct perf_evlist *evlist __maybe_unused)
280 {
281         struct utsname uts;
282         int ret;
283
284         ret = uname(&uts);
285         if (ret < 0)
286                 return -1;
287
288         return do_write_string(ff, uts.nodename);
289 }
290
291 static int write_osrelease(struct feat_fd *ff,
292                            struct perf_evlist *evlist __maybe_unused)
293 {
294         struct utsname uts;
295         int ret;
296
297         ret = uname(&uts);
298         if (ret < 0)
299                 return -1;
300
301         return do_write_string(ff, uts.release);
302 }
303
304 static int write_arch(struct feat_fd *ff,
305                       struct perf_evlist *evlist __maybe_unused)
306 {
307         struct utsname uts;
308         int ret;
309
310         ret = uname(&uts);
311         if (ret < 0)
312                 return -1;
313
314         return do_write_string(ff, uts.machine);
315 }
316
317 static int write_version(struct feat_fd *ff,
318                          struct perf_evlist *evlist __maybe_unused)
319 {
320         return do_write_string(ff, perf_version_string);
321 }
322
323 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
324 {
325         FILE *file;
326         char *buf = NULL;
327         char *s, *p;
328         const char *search = cpuinfo_proc;
329         size_t len = 0;
330         int ret = -1;
331
332         if (!search)
333                 return -1;
334
335         file = fopen("/proc/cpuinfo", "r");
336         if (!file)
337                 return -1;
338
339         while (getline(&buf, &len, file) > 0) {
340                 ret = strncmp(buf, search, strlen(search));
341                 if (!ret)
342                         break;
343         }
344
345         if (ret) {
346                 ret = -1;
347                 goto done;
348         }
349
350         s = buf;
351
352         p = strchr(buf, ':');
353         if (p && *(p+1) == ' ' && *(p+2))
354                 s = p + 2;
355         p = strchr(s, '\n');
356         if (p)
357                 *p = '\0';
358
359         /* squash extra space characters (branding string) */
360         p = s;
361         while (*p) {
362                 if (isspace(*p)) {
363                         char *r = p + 1;
364                         char *q = r;
365                         *p = ' ';
366                         while (*q && isspace(*q))
367                                 q++;
368                         if (q != (p+1))
369                                 while ((*r++ = *q++));
370                 }
371                 p++;
372         }
373         ret = do_write_string(ff, s);
374 done:
375         free(buf);
376         fclose(file);
377         return ret;
378 }
379
380 static int write_cpudesc(struct feat_fd *ff,
381                        struct perf_evlist *evlist __maybe_unused)
382 {
383         const char *cpuinfo_procs[] = CPUINFO_PROC;
384         unsigned int i;
385
386         for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
387                 int ret;
388                 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
389                 if (ret >= 0)
390                         return ret;
391         }
392         return -1;
393 }
394
395
396 static int write_nrcpus(struct feat_fd *ff,
397                         struct perf_evlist *evlist __maybe_unused)
398 {
399         long nr;
400         u32 nrc, nra;
401         int ret;
402
403         nrc = cpu__max_present_cpu();
404
405         nr = sysconf(_SC_NPROCESSORS_ONLN);
406         if (nr < 0)
407                 return -1;
408
409         nra = (u32)(nr & UINT_MAX);
410
411         ret = do_write(ff, &nrc, sizeof(nrc));
412         if (ret < 0)
413                 return ret;
414
415         return do_write(ff, &nra, sizeof(nra));
416 }
417
418 static int write_event_desc(struct feat_fd *ff,
419                             struct perf_evlist *evlist)
420 {
421         struct perf_evsel *evsel;
422         u32 nre, nri, sz;
423         int ret;
424
425         nre = evlist->nr_entries;
426
427         /*
428          * write number of events
429          */
430         ret = do_write(ff, &nre, sizeof(nre));
431         if (ret < 0)
432                 return ret;
433
434         /*
435          * size of perf_event_attr struct
436          */
437         sz = (u32)sizeof(evsel->attr);
438         ret = do_write(ff, &sz, sizeof(sz));
439         if (ret < 0)
440                 return ret;
441
442         evlist__for_each_entry(evlist, evsel) {
443                 ret = do_write(ff, &evsel->attr, sz);
444                 if (ret < 0)
445                         return ret;
446                 /*
447                  * write number of unique id per event
448                  * there is one id per instance of an event
449                  *
450                  * copy into an nri to be independent of the
451                  * type of ids,
452                  */
453                 nri = evsel->ids;
454                 ret = do_write(ff, &nri, sizeof(nri));
455                 if (ret < 0)
456                         return ret;
457
458                 /*
459                  * write event string as passed on cmdline
460                  */
461                 ret = do_write_string(ff, perf_evsel__name(evsel));
462                 if (ret < 0)
463                         return ret;
464                 /*
465                  * write unique ids for this event
466                  */
467                 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
468                 if (ret < 0)
469                         return ret;
470         }
471         return 0;
472 }
473
474 static int write_cmdline(struct feat_fd *ff,
475                          struct perf_evlist *evlist __maybe_unused)
476 {
477         char buf[MAXPATHLEN];
478         u32 n;
479         int i, ret;
480
481         /* actual path to perf binary */
482         ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
483         if (ret <= 0)
484                 return -1;
485
486         /* readlink() does not add null termination */
487         buf[ret] = '\0';
488
489         /* account for binary path */
490         n = perf_env.nr_cmdline + 1;
491
492         ret = do_write(ff, &n, sizeof(n));
493         if (ret < 0)
494                 return ret;
495
496         ret = do_write_string(ff, buf);
497         if (ret < 0)
498                 return ret;
499
500         for (i = 0 ; i < perf_env.nr_cmdline; i++) {
501                 ret = do_write_string(ff, perf_env.cmdline_argv[i]);
502                 if (ret < 0)
503                         return ret;
504         }
505         return 0;
506 }
507
508 #define CORE_SIB_FMT \
509         "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
510 #define THRD_SIB_FMT \
511         "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
512
513 struct cpu_topo {
514         u32 cpu_nr;
515         u32 core_sib;
516         u32 thread_sib;
517         char **core_siblings;
518         char **thread_siblings;
519 };
520
521 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
522 {
523         FILE *fp;
524         char filename[MAXPATHLEN];
525         char *buf = NULL, *p;
526         size_t len = 0;
527         ssize_t sret;
528         u32 i = 0;
529         int ret = -1;
530
531         sprintf(filename, CORE_SIB_FMT, cpu);
532         fp = fopen(filename, "r");
533         if (!fp)
534                 goto try_threads;
535
536         sret = getline(&buf, &len, fp);
537         fclose(fp);
538         if (sret <= 0)
539                 goto try_threads;
540
541         p = strchr(buf, '\n');
542         if (p)
543                 *p = '\0';
544
545         for (i = 0; i < tp->core_sib; i++) {
546                 if (!strcmp(buf, tp->core_siblings[i]))
547                         break;
548         }
549         if (i == tp->core_sib) {
550                 tp->core_siblings[i] = buf;
551                 tp->core_sib++;
552                 buf = NULL;
553                 len = 0;
554         }
555         ret = 0;
556
557 try_threads:
558         sprintf(filename, THRD_SIB_FMT, cpu);
559         fp = fopen(filename, "r");
560         if (!fp)
561                 goto done;
562
563         if (getline(&buf, &len, fp) <= 0)
564                 goto done;
565
566         p = strchr(buf, '\n');
567         if (p)
568                 *p = '\0';
569
570         for (i = 0; i < tp->thread_sib; i++) {
571                 if (!strcmp(buf, tp->thread_siblings[i]))
572                         break;
573         }
574         if (i == tp->thread_sib) {
575                 tp->thread_siblings[i] = buf;
576                 tp->thread_sib++;
577                 buf = NULL;
578         }
579         ret = 0;
580 done:
581         if(fp)
582                 fclose(fp);
583         free(buf);
584         return ret;
585 }
586
587 static void free_cpu_topo(struct cpu_topo *tp)
588 {
589         u32 i;
590
591         if (!tp)
592                 return;
593
594         for (i = 0 ; i < tp->core_sib; i++)
595                 zfree(&tp->core_siblings[i]);
596
597         for (i = 0 ; i < tp->thread_sib; i++)
598                 zfree(&tp->thread_siblings[i]);
599
600         free(tp);
601 }
602
603 static struct cpu_topo *build_cpu_topology(void)
604 {
605         struct cpu_topo *tp = NULL;
606         void *addr;
607         u32 nr, i;
608         size_t sz;
609         long ncpus;
610         int ret = -1;
611         struct cpu_map *map;
612
613         ncpus = cpu__max_present_cpu();
614
615         /* build online CPU map */
616         map = cpu_map__new(NULL);
617         if (map == NULL) {
618                 pr_debug("failed to get system cpumap\n");
619                 return NULL;
620         }
621
622         nr = (u32)(ncpus & UINT_MAX);
623
624         sz = nr * sizeof(char *);
625         addr = calloc(1, sizeof(*tp) + 2 * sz);
626         if (!addr)
627                 goto out_free;
628
629         tp = addr;
630         tp->cpu_nr = nr;
631         addr += sizeof(*tp);
632         tp->core_siblings = addr;
633         addr += sz;
634         tp->thread_siblings = addr;
635
636         for (i = 0; i < nr; i++) {
637                 if (!cpu_map__has(map, i))
638                         continue;
639
640                 ret = build_cpu_topo(tp, i);
641                 if (ret < 0)
642                         break;
643         }
644
645 out_free:
646         cpu_map__put(map);
647         if (ret) {
648                 free_cpu_topo(tp);
649                 tp = NULL;
650         }
651         return tp;
652 }
653
654 static int write_cpu_topology(struct feat_fd *ff,
655                               struct perf_evlist *evlist __maybe_unused)
656 {
657         struct cpu_topo *tp;
658         u32 i;
659         int ret, j;
660
661         tp = build_cpu_topology();
662         if (!tp)
663                 return -1;
664
665         ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
666         if (ret < 0)
667                 goto done;
668
669         for (i = 0; i < tp->core_sib; i++) {
670                 ret = do_write_string(ff, tp->core_siblings[i]);
671                 if (ret < 0)
672                         goto done;
673         }
674         ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
675         if (ret < 0)
676                 goto done;
677
678         for (i = 0; i < tp->thread_sib; i++) {
679                 ret = do_write_string(ff, tp->thread_siblings[i]);
680                 if (ret < 0)
681                         break;
682         }
683
684         ret = perf_env__read_cpu_topology_map(&perf_env);
685         if (ret < 0)
686                 goto done;
687
688         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
689                 ret = do_write(ff, &perf_env.cpu[j].core_id,
690                                sizeof(perf_env.cpu[j].core_id));
691                 if (ret < 0)
692                         return ret;
693                 ret = do_write(ff, &perf_env.cpu[j].socket_id,
694                                sizeof(perf_env.cpu[j].socket_id));
695                 if (ret < 0)
696                         return ret;
697         }
698 done:
699         free_cpu_topo(tp);
700         return ret;
701 }
702
703
704
705 static int write_total_mem(struct feat_fd *ff,
706                            struct perf_evlist *evlist __maybe_unused)
707 {
708         char *buf = NULL;
709         FILE *fp;
710         size_t len = 0;
711         int ret = -1, n;
712         uint64_t mem;
713
714         fp = fopen("/proc/meminfo", "r");
715         if (!fp)
716                 return -1;
717
718         while (getline(&buf, &len, fp) > 0) {
719                 ret = strncmp(buf, "MemTotal:", 9);
720                 if (!ret)
721                         break;
722         }
723         if (!ret) {
724                 n = sscanf(buf, "%*s %"PRIu64, &mem);
725                 if (n == 1)
726                         ret = do_write(ff, &mem, sizeof(mem));
727         } else
728                 ret = -1;
729         free(buf);
730         fclose(fp);
731         return ret;
732 }
733
734 static int write_topo_node(struct feat_fd *ff, int node)
735 {
736         char str[MAXPATHLEN];
737         char field[32];
738         char *buf = NULL, *p;
739         size_t len = 0;
740         FILE *fp;
741         u64 mem_total, mem_free, mem;
742         int ret = -1;
743
744         sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
745         fp = fopen(str, "r");
746         if (!fp)
747                 return -1;
748
749         while (getline(&buf, &len, fp) > 0) {
750                 /* skip over invalid lines */
751                 if (!strchr(buf, ':'))
752                         continue;
753                 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
754                         goto done;
755                 if (!strcmp(field, "MemTotal:"))
756                         mem_total = mem;
757                 if (!strcmp(field, "MemFree:"))
758                         mem_free = mem;
759         }
760
761         fclose(fp);
762         fp = NULL;
763
764         ret = do_write(ff, &mem_total, sizeof(u64));
765         if (ret)
766                 goto done;
767
768         ret = do_write(ff, &mem_free, sizeof(u64));
769         if (ret)
770                 goto done;
771
772         ret = -1;
773         sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
774
775         fp = fopen(str, "r");
776         if (!fp)
777                 goto done;
778
779         if (getline(&buf, &len, fp) <= 0)
780                 goto done;
781
782         p = strchr(buf, '\n');
783         if (p)
784                 *p = '\0';
785
786         ret = do_write_string(ff, buf);
787 done:
788         free(buf);
789         if (fp)
790                 fclose(fp);
791         return ret;
792 }
793
794 static int write_numa_topology(struct feat_fd *ff,
795                                struct perf_evlist *evlist __maybe_unused)
796 {
797         char *buf = NULL;
798         size_t len = 0;
799         FILE *fp;
800         struct cpu_map *node_map = NULL;
801         char *c;
802         u32 nr, i, j;
803         int ret = -1;
804
805         fp = fopen("/sys/devices/system/node/online", "r");
806         if (!fp)
807                 return -1;
808
809         if (getline(&buf, &len, fp) <= 0)
810                 goto done;
811
812         c = strchr(buf, '\n');
813         if (c)
814                 *c = '\0';
815
816         node_map = cpu_map__new(buf);
817         if (!node_map)
818                 goto done;
819
820         nr = (u32)node_map->nr;
821
822         ret = do_write(ff, &nr, sizeof(nr));
823         if (ret < 0)
824                 goto done;
825
826         for (i = 0; i < nr; i++) {
827                 j = (u32)node_map->map[i];
828                 ret = do_write(ff, &j, sizeof(j));
829                 if (ret < 0)
830                         break;
831
832                 ret = write_topo_node(ff, i);
833                 if (ret < 0)
834                         break;
835         }
836 done:
837         free(buf);
838         fclose(fp);
839         cpu_map__put(node_map);
840         return ret;
841 }
842
843 /*
844  * File format:
845  *
846  * struct pmu_mappings {
847  *      u32     pmu_num;
848  *      struct pmu_map {
849  *              u32     type;
850  *              char    name[];
851  *      }[pmu_num];
852  * };
853  */
854
855 static int write_pmu_mappings(struct feat_fd *ff,
856                               struct perf_evlist *evlist __maybe_unused)
857 {
858         struct perf_pmu *pmu = NULL;
859         u32 pmu_num = 0;
860         int ret;
861
862         /*
863          * Do a first pass to count number of pmu to avoid lseek so this
864          * works in pipe mode as well.
865          */
866         while ((pmu = perf_pmu__scan(pmu))) {
867                 if (!pmu->name)
868                         continue;
869                 pmu_num++;
870         }
871
872         ret = do_write(ff, &pmu_num, sizeof(pmu_num));
873         if (ret < 0)
874                 return ret;
875
876         while ((pmu = perf_pmu__scan(pmu))) {
877                 if (!pmu->name)
878                         continue;
879
880                 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
881                 if (ret < 0)
882                         return ret;
883
884                 ret = do_write_string(ff, pmu->name);
885                 if (ret < 0)
886                         return ret;
887         }
888
889         return 0;
890 }
891
892 /*
893  * File format:
894  *
895  * struct group_descs {
896  *      u32     nr_groups;
897  *      struct group_desc {
898  *              char    name[];
899  *              u32     leader_idx;
900  *              u32     nr_members;
901  *      }[nr_groups];
902  * };
903  */
904 static int write_group_desc(struct feat_fd *ff,
905                             struct perf_evlist *evlist)
906 {
907         u32 nr_groups = evlist->nr_groups;
908         struct perf_evsel *evsel;
909         int ret;
910
911         ret = do_write(ff, &nr_groups, sizeof(nr_groups));
912         if (ret < 0)
913                 return ret;
914
915         evlist__for_each_entry(evlist, evsel) {
916                 if (perf_evsel__is_group_leader(evsel) &&
917                     evsel->nr_members > 1) {
918                         const char *name = evsel->group_name ?: "{anon_group}";
919                         u32 leader_idx = evsel->idx;
920                         u32 nr_members = evsel->nr_members;
921
922                         ret = do_write_string(ff, name);
923                         if (ret < 0)
924                                 return ret;
925
926                         ret = do_write(ff, &leader_idx, sizeof(leader_idx));
927                         if (ret < 0)
928                                 return ret;
929
930                         ret = do_write(ff, &nr_members, sizeof(nr_members));
931                         if (ret < 0)
932                                 return ret;
933                 }
934         }
935         return 0;
936 }
937
938 /*
939  * default get_cpuid(): nothing gets recorded
940  * actual implementation must be in arch/$(SRCARCH)/util/header.c
941  */
942 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
943 {
944         return -1;
945 }
946
947 static int write_cpuid(struct feat_fd *ff,
948                        struct perf_evlist *evlist __maybe_unused)
949 {
950         char buffer[64];
951         int ret;
952
953         ret = get_cpuid(buffer, sizeof(buffer));
954         if (!ret)
955                 goto write_it;
956
957         return -1;
958 write_it:
959         return do_write_string(ff, buffer);
960 }
961
962 static int write_branch_stack(struct feat_fd *ff __maybe_unused,
963                               struct perf_evlist *evlist __maybe_unused)
964 {
965         return 0;
966 }
967
968 static int write_auxtrace(struct feat_fd *ff,
969                           struct perf_evlist *evlist __maybe_unused)
970 {
971         struct perf_session *session;
972         int err;
973
974         if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
975                 return -1;
976
977         session = container_of(ff->ph, struct perf_session, header);
978
979         err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
980         if (err < 0)
981                 pr_err("Failed to write auxtrace index\n");
982         return err;
983 }
984
985 static int cpu_cache_level__sort(const void *a, const void *b)
986 {
987         struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
988         struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
989
990         return cache_a->level - cache_b->level;
991 }
992
993 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
994 {
995         if (a->level != b->level)
996                 return false;
997
998         if (a->line_size != b->line_size)
999                 return false;
1000
1001         if (a->sets != b->sets)
1002                 return false;
1003
1004         if (a->ways != b->ways)
1005                 return false;
1006
1007         if (strcmp(a->type, b->type))
1008                 return false;
1009
1010         if (strcmp(a->size, b->size))
1011                 return false;
1012
1013         if (strcmp(a->map, b->map))
1014                 return false;
1015
1016         return true;
1017 }
1018
1019 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
1020 {
1021         char path[PATH_MAX], file[PATH_MAX];
1022         struct stat st;
1023         size_t len;
1024
1025         scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
1026         scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
1027
1028         if (stat(file, &st))
1029                 return 1;
1030
1031         scnprintf(file, PATH_MAX, "%s/level", path);
1032         if (sysfs__read_int(file, (int *) &cache->level))
1033                 return -1;
1034
1035         scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
1036         if (sysfs__read_int(file, (int *) &cache->line_size))
1037                 return -1;
1038
1039         scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
1040         if (sysfs__read_int(file, (int *) &cache->sets))
1041                 return -1;
1042
1043         scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
1044         if (sysfs__read_int(file, (int *) &cache->ways))
1045                 return -1;
1046
1047         scnprintf(file, PATH_MAX, "%s/type", path);
1048         if (sysfs__read_str(file, &cache->type, &len))
1049                 return -1;
1050
1051         cache->type[len] = 0;
1052         cache->type = rtrim(cache->type);
1053
1054         scnprintf(file, PATH_MAX, "%s/size", path);
1055         if (sysfs__read_str(file, &cache->size, &len)) {
1056                 free(cache->type);
1057                 return -1;
1058         }
1059
1060         cache->size[len] = 0;
1061         cache->size = rtrim(cache->size);
1062
1063         scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1064         if (sysfs__read_str(file, &cache->map, &len)) {
1065                 free(cache->map);
1066                 free(cache->type);
1067                 return -1;
1068         }
1069
1070         cache->map[len] = 0;
1071         cache->map = rtrim(cache->map);
1072         return 0;
1073 }
1074
1075 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
1076 {
1077         fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
1078 }
1079
1080 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
1081 {
1082         u32 i, cnt = 0;
1083         long ncpus;
1084         u32 nr, cpu;
1085         u16 level;
1086
1087         ncpus = sysconf(_SC_NPROCESSORS_CONF);
1088         if (ncpus < 0)
1089                 return -1;
1090
1091         nr = (u32)(ncpus & UINT_MAX);
1092
1093         for (cpu = 0; cpu < nr; cpu++) {
1094                 for (level = 0; level < 10; level++) {
1095                         struct cpu_cache_level c;
1096                         int err;
1097
1098                         err = cpu_cache_level__read(&c, cpu, level);
1099                         if (err < 0)
1100                                 return err;
1101
1102                         if (err == 1)
1103                                 break;
1104
1105                         for (i = 0; i < cnt; i++) {
1106                                 if (cpu_cache_level__cmp(&c, &caches[i]))
1107                                         break;
1108                         }
1109
1110                         if (i == cnt)
1111                                 caches[cnt++] = c;
1112                         else
1113                                 cpu_cache_level__free(&c);
1114
1115                         if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1116                                 goto out;
1117                 }
1118         }
1119  out:
1120         *cntp = cnt;
1121         return 0;
1122 }
1123
1124 #define MAX_CACHES 2000
1125
1126 static int write_cache(struct feat_fd *ff,
1127                        struct perf_evlist *evlist __maybe_unused)
1128 {
1129         struct cpu_cache_level caches[MAX_CACHES];
1130         u32 cnt = 0, i, version = 1;
1131         int ret;
1132
1133         ret = build_caches(caches, MAX_CACHES, &cnt);
1134         if (ret)
1135                 goto out;
1136
1137         qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1138
1139         ret = do_write(ff, &version, sizeof(u32));
1140         if (ret < 0)
1141                 goto out;
1142
1143         ret = do_write(ff, &cnt, sizeof(u32));
1144         if (ret < 0)
1145                 goto out;
1146
1147         for (i = 0; i < cnt; i++) {
1148                 struct cpu_cache_level *c = &caches[i];
1149
1150                 #define _W(v)                                   \
1151                         ret = do_write(ff, &c->v, sizeof(u32)); \
1152                         if (ret < 0)                            \
1153                                 goto out;
1154
1155                 _W(level)
1156                 _W(line_size)
1157                 _W(sets)
1158                 _W(ways)
1159                 #undef _W
1160
1161                 #define _W(v)                                           \
1162                         ret = do_write_string(ff, (const char *) c->v); \
1163                         if (ret < 0)                                    \
1164                                 goto out;
1165
1166                 _W(type)
1167                 _W(size)
1168                 _W(map)
1169                 #undef _W
1170         }
1171
1172 out:
1173         for (i = 0; i < cnt; i++)
1174                 cpu_cache_level__free(&caches[i]);
1175         return ret;
1176 }
1177
1178 static int write_stat(struct feat_fd *ff __maybe_unused,
1179                       struct perf_evlist *evlist __maybe_unused)
1180 {
1181         return 0;
1182 }
1183
1184 static void print_hostname(struct feat_fd *ff, FILE *fp)
1185 {
1186         fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
1187 }
1188
1189 static void print_osrelease(struct feat_fd *ff, FILE *fp)
1190 {
1191         fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
1192 }
1193
1194 static void print_arch(struct feat_fd *ff, FILE *fp)
1195 {
1196         fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
1197 }
1198
1199 static void print_cpudesc(struct feat_fd *ff, FILE *fp)
1200 {
1201         fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
1202 }
1203
1204 static void print_nrcpus(struct feat_fd *ff, FILE *fp)
1205 {
1206         fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1207         fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
1208 }
1209
1210 static void print_version(struct feat_fd *ff, FILE *fp)
1211 {
1212         fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
1213 }
1214
1215 static void print_cmdline(struct feat_fd *ff, FILE *fp)
1216 {
1217         int nr, i;
1218
1219         nr = ff->ph->env.nr_cmdline;
1220
1221         fprintf(fp, "# cmdline : ");
1222
1223         for (i = 0; i < nr; i++)
1224                 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1225         fputc('\n', fp);
1226 }
1227
1228 static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
1229 {
1230         struct perf_header *ph = ff->ph;
1231         int cpu_nr = ph->env.nr_cpus_avail;
1232         int nr, i;
1233         char *str;
1234
1235         nr = ph->env.nr_sibling_cores;
1236         str = ph->env.sibling_cores;
1237
1238         for (i = 0; i < nr; i++) {
1239                 fprintf(fp, "# sibling cores   : %s\n", str);
1240                 str += strlen(str) + 1;
1241         }
1242
1243         nr = ph->env.nr_sibling_threads;
1244         str = ph->env.sibling_threads;
1245
1246         for (i = 0; i < nr; i++) {
1247                 fprintf(fp, "# sibling threads : %s\n", str);
1248                 str += strlen(str) + 1;
1249         }
1250
1251         if (ph->env.cpu != NULL) {
1252                 for (i = 0; i < cpu_nr; i++)
1253                         fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1254                                 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1255         } else
1256                 fprintf(fp, "# Core ID and Socket ID information is not available\n");
1257 }
1258
1259 static void free_event_desc(struct perf_evsel *events)
1260 {
1261         struct perf_evsel *evsel;
1262
1263         if (!events)
1264                 return;
1265
1266         for (evsel = events; evsel->attr.size; evsel++) {
1267                 zfree(&evsel->name);
1268                 zfree(&evsel->id);
1269         }
1270
1271         free(events);
1272 }
1273
1274 static struct perf_evsel *read_event_desc(struct feat_fd *ff)
1275 {
1276         struct perf_evsel *evsel, *events = NULL;
1277         u64 *id;
1278         void *buf = NULL;
1279         u32 nre, sz, nr, i, j;
1280         size_t msz;
1281
1282         /* number of events */
1283         if (do_read_u32(ff, &nre))
1284                 goto error;
1285
1286         if (do_read_u32(ff, &sz))
1287                 goto error;
1288
1289         /* buffer to hold on file attr struct */
1290         buf = malloc(sz);
1291         if (!buf)
1292                 goto error;
1293
1294         /* the last event terminates with evsel->attr.size == 0: */
1295         events = calloc(nre + 1, sizeof(*events));
1296         if (!events)
1297                 goto error;
1298
1299         msz = sizeof(evsel->attr);
1300         if (sz < msz)
1301                 msz = sz;
1302
1303         for (i = 0, evsel = events; i < nre; evsel++, i++) {
1304                 evsel->idx = i;
1305
1306                 /*
1307                  * must read entire on-file attr struct to
1308                  * sync up with layout.
1309                  */
1310                 if (__do_read(ff, buf, sz))
1311                         goto error;
1312
1313                 if (ff->ph->needs_swap)
1314                         perf_event__attr_swap(buf);
1315
1316                 memcpy(&evsel->attr, buf, msz);
1317
1318                 if (do_read_u32(ff, &nr))
1319                         goto error;
1320
1321                 if (ff->ph->needs_swap)
1322                         evsel->needs_swap = true;
1323
1324                 evsel->name = do_read_string(ff);
1325                 if (!evsel->name)
1326                         goto error;
1327
1328                 if (!nr)
1329                         continue;
1330
1331                 id = calloc(nr, sizeof(*id));
1332                 if (!id)
1333                         goto error;
1334                 evsel->ids = nr;
1335                 evsel->id = id;
1336
1337                 for (j = 0 ; j < nr; j++) {
1338                         if (do_read_u64(ff, id))
1339                                 goto error;
1340                         id++;
1341                 }
1342         }
1343 out:
1344         free(buf);
1345         return events;
1346 error:
1347         free_event_desc(events);
1348         events = NULL;
1349         goto out;
1350 }
1351
1352 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1353                                 void *priv __maybe_unused)
1354 {
1355         return fprintf(fp, ", %s = %s", name, val);
1356 }
1357
1358 static void print_event_desc(struct feat_fd *ff, FILE *fp)
1359 {
1360         struct perf_evsel *evsel, *events;
1361         u32 j;
1362         u64 *id;
1363
1364         if (ff->events)
1365                 events = ff->events;
1366         else
1367                 events = read_event_desc(ff);
1368
1369         if (!events) {
1370                 fprintf(fp, "# event desc: not available or unable to read\n");
1371                 return;
1372         }
1373
1374         for (evsel = events; evsel->attr.size; evsel++) {
1375                 fprintf(fp, "# event : name = %s, ", evsel->name);
1376
1377                 if (evsel->ids) {
1378                         fprintf(fp, ", id = {");
1379                         for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1380                                 if (j)
1381                                         fputc(',', fp);
1382                                 fprintf(fp, " %"PRIu64, *id);
1383                         }
1384                         fprintf(fp, " }");
1385                 }
1386
1387                 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1388
1389                 fputc('\n', fp);
1390         }
1391
1392         free_event_desc(events);
1393         ff->events = NULL;
1394 }
1395
1396 static void print_total_mem(struct feat_fd *ff, FILE *fp)
1397 {
1398         fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
1399 }
1400
1401 static void print_numa_topology(struct feat_fd *ff, FILE *fp)
1402 {
1403         int i;
1404         struct numa_node *n;
1405
1406         for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1407                 n = &ff->ph->env.numa_nodes[i];
1408
1409                 fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1410                             " free = %"PRIu64" kB\n",
1411                         n->node, n->mem_total, n->mem_free);
1412
1413                 fprintf(fp, "# node%u cpu list : ", n->node);
1414                 cpu_map__fprintf(n->map, fp);
1415         }
1416 }
1417
1418 static void print_cpuid(struct feat_fd *ff, FILE *fp)
1419 {
1420         fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
1421 }
1422
1423 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
1424 {
1425         fprintf(fp, "# contains samples with branch stack\n");
1426 }
1427
1428 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
1429 {
1430         fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1431 }
1432
1433 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
1434 {
1435         fprintf(fp, "# contains stat data\n");
1436 }
1437
1438 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
1439 {
1440         int i;
1441
1442         fprintf(fp, "# CPU cache info:\n");
1443         for (i = 0; i < ff->ph->env.caches_cnt; i++) {
1444                 fprintf(fp, "#  ");
1445                 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
1446         }
1447 }
1448
1449 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
1450 {
1451         const char *delimiter = "# pmu mappings: ";
1452         char *str, *tmp;
1453         u32 pmu_num;
1454         u32 type;
1455
1456         pmu_num = ff->ph->env.nr_pmu_mappings;
1457         if (!pmu_num) {
1458                 fprintf(fp, "# pmu mappings: not available\n");
1459                 return;
1460         }
1461
1462         str = ff->ph->env.pmu_mappings;
1463
1464         while (pmu_num) {
1465                 type = strtoul(str, &tmp, 0);
1466                 if (*tmp != ':')
1467                         goto error;
1468
1469                 str = tmp + 1;
1470                 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1471
1472                 delimiter = ", ";
1473                 str += strlen(str) + 1;
1474                 pmu_num--;
1475         }
1476
1477         fprintf(fp, "\n");
1478
1479         if (!pmu_num)
1480                 return;
1481 error:
1482         fprintf(fp, "# pmu mappings: unable to read\n");
1483 }
1484
1485 static void print_group_desc(struct feat_fd *ff, FILE *fp)
1486 {
1487         struct perf_session *session;
1488         struct perf_evsel *evsel;
1489         u32 nr = 0;
1490
1491         session = container_of(ff->ph, struct perf_session, header);
1492
1493         evlist__for_each_entry(session->evlist, evsel) {
1494                 if (perf_evsel__is_group_leader(evsel) &&
1495                     evsel->nr_members > 1) {
1496                         fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1497                                 perf_evsel__name(evsel));
1498
1499                         nr = evsel->nr_members - 1;
1500                 } else if (nr) {
1501                         fprintf(fp, ",%s", perf_evsel__name(evsel));
1502
1503                         if (--nr == 0)
1504                                 fprintf(fp, "}\n");
1505                 }
1506         }
1507 }
1508
1509 static int __event_process_build_id(struct build_id_event *bev,
1510                                     char *filename,
1511                                     struct perf_session *session)
1512 {
1513         int err = -1;
1514         struct machine *machine;
1515         u16 cpumode;
1516         struct dso *dso;
1517         enum dso_kernel_type dso_type;
1518
1519         machine = perf_session__findnew_machine(session, bev->pid);
1520         if (!machine)
1521                 goto out;
1522
1523         cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1524
1525         switch (cpumode) {
1526         case PERF_RECORD_MISC_KERNEL:
1527                 dso_type = DSO_TYPE_KERNEL;
1528                 break;
1529         case PERF_RECORD_MISC_GUEST_KERNEL:
1530                 dso_type = DSO_TYPE_GUEST_KERNEL;
1531                 break;
1532         case PERF_RECORD_MISC_USER:
1533         case PERF_RECORD_MISC_GUEST_USER:
1534                 dso_type = DSO_TYPE_USER;
1535                 break;
1536         default:
1537                 goto out;
1538         }
1539
1540         dso = machine__findnew_dso(machine, filename);
1541         if (dso != NULL) {
1542                 char sbuild_id[SBUILD_ID_SIZE];
1543
1544                 dso__set_build_id(dso, &bev->build_id);
1545
1546                 if (dso_type != DSO_TYPE_USER) {
1547                         struct kmod_path m = { .name = NULL, };
1548
1549                         if (!kmod_path__parse_name(&m, filename) && m.kmod)
1550                                 dso__set_module_info(dso, &m, machine);
1551                         else
1552                                 dso->kernel = dso_type;
1553
1554                         free(m.name);
1555                 }
1556
1557                 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1558                                   sbuild_id);
1559                 pr_debug("build id event received for %s: %s\n",
1560                          dso->long_name, sbuild_id);
1561                 dso__put(dso);
1562         }
1563
1564         err = 0;
1565 out:
1566         return err;
1567 }
1568
1569 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1570                                                  int input, u64 offset, u64 size)
1571 {
1572         struct perf_session *session = container_of(header, struct perf_session, header);
1573         struct {
1574                 struct perf_event_header   header;
1575                 u8                         build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1576                 char                       filename[0];
1577         } old_bev;
1578         struct build_id_event bev;
1579         char filename[PATH_MAX];
1580         u64 limit = offset + size;
1581
1582         while (offset < limit) {
1583                 ssize_t len;
1584
1585                 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1586                         return -1;
1587
1588                 if (header->needs_swap)
1589                         perf_event_header__bswap(&old_bev.header);
1590
1591                 len = old_bev.header.size - sizeof(old_bev);
1592                 if (readn(input, filename, len) != len)
1593                         return -1;
1594
1595                 bev.header = old_bev.header;
1596
1597                 /*
1598                  * As the pid is the missing value, we need to fill
1599                  * it properly. The header.misc value give us nice hint.
1600                  */
1601                 bev.pid = HOST_KERNEL_ID;
1602                 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1603                     bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1604                         bev.pid = DEFAULT_GUEST_KERNEL_ID;
1605
1606                 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1607                 __event_process_build_id(&bev, filename, session);
1608
1609                 offset += bev.header.size;
1610         }
1611
1612         return 0;
1613 }
1614
1615 static int perf_header__read_build_ids(struct perf_header *header,
1616                                        int input, u64 offset, u64 size)
1617 {
1618         struct perf_session *session = container_of(header, struct perf_session, header);
1619         struct build_id_event bev;
1620         char filename[PATH_MAX];
1621         u64 limit = offset + size, orig_offset = offset;
1622         int err = -1;
1623
1624         while (offset < limit) {
1625                 ssize_t len;
1626
1627                 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1628                         goto out;
1629
1630                 if (header->needs_swap)
1631                         perf_event_header__bswap(&bev.header);
1632
1633                 len = bev.header.size - sizeof(bev);
1634                 if (readn(input, filename, len) != len)
1635                         goto out;
1636                 /*
1637                  * The a1645ce1 changeset:
1638                  *
1639                  * "perf: 'perf kvm' tool for monitoring guest performance from host"
1640                  *
1641                  * Added a field to struct build_id_event that broke the file
1642                  * format.
1643                  *
1644                  * Since the kernel build-id is the first entry, process the
1645                  * table using the old format if the well known
1646                  * '[kernel.kallsyms]' string for the kernel build-id has the
1647                  * first 4 characters chopped off (where the pid_t sits).
1648                  */
1649                 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1650                         if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1651                                 return -1;
1652                         return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1653                 }
1654
1655                 __event_process_build_id(&bev, filename, session);
1656
1657                 offset += bev.header.size;
1658         }
1659         err = 0;
1660 out:
1661         return err;
1662 }
1663
1664 /* Macro for features that simply need to read and store a string. */
1665 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
1666 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
1667 {\
1668         ff->ph->env.__feat_env = do_read_string(ff); \
1669         return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
1670 }
1671
1672 FEAT_PROCESS_STR_FUN(hostname, hostname);
1673 FEAT_PROCESS_STR_FUN(osrelease, os_release);
1674 FEAT_PROCESS_STR_FUN(version, version);
1675 FEAT_PROCESS_STR_FUN(arch, arch);
1676 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
1677 FEAT_PROCESS_STR_FUN(cpuid, cpuid);
1678
1679 static int process_tracing_data(struct feat_fd *ff, void *data)
1680 {
1681         ssize_t ret = trace_report(ff->fd, data, false);
1682
1683         return ret < 0 ? -1 : 0;
1684 }
1685
1686 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
1687 {
1688         if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
1689                 pr_debug("Failed to read buildids, continuing...\n");
1690         return 0;
1691 }
1692
1693 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
1694 {
1695         int ret;
1696         u32 nr_cpus_avail, nr_cpus_online;
1697
1698         ret = do_read_u32(ff, &nr_cpus_avail);
1699         if (ret)
1700                 return ret;
1701
1702         ret = do_read_u32(ff, &nr_cpus_online);
1703         if (ret)
1704                 return ret;
1705         ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
1706         ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
1707         return 0;
1708 }
1709
1710 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
1711 {
1712         u64 total_mem;
1713         int ret;
1714
1715         ret = do_read_u64(ff, &total_mem);
1716         if (ret)
1717                 return -1;
1718         ff->ph->env.total_mem = (unsigned long long)total_mem;
1719         return 0;
1720 }
1721
1722 static struct perf_evsel *
1723 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1724 {
1725         struct perf_evsel *evsel;
1726
1727         evlist__for_each_entry(evlist, evsel) {
1728                 if (evsel->idx == idx)
1729                         return evsel;
1730         }
1731
1732         return NULL;
1733 }
1734
1735 static void
1736 perf_evlist__set_event_name(struct perf_evlist *evlist,
1737                             struct perf_evsel *event)
1738 {
1739         struct perf_evsel *evsel;
1740
1741         if (!event->name)
1742                 return;
1743
1744         evsel = perf_evlist__find_by_index(evlist, event->idx);
1745         if (!evsel)
1746                 return;
1747
1748         if (evsel->name)
1749                 return;
1750
1751         evsel->name = strdup(event->name);
1752 }
1753
1754 static int
1755 process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
1756 {
1757         struct perf_session *session;
1758         struct perf_evsel *evsel, *events = read_event_desc(ff);
1759
1760         if (!events)
1761                 return 0;
1762
1763         session = container_of(ff->ph, struct perf_session, header);
1764
1765         if (session->data->is_pipe) {
1766                 /* Save events for reading later by print_event_desc,
1767                  * since they can't be read again in pipe mode. */
1768                 ff->events = events;
1769         }
1770
1771         for (evsel = events; evsel->attr.size; evsel++)
1772                 perf_evlist__set_event_name(session->evlist, evsel);
1773
1774         if (!session->data->is_pipe)
1775                 free_event_desc(events);
1776
1777         return 0;
1778 }
1779
1780 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
1781 {
1782         char *str, *cmdline = NULL, **argv = NULL;
1783         u32 nr, i, len = 0;
1784
1785         if (do_read_u32(ff, &nr))
1786                 return -1;
1787
1788         ff->ph->env.nr_cmdline = nr;
1789
1790         cmdline = zalloc(ff->size + nr + 1);
1791         if (!cmdline)
1792                 return -1;
1793
1794         argv = zalloc(sizeof(char *) * (nr + 1));
1795         if (!argv)
1796                 goto error;
1797
1798         for (i = 0; i < nr; i++) {
1799                 str = do_read_string(ff);
1800                 if (!str)
1801                         goto error;
1802
1803                 argv[i] = cmdline + len;
1804                 memcpy(argv[i], str, strlen(str) + 1);
1805                 len += strlen(str) + 1;
1806                 free(str);
1807         }
1808         ff->ph->env.cmdline = cmdline;
1809         ff->ph->env.cmdline_argv = (const char **) argv;
1810         return 0;
1811
1812 error:
1813         free(argv);
1814         free(cmdline);
1815         return -1;
1816 }
1817
1818 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
1819 {
1820         u32 nr, i;
1821         char *str;
1822         struct strbuf sb;
1823         int cpu_nr = ff->ph->env.nr_cpus_avail;
1824         u64 size = 0;
1825         struct perf_header *ph = ff->ph;
1826
1827         ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1828         if (!ph->env.cpu)
1829                 return -1;
1830
1831         if (do_read_u32(ff, &nr))
1832                 goto free_cpu;
1833
1834         ph->env.nr_sibling_cores = nr;
1835         size += sizeof(u32);
1836         if (strbuf_init(&sb, 128) < 0)
1837                 goto free_cpu;
1838
1839         for (i = 0; i < nr; i++) {
1840                 str = do_read_string(ff);
1841                 if (!str)
1842                         goto error;
1843
1844                 /* include a NULL character at the end */
1845                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1846                         goto error;
1847                 size += string_size(str);
1848                 free(str);
1849         }
1850         ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1851
1852         if (do_read_u32(ff, &nr))
1853                 return -1;
1854
1855         ph->env.nr_sibling_threads = nr;
1856         size += sizeof(u32);
1857
1858         for (i = 0; i < nr; i++) {
1859                 str = do_read_string(ff);
1860                 if (!str)
1861                         goto error;
1862
1863                 /* include a NULL character at the end */
1864                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1865                         goto error;
1866                 size += string_size(str);
1867                 free(str);
1868         }
1869         ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1870
1871         /*
1872          * The header may be from old perf,
1873          * which doesn't include core id and socket id information.
1874          */
1875         if (ff->size <= size) {
1876                 zfree(&ph->env.cpu);
1877                 return 0;
1878         }
1879
1880         for (i = 0; i < (u32)cpu_nr; i++) {
1881                 if (do_read_u32(ff, &nr))
1882                         goto free_cpu;
1883
1884                 ph->env.cpu[i].core_id = nr;
1885
1886                 if (do_read_u32(ff, &nr))
1887                         goto free_cpu;
1888
1889                 if (nr != (u32)-1 && nr > (u32)cpu_nr) {
1890                         pr_debug("socket_id number is too big."
1891                                  "You may need to upgrade the perf tool.\n");
1892                         goto free_cpu;
1893                 }
1894
1895                 ph->env.cpu[i].socket_id = nr;
1896         }
1897
1898         return 0;
1899
1900 error:
1901         strbuf_release(&sb);
1902 free_cpu:
1903         zfree(&ph->env.cpu);
1904         return -1;
1905 }
1906
1907 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
1908 {
1909         struct numa_node *nodes, *n;
1910         u32 nr, i;
1911         char *str;
1912
1913         /* nr nodes */
1914         if (do_read_u32(ff, &nr))
1915                 return -1;
1916
1917         nodes = zalloc(sizeof(*nodes) * nr);
1918         if (!nodes)
1919                 return -ENOMEM;
1920
1921         for (i = 0; i < nr; i++) {
1922                 n = &nodes[i];
1923
1924                 /* node number */
1925                 if (do_read_u32(ff, &n->node))
1926                         goto error;
1927
1928                 if (do_read_u64(ff, &n->mem_total))
1929                         goto error;
1930
1931                 if (do_read_u64(ff, &n->mem_free))
1932                         goto error;
1933
1934                 str = do_read_string(ff);
1935                 if (!str)
1936                         goto error;
1937
1938                 n->map = cpu_map__new(str);
1939                 if (!n->map)
1940                         goto error;
1941
1942                 free(str);
1943         }
1944         ff->ph->env.nr_numa_nodes = nr;
1945         ff->ph->env.numa_nodes = nodes;
1946         return 0;
1947
1948 error:
1949         free(nodes);
1950         return -1;
1951 }
1952
1953 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
1954 {
1955         char *name;
1956         u32 pmu_num;
1957         u32 type;
1958         struct strbuf sb;
1959
1960         if (do_read_u32(ff, &pmu_num))
1961                 return -1;
1962
1963         if (!pmu_num) {
1964                 pr_debug("pmu mappings not available\n");
1965                 return 0;
1966         }
1967
1968         ff->ph->env.nr_pmu_mappings = pmu_num;
1969         if (strbuf_init(&sb, 128) < 0)
1970                 return -1;
1971
1972         while (pmu_num) {
1973                 if (do_read_u32(ff, &type))
1974                         goto error;
1975
1976                 name = do_read_string(ff);
1977                 if (!name)
1978                         goto error;
1979
1980                 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1981                         goto error;
1982                 /* include a NULL character at the end */
1983                 if (strbuf_add(&sb, "", 1) < 0)
1984                         goto error;
1985
1986                 if (!strcmp(name, "msr"))
1987                         ff->ph->env.msr_pmu_type = type;
1988
1989                 free(name);
1990                 pmu_num--;
1991         }
1992         ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
1993         return 0;
1994
1995 error:
1996         strbuf_release(&sb);
1997         return -1;
1998 }
1999
2000 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2001 {
2002         size_t ret = -1;
2003         u32 i, nr, nr_groups;
2004         struct perf_session *session;
2005         struct perf_evsel *evsel, *leader = NULL;
2006         struct group_desc {
2007                 char *name;
2008                 u32 leader_idx;
2009                 u32 nr_members;
2010         } *desc;
2011
2012         if (do_read_u32(ff, &nr_groups))
2013                 return -1;
2014
2015         ff->ph->env.nr_groups = nr_groups;
2016         if (!nr_groups) {
2017                 pr_debug("group desc not available\n");
2018                 return 0;
2019         }
2020
2021         desc = calloc(nr_groups, sizeof(*desc));
2022         if (!desc)
2023                 return -1;
2024
2025         for (i = 0; i < nr_groups; i++) {
2026                 desc[i].name = do_read_string(ff);
2027                 if (!desc[i].name)
2028                         goto out_free;
2029
2030                 if (do_read_u32(ff, &desc[i].leader_idx))
2031                         goto out_free;
2032
2033                 if (do_read_u32(ff, &desc[i].nr_members))
2034                         goto out_free;
2035         }
2036
2037         /*
2038          * Rebuild group relationship based on the group_desc
2039          */
2040         session = container_of(ff->ph, struct perf_session, header);
2041         session->evlist->nr_groups = nr_groups;
2042
2043         i = nr = 0;
2044         evlist__for_each_entry(session->evlist, evsel) {
2045                 if (evsel->idx == (int) desc[i].leader_idx) {
2046                         evsel->leader = evsel;
2047                         /* {anon_group} is a dummy name */
2048                         if (strcmp(desc[i].name, "{anon_group}")) {
2049                                 evsel->group_name = desc[i].name;
2050                                 desc[i].name = NULL;
2051                         }
2052                         evsel->nr_members = desc[i].nr_members;
2053
2054                         if (i >= nr_groups || nr > 0) {
2055                                 pr_debug("invalid group desc\n");
2056                                 goto out_free;
2057                         }
2058
2059                         leader = evsel;
2060                         nr = evsel->nr_members - 1;
2061                         i++;
2062                 } else if (nr) {
2063                         /* This is a group member */
2064                         evsel->leader = leader;
2065
2066                         nr--;
2067                 }
2068         }
2069
2070         if (i != nr_groups || nr != 0) {
2071                 pr_debug("invalid group desc\n");
2072                 goto out_free;
2073         }
2074
2075         ret = 0;
2076 out_free:
2077         for (i = 0; i < nr_groups; i++)
2078                 zfree(&desc[i].name);
2079         free(desc);
2080
2081         return ret;
2082 }
2083
2084 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2085 {
2086         struct perf_session *session;
2087         int err;
2088
2089         session = container_of(ff->ph, struct perf_session, header);
2090
2091         err = auxtrace_index__process(ff->fd, ff->size, session,
2092                                       ff->ph->needs_swap);
2093         if (err < 0)
2094                 pr_err("Failed to process auxtrace index\n");
2095         return err;
2096 }
2097
2098 static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2099 {
2100         struct cpu_cache_level *caches;
2101         u32 cnt, i, version;
2102
2103         if (do_read_u32(ff, &version))
2104                 return -1;
2105
2106         if (version != 1)
2107                 return -1;
2108
2109         if (do_read_u32(ff, &cnt))
2110                 return -1;
2111
2112         caches = zalloc(sizeof(*caches) * cnt);
2113         if (!caches)
2114                 return -1;
2115
2116         for (i = 0; i < cnt; i++) {
2117                 struct cpu_cache_level c;
2118
2119                 #define _R(v)                                           \
2120                         if (do_read_u32(ff, &c.v))\
2121                                 goto out_free_caches;                   \
2122
2123                 _R(level)
2124                 _R(line_size)
2125                 _R(sets)
2126                 _R(ways)
2127                 #undef _R
2128
2129                 #define _R(v)                                   \
2130                         c.v = do_read_string(ff);               \
2131                         if (!c.v)                               \
2132                                 goto out_free_caches;
2133
2134                 _R(type)
2135                 _R(size)
2136                 _R(map)
2137                 #undef _R
2138
2139                 caches[i] = c;
2140         }
2141
2142         ff->ph->env.caches = caches;
2143         ff->ph->env.caches_cnt = cnt;
2144         return 0;
2145 out_free_caches:
2146         free(caches);
2147         return -1;
2148 }
2149
2150 struct feature_ops {
2151         int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
2152         void (*print)(struct feat_fd *ff, FILE *fp);
2153         int (*process)(struct feat_fd *ff, void *data);
2154         const char *name;
2155         bool full_only;
2156         bool synthesize;
2157 };
2158
2159 #define FEAT_OPR(n, func, __full_only) \
2160         [HEADER_##n] = {                                        \
2161                 .name       = __stringify(n),                   \
2162                 .write      = write_##func,                     \
2163                 .print      = print_##func,                     \
2164                 .full_only  = __full_only,                      \
2165                 .process    = process_##func,                   \
2166                 .synthesize = true                              \
2167         }
2168
2169 #define FEAT_OPN(n, func, __full_only) \
2170         [HEADER_##n] = {                                        \
2171                 .name       = __stringify(n),                   \
2172                 .write      = write_##func,                     \
2173                 .print      = print_##func,                     \
2174                 .full_only  = __full_only,                      \
2175                 .process    = process_##func                    \
2176         }
2177
2178 /* feature_ops not implemented: */
2179 #define print_tracing_data      NULL
2180 #define print_build_id          NULL
2181
2182 #define process_branch_stack    NULL
2183 #define process_stat            NULL
2184
2185
2186 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2187         FEAT_OPN(TRACING_DATA,  tracing_data,   false),
2188         FEAT_OPN(BUILD_ID,      build_id,       false),
2189         FEAT_OPR(HOSTNAME,      hostname,       false),
2190         FEAT_OPR(OSRELEASE,     osrelease,      false),
2191         FEAT_OPR(VERSION,       version,        false),
2192         FEAT_OPR(ARCH,          arch,           false),
2193         FEAT_OPR(NRCPUS,        nrcpus,         false),
2194         FEAT_OPR(CPUDESC,       cpudesc,        false),
2195         FEAT_OPR(CPUID,         cpuid,          false),
2196         FEAT_OPR(TOTAL_MEM,     total_mem,      false),
2197         FEAT_OPR(EVENT_DESC,    event_desc,     false),
2198         FEAT_OPR(CMDLINE,       cmdline,        false),
2199         FEAT_OPR(CPU_TOPOLOGY,  cpu_topology,   true),
2200         FEAT_OPR(NUMA_TOPOLOGY, numa_topology,  true),
2201         FEAT_OPN(BRANCH_STACK,  branch_stack,   false),
2202         FEAT_OPR(PMU_MAPPINGS,  pmu_mappings,   false),
2203         FEAT_OPN(GROUP_DESC,    group_desc,     false),
2204         FEAT_OPN(AUXTRACE,      auxtrace,       false),
2205         FEAT_OPN(STAT,          stat,           false),
2206         FEAT_OPN(CACHE,         cache,          true),
2207 };
2208
2209 struct header_print_data {
2210         FILE *fp;
2211         bool full; /* extended list of headers */
2212 };
2213
2214 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2215                                            struct perf_header *ph,
2216                                            int feat, int fd, void *data)
2217 {
2218         struct header_print_data *hd = data;
2219         struct feat_fd ff;
2220
2221         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2222                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2223                                 "%d, continuing...\n", section->offset, feat);
2224                 return 0;
2225         }
2226         if (feat >= HEADER_LAST_FEATURE) {
2227                 pr_warning("unknown feature %d\n", feat);
2228                 return 0;
2229         }
2230         if (!feat_ops[feat].print)
2231                 return 0;
2232
2233         ff = (struct  feat_fd) {
2234                 .fd = fd,
2235                 .ph = ph,
2236         };
2237
2238         if (!feat_ops[feat].full_only || hd->full)
2239                 feat_ops[feat].print(&ff, hd->fp);
2240         else
2241                 fprintf(hd->fp, "# %s info available, use -I to display\n",
2242                         feat_ops[feat].name);
2243
2244         return 0;
2245 }
2246
2247 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2248 {
2249         struct header_print_data hd;
2250         struct perf_header *header = &session->header;
2251         int fd = perf_data__fd(session->data);
2252         struct stat st;
2253         int ret, bit;
2254
2255         hd.fp = fp;
2256         hd.full = full;
2257
2258         ret = fstat(fd, &st);
2259         if (ret == -1)
2260                 return -1;
2261
2262         fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2263
2264         perf_header__process_sections(header, fd, &hd,
2265                                       perf_file_section__fprintf_info);
2266
2267         if (session->data->is_pipe)
2268                 return 0;
2269
2270         fprintf(fp, "# missing features: ");
2271         for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2272                 if (bit)
2273                         fprintf(fp, "%s ", feat_ops[bit].name);
2274         }
2275
2276         fprintf(fp, "\n");
2277         return 0;
2278 }
2279
2280 static int do_write_feat(struct feat_fd *ff, int type,
2281                          struct perf_file_section **p,
2282                          struct perf_evlist *evlist)
2283 {
2284         int err;
2285         int ret = 0;
2286
2287         if (perf_header__has_feat(ff->ph, type)) {
2288                 if (!feat_ops[type].write)
2289                         return -1;
2290
2291                 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
2292                         return -1;
2293
2294                 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
2295
2296                 err = feat_ops[type].write(ff, evlist);
2297                 if (err < 0) {
2298                         pr_debug("failed to write feature %s\n", feat_ops[type].name);
2299
2300                         /* undo anything written */
2301                         lseek(ff->fd, (*p)->offset, SEEK_SET);
2302
2303                         return -1;
2304                 }
2305                 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
2306                 (*p)++;
2307         }
2308         return ret;
2309 }
2310
2311 static int perf_header__adds_write(struct perf_header *header,
2312                                    struct perf_evlist *evlist, int fd)
2313 {
2314         int nr_sections;
2315         struct feat_fd ff;
2316         struct perf_file_section *feat_sec, *p;
2317         int sec_size;
2318         u64 sec_start;
2319         int feat;
2320         int err;
2321
2322         ff = (struct feat_fd){
2323                 .fd  = fd,
2324                 .ph = header,
2325         };
2326
2327         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2328         if (!nr_sections)
2329                 return 0;
2330
2331         feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2332         if (feat_sec == NULL)
2333                 return -ENOMEM;
2334
2335         sec_size = sizeof(*feat_sec) * nr_sections;
2336
2337         sec_start = header->feat_offset;
2338         lseek(fd, sec_start + sec_size, SEEK_SET);
2339
2340         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2341                 if (do_write_feat(&ff, feat, &p, evlist))
2342                         perf_header__clear_feat(header, feat);
2343         }
2344
2345         lseek(fd, sec_start, SEEK_SET);
2346         /*
2347          * may write more than needed due to dropped feature, but
2348          * this is okay, reader will skip the mising entries
2349          */
2350         err = do_write(&ff, feat_sec, sec_size);
2351         if (err < 0)
2352                 pr_debug("failed to write feature section\n");
2353         free(feat_sec);
2354         return err;
2355 }
2356
2357 int perf_header__write_pipe(int fd)
2358 {
2359         struct perf_pipe_file_header f_header;
2360         struct feat_fd ff;
2361         int err;
2362
2363         ff = (struct feat_fd){ .fd = fd };
2364
2365         f_header = (struct perf_pipe_file_header){
2366                 .magic     = PERF_MAGIC,
2367                 .size      = sizeof(f_header),
2368         };
2369
2370         err = do_write(&ff, &f_header, sizeof(f_header));
2371         if (err < 0) {
2372                 pr_debug("failed to write perf pipe header\n");
2373                 return err;
2374         }
2375
2376         return 0;
2377 }
2378
2379 int perf_session__write_header(struct perf_session *session,
2380                                struct perf_evlist *evlist,
2381                                int fd, bool at_exit)
2382 {
2383         struct perf_file_header f_header;
2384         struct perf_file_attr   f_attr;
2385         struct perf_header *header = &session->header;
2386         struct perf_evsel *evsel;
2387         struct feat_fd ff;
2388         u64 attr_offset;
2389         int err;
2390
2391         ff = (struct feat_fd){ .fd = fd};
2392         lseek(fd, sizeof(f_header), SEEK_SET);
2393
2394         evlist__for_each_entry(session->evlist, evsel) {
2395                 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2396                 err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
2397                 if (err < 0) {
2398                         pr_debug("failed to write perf header\n");
2399                         return err;
2400                 }
2401         }
2402
2403         attr_offset = lseek(ff.fd, 0, SEEK_CUR);
2404
2405         evlist__for_each_entry(evlist, evsel) {
2406                 f_attr = (struct perf_file_attr){
2407                         .attr = evsel->attr,
2408                         .ids  = {
2409                                 .offset = evsel->id_offset,
2410                                 .size   = evsel->ids * sizeof(u64),
2411                         }
2412                 };
2413                 err = do_write(&ff, &f_attr, sizeof(f_attr));
2414                 if (err < 0) {
2415                         pr_debug("failed to write perf header attribute\n");
2416                         return err;
2417                 }
2418         }
2419
2420         if (!header->data_offset)
2421                 header->data_offset = lseek(fd, 0, SEEK_CUR);
2422         header->feat_offset = header->data_offset + header->data_size;
2423
2424         if (at_exit) {
2425                 err = perf_header__adds_write(header, evlist, fd);
2426                 if (err < 0)
2427                         return err;
2428         }
2429
2430         f_header = (struct perf_file_header){
2431                 .magic     = PERF_MAGIC,
2432                 .size      = sizeof(f_header),
2433                 .attr_size = sizeof(f_attr),
2434                 .attrs = {
2435                         .offset = attr_offset,
2436                         .size   = evlist->nr_entries * sizeof(f_attr),
2437                 },
2438                 .data = {
2439                         .offset = header->data_offset,
2440                         .size   = header->data_size,
2441                 },
2442                 /* event_types is ignored, store zeros */
2443         };
2444
2445         memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2446
2447         lseek(fd, 0, SEEK_SET);
2448         err = do_write(&ff, &f_header, sizeof(f_header));
2449         if (err < 0) {
2450                 pr_debug("failed to write perf header\n");
2451                 return err;
2452         }
2453         lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2454
2455         return 0;
2456 }
2457
2458 static int perf_header__getbuffer64(struct perf_header *header,
2459                                     int fd, void *buf, size_t size)
2460 {
2461         if (readn(fd, buf, size) <= 0)
2462                 return -1;
2463
2464         if (header->needs_swap)
2465                 mem_bswap_64(buf, size);
2466
2467         return 0;
2468 }
2469
2470 int perf_header__process_sections(struct perf_header *header, int fd,
2471                                   void *data,
2472                                   int (*process)(struct perf_file_section *section,
2473                                                  struct perf_header *ph,
2474                                                  int feat, int fd, void *data))
2475 {
2476         struct perf_file_section *feat_sec, *sec;
2477         int nr_sections;
2478         int sec_size;
2479         int feat;
2480         int err;
2481
2482         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2483         if (!nr_sections)
2484                 return 0;
2485
2486         feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2487         if (!feat_sec)
2488                 return -1;
2489
2490         sec_size = sizeof(*feat_sec) * nr_sections;
2491
2492         lseek(fd, header->feat_offset, SEEK_SET);
2493
2494         err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2495         if (err < 0)
2496                 goto out_free;
2497
2498         for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2499                 err = process(sec++, header, feat, fd, data);
2500                 if (err < 0)
2501                         goto out_free;
2502         }
2503         err = 0;
2504 out_free:
2505         free(feat_sec);
2506         return err;
2507 }
2508
2509 static const int attr_file_abi_sizes[] = {
2510         [0] = PERF_ATTR_SIZE_VER0,
2511         [1] = PERF_ATTR_SIZE_VER1,
2512         [2] = PERF_ATTR_SIZE_VER2,
2513         [3] = PERF_ATTR_SIZE_VER3,
2514         [4] = PERF_ATTR_SIZE_VER4,
2515         0,
2516 };
2517
2518 /*
2519  * In the legacy file format, the magic number is not used to encode endianness.
2520  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
2521  * on ABI revisions, we need to try all combinations for all endianness to
2522  * detect the endianness.
2523  */
2524 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2525 {
2526         uint64_t ref_size, attr_size;
2527         int i;
2528
2529         for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2530                 ref_size = attr_file_abi_sizes[i]
2531                          + sizeof(struct perf_file_section);
2532                 if (hdr_sz != ref_size) {
2533                         attr_size = bswap_64(hdr_sz);
2534                         if (attr_size != ref_size)
2535                                 continue;
2536
2537                         ph->needs_swap = true;
2538                 }
2539                 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2540                          i,
2541                          ph->needs_swap);
2542                 return 0;
2543         }
2544         /* could not determine endianness */
2545         return -1;
2546 }
2547
2548 #define PERF_PIPE_HDR_VER0      16
2549
2550 static const size_t attr_pipe_abi_sizes[] = {
2551         [0] = PERF_PIPE_HDR_VER0,
2552         0,
2553 };
2554
2555 /*
2556  * In the legacy pipe format, there is an implicit assumption that endiannesss
2557  * between host recording the samples, and host parsing the samples is the
2558  * same. This is not always the case given that the pipe output may always be
2559  * redirected into a file and analyzed on a different machine with possibly a
2560  * different endianness and perf_event ABI revsions in the perf tool itself.
2561  */
2562 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2563 {
2564         u64 attr_size;
2565         int i;
2566
2567         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2568                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2569                         attr_size = bswap_64(hdr_sz);
2570                         if (attr_size != hdr_sz)
2571                                 continue;
2572
2573                         ph->needs_swap = true;
2574                 }
2575                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2576                 return 0;
2577         }
2578         return -1;
2579 }
2580
2581 bool is_perf_magic(u64 magic)
2582 {
2583         if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2584                 || magic == __perf_magic2
2585                 || magic == __perf_magic2_sw)
2586                 return true;
2587
2588         return false;
2589 }
2590
2591 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2592                               bool is_pipe, struct perf_header *ph)
2593 {
2594         int ret;
2595
2596         /* check for legacy format */
2597         ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2598         if (ret == 0) {
2599                 ph->version = PERF_HEADER_VERSION_1;
2600                 pr_debug("legacy perf.data format\n");
2601                 if (is_pipe)
2602                         return try_all_pipe_abis(hdr_sz, ph);
2603
2604                 return try_all_file_abis(hdr_sz, ph);
2605         }
2606         /*
2607          * the new magic number serves two purposes:
2608          * - unique number to identify actual perf.data files
2609          * - encode endianness of file
2610          */
2611         ph->version = PERF_HEADER_VERSION_2;
2612
2613         /* check magic number with one endianness */
2614         if (magic == __perf_magic2)
2615                 return 0;
2616
2617         /* check magic number with opposite endianness */
2618         if (magic != __perf_magic2_sw)
2619                 return -1;
2620
2621         ph->needs_swap = true;
2622
2623         return 0;
2624 }
2625
2626 int perf_file_header__read(struct perf_file_header *header,
2627                            struct perf_header *ph, int fd)
2628 {
2629         ssize_t ret;
2630
2631         lseek(fd, 0, SEEK_SET);
2632
2633         ret = readn(fd, header, sizeof(*header));
2634         if (ret <= 0)
2635                 return -1;
2636
2637         if (check_magic_endian(header->magic,
2638                                header->attr_size, false, ph) < 0) {
2639                 pr_debug("magic/endian check failed\n");
2640                 return -1;
2641         }
2642
2643         if (ph->needs_swap) {
2644                 mem_bswap_64(header, offsetof(struct perf_file_header,
2645                              adds_features));
2646         }
2647
2648         if (header->size != sizeof(*header)) {
2649                 /* Support the previous format */
2650                 if (header->size == offsetof(typeof(*header), adds_features))
2651                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2652                 else
2653                         return -1;
2654         } else if (ph->needs_swap) {
2655                 /*
2656                  * feature bitmap is declared as an array of unsigned longs --
2657                  * not good since its size can differ between the host that
2658                  * generated the data file and the host analyzing the file.
2659                  *
2660                  * We need to handle endianness, but we don't know the size of
2661                  * the unsigned long where the file was generated. Take a best
2662                  * guess at determining it: try 64-bit swap first (ie., file
2663                  * created on a 64-bit host), and check if the hostname feature
2664                  * bit is set (this feature bit is forced on as of fbe96f2).
2665                  * If the bit is not, undo the 64-bit swap and try a 32-bit
2666                  * swap. If the hostname bit is still not set (e.g., older data
2667                  * file), punt and fallback to the original behavior --
2668                  * clearing all feature bits and setting buildid.
2669                  */
2670                 mem_bswap_64(&header->adds_features,
2671                             BITS_TO_U64(HEADER_FEAT_BITS));
2672
2673                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2674                         /* unswap as u64 */
2675                         mem_bswap_64(&header->adds_features,
2676                                     BITS_TO_U64(HEADER_FEAT_BITS));
2677
2678                         /* unswap as u32 */
2679                         mem_bswap_32(&header->adds_features,
2680                                     BITS_TO_U32(HEADER_FEAT_BITS));
2681                 }
2682
2683                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2684                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2685                         set_bit(HEADER_BUILD_ID, header->adds_features);
2686                 }
2687         }
2688
2689         memcpy(&ph->adds_features, &header->adds_features,
2690                sizeof(ph->adds_features));
2691
2692         ph->data_offset  = header->data.offset;
2693         ph->data_size    = header->data.size;
2694         ph->feat_offset  = header->data.offset + header->data.size;
2695         return 0;
2696 }
2697
2698 static int perf_file_section__process(struct perf_file_section *section,
2699                                       struct perf_header *ph,
2700                                       int feat, int fd, void *data)
2701 {
2702         struct feat_fd fdd = {
2703                 .fd     = fd,
2704                 .ph     = ph,
2705                 .size   = section->size,
2706                 .offset = section->offset,
2707         };
2708
2709         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2710                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2711                           "%d, continuing...\n", section->offset, feat);
2712                 return 0;
2713         }
2714
2715         if (feat >= HEADER_LAST_FEATURE) {
2716                 pr_debug("unknown feature %d, continuing...\n", feat);
2717                 return 0;
2718         }
2719
2720         if (!feat_ops[feat].process)
2721                 return 0;
2722
2723         return feat_ops[feat].process(&fdd, data);
2724 }
2725
2726 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2727                                        struct perf_header *ph, int fd,
2728                                        bool repipe)
2729 {
2730         struct feat_fd ff = {
2731                 .fd = STDOUT_FILENO,
2732                 .ph = ph,
2733         };
2734         ssize_t ret;
2735
2736         ret = readn(fd, header, sizeof(*header));
2737         if (ret <= 0)
2738                 return -1;
2739
2740         if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2741                 pr_debug("endian/magic failed\n");
2742                 return -1;
2743         }
2744
2745         if (ph->needs_swap)
2746                 header->size = bswap_64(header->size);
2747
2748         if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
2749                 return -1;
2750
2751         return 0;
2752 }
2753
2754 static int perf_header__read_pipe(struct perf_session *session)
2755 {
2756         struct perf_header *header = &session->header;
2757         struct perf_pipe_file_header f_header;
2758
2759         if (perf_file_header__read_pipe(&f_header, header,
2760                                         perf_data__fd(session->data),
2761                                         session->repipe) < 0) {
2762                 pr_debug("incompatible file format\n");
2763                 return -EINVAL;
2764         }
2765
2766         return 0;
2767 }
2768
2769 static int read_attr(int fd, struct perf_header *ph,
2770                      struct perf_file_attr *f_attr)
2771 {
2772         struct perf_event_attr *attr = &f_attr->attr;
2773         size_t sz, left;
2774         size_t our_sz = sizeof(f_attr->attr);
2775         ssize_t ret;
2776
2777         memset(f_attr, 0, sizeof(*f_attr));
2778
2779         /* read minimal guaranteed structure */
2780         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2781         if (ret <= 0) {
2782                 pr_debug("cannot read %d bytes of header attr\n",
2783                          PERF_ATTR_SIZE_VER0);
2784                 return -1;
2785         }
2786
2787         /* on file perf_event_attr size */
2788         sz = attr->size;
2789
2790         if (ph->needs_swap)
2791                 sz = bswap_32(sz);
2792
2793         if (sz == 0) {
2794                 /* assume ABI0 */
2795                 sz =  PERF_ATTR_SIZE_VER0;
2796         } else if (sz > our_sz) {
2797                 pr_debug("file uses a more recent and unsupported ABI"
2798                          " (%zu bytes extra)\n", sz - our_sz);
2799                 return -1;
2800         }
2801         /* what we have not yet read and that we know about */
2802         left = sz - PERF_ATTR_SIZE_VER0;
2803         if (left) {
2804                 void *ptr = attr;
2805                 ptr += PERF_ATTR_SIZE_VER0;
2806
2807                 ret = readn(fd, ptr, left);
2808         }
2809         /* read perf_file_section, ids are read in caller */
2810         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2811
2812         return ret <= 0 ? -1 : 0;
2813 }
2814
2815 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2816                                                 struct pevent *pevent)
2817 {
2818         struct event_format *event;
2819         char bf[128];
2820
2821         /* already prepared */
2822         if (evsel->tp_format)
2823                 return 0;
2824
2825         if (pevent == NULL) {
2826                 pr_debug("broken or missing trace data\n");
2827                 return -1;
2828         }
2829
2830         event = pevent_find_event(pevent, evsel->attr.config);
2831         if (event == NULL) {
2832                 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
2833                 return -1;
2834         }
2835
2836         if (!evsel->name) {
2837                 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2838                 evsel->name = strdup(bf);
2839                 if (evsel->name == NULL)
2840                         return -1;
2841         }
2842
2843         evsel->tp_format = event;
2844         return 0;
2845 }
2846
2847 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2848                                                   struct pevent *pevent)
2849 {
2850         struct perf_evsel *pos;
2851
2852         evlist__for_each_entry(evlist, pos) {
2853                 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2854                     perf_evsel__prepare_tracepoint_event(pos, pevent))
2855                         return -1;
2856         }
2857
2858         return 0;
2859 }
2860
2861 int perf_session__read_header(struct perf_session *session)
2862 {
2863         struct perf_data *data = session->data;
2864         struct perf_header *header = &session->header;
2865         struct perf_file_header f_header;
2866         struct perf_file_attr   f_attr;
2867         u64                     f_id;
2868         int nr_attrs, nr_ids, i, j;
2869         int fd = perf_data__fd(data);
2870
2871         session->evlist = perf_evlist__new();
2872         if (session->evlist == NULL)
2873                 return -ENOMEM;
2874
2875         session->evlist->env = &header->env;
2876         session->machines.host.env = &header->env;
2877         if (perf_data__is_pipe(data))
2878                 return perf_header__read_pipe(session);
2879
2880         if (perf_file_header__read(&f_header, header, fd) < 0)
2881                 return -EINVAL;
2882
2883         /*
2884          * Sanity check that perf.data was written cleanly; data size is
2885          * initialized to 0 and updated only if the on_exit function is run.
2886          * If data size is still 0 then the file contains only partial
2887          * information.  Just warn user and process it as much as it can.
2888          */
2889         if (f_header.data.size == 0) {
2890                 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2891                            "Was the 'perf record' command properly terminated?\n",
2892                            data->path);
2893         }
2894
2895         nr_attrs = f_header.attrs.size / f_header.attr_size;
2896         lseek(fd, f_header.attrs.offset, SEEK_SET);
2897
2898         for (i = 0; i < nr_attrs; i++) {
2899                 struct perf_evsel *evsel;
2900                 off_t tmp;
2901
2902                 if (read_attr(fd, header, &f_attr) < 0)
2903                         goto out_errno;
2904
2905                 if (header->needs_swap) {
2906                         f_attr.ids.size   = bswap_64(f_attr.ids.size);
2907                         f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2908                         perf_event__attr_swap(&f_attr.attr);
2909                 }
2910
2911                 tmp = lseek(fd, 0, SEEK_CUR);
2912                 evsel = perf_evsel__new(&f_attr.attr);
2913
2914                 if (evsel == NULL)
2915                         goto out_delete_evlist;
2916
2917                 evsel->needs_swap = header->needs_swap;
2918                 /*
2919                  * Do it before so that if perf_evsel__alloc_id fails, this
2920                  * entry gets purged too at perf_evlist__delete().
2921                  */
2922                 perf_evlist__add(session->evlist, evsel);
2923
2924                 nr_ids = f_attr.ids.size / sizeof(u64);
2925                 /*
2926                  * We don't have the cpu and thread maps on the header, so
2927                  * for allocating the perf_sample_id table we fake 1 cpu and
2928                  * hattr->ids threads.
2929                  */
2930                 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2931                         goto out_delete_evlist;
2932
2933                 lseek(fd, f_attr.ids.offset, SEEK_SET);
2934
2935                 for (j = 0; j < nr_ids; j++) {
2936                         if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2937                                 goto out_errno;
2938
2939                         perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2940                 }
2941
2942                 lseek(fd, tmp, SEEK_SET);
2943         }
2944
2945         symbol_conf.nr_events = nr_attrs;
2946
2947         perf_header__process_sections(header, fd, &session->tevent,
2948                                       perf_file_section__process);
2949
2950         if (perf_evlist__prepare_tracepoint_events(session->evlist,
2951                                                    session->tevent.pevent))
2952                 goto out_delete_evlist;
2953
2954         return 0;
2955 out_errno:
2956         return -errno;
2957
2958 out_delete_evlist:
2959         perf_evlist__delete(session->evlist);
2960         session->evlist = NULL;
2961         return -ENOMEM;
2962 }
2963
2964 int perf_event__synthesize_attr(struct perf_tool *tool,
2965                                 struct perf_event_attr *attr, u32 ids, u64 *id,
2966                                 perf_event__handler_t process)
2967 {
2968         union perf_event *ev;
2969         size_t size;
2970         int err;
2971
2972         size = sizeof(struct perf_event_attr);
2973         size = PERF_ALIGN(size, sizeof(u64));
2974         size += sizeof(struct perf_event_header);
2975         size += ids * sizeof(u64);
2976
2977         ev = malloc(size);
2978
2979         if (ev == NULL)
2980                 return -ENOMEM;
2981
2982         ev->attr.attr = *attr;
2983         memcpy(ev->attr.id, id, ids * sizeof(u64));
2984
2985         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2986         ev->attr.header.size = (u16)size;
2987
2988         if (ev->attr.header.size == size)
2989                 err = process(tool, ev, NULL, NULL);
2990         else
2991                 err = -E2BIG;
2992
2993         free(ev);
2994
2995         return err;
2996 }
2997
2998 int perf_event__synthesize_features(struct perf_tool *tool,
2999                                     struct perf_session *session,
3000                                     struct perf_evlist *evlist,
3001                                     perf_event__handler_t process)
3002 {
3003         struct perf_header *header = &session->header;
3004         struct feat_fd ff;
3005         struct feature_event *fe;
3006         size_t sz, sz_hdr;
3007         int feat, ret;
3008
3009         sz_hdr = sizeof(fe->header);
3010         sz = sizeof(union perf_event);
3011         /* get a nice alignment */
3012         sz = PERF_ALIGN(sz, page_size);
3013
3014         memset(&ff, 0, sizeof(ff));
3015
3016         ff.buf = malloc(sz);
3017         if (!ff.buf)
3018                 return -ENOMEM;
3019
3020         ff.size = sz - sz_hdr;
3021
3022         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3023                 if (!feat_ops[feat].synthesize) {
3024                         pr_debug("No record header feature for header :%d\n", feat);
3025                         continue;
3026                 }
3027
3028                 ff.offset = sizeof(*fe);
3029
3030                 ret = feat_ops[feat].write(&ff, evlist);
3031                 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
3032                         pr_debug("Error writing feature\n");
3033                         continue;
3034                 }
3035                 /* ff.buf may have changed due to realloc in do_write() */
3036                 fe = ff.buf;
3037                 memset(fe, 0, sizeof(*fe));
3038
3039                 fe->feat_id = feat;
3040                 fe->header.type = PERF_RECORD_HEADER_FEATURE;
3041                 fe->header.size = ff.offset;
3042
3043                 ret = process(tool, ff.buf, NULL, NULL);
3044                 if (ret) {
3045                         free(ff.buf);
3046                         return ret;
3047                 }
3048         }
3049         free(ff.buf);
3050         return 0;
3051 }
3052
3053 int perf_event__process_feature(struct perf_tool *tool,
3054                                 union perf_event *event,
3055                                 struct perf_session *session __maybe_unused)
3056 {
3057         struct feat_fd ff = { .fd = 0 };
3058         struct feature_event *fe = (struct feature_event *)event;
3059         int type = fe->header.type;
3060         u64 feat = fe->feat_id;
3061
3062         if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
3063                 pr_warning("invalid record type %d in pipe-mode\n", type);
3064                 return 0;
3065         }
3066         if (feat == HEADER_RESERVED || feat > HEADER_LAST_FEATURE) {
3067                 pr_warning("invalid record type %d in pipe-mode\n", type);
3068                 return -1;
3069         }
3070
3071         if (!feat_ops[feat].process)
3072                 return 0;
3073
3074         ff.buf  = (void *)fe->data;
3075         ff.size = event->header.size - sizeof(event->header);
3076         ff.ph = &session->header;
3077
3078         if (feat_ops[feat].process(&ff, NULL))
3079                 return -1;
3080
3081         if (!feat_ops[feat].print || !tool->show_feat_hdr)
3082                 return 0;
3083
3084         if (!feat_ops[feat].full_only ||
3085             tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
3086                 feat_ops[feat].print(&ff, stdout);
3087         } else {
3088                 fprintf(stdout, "# %s info available, use -I to display\n",
3089                         feat_ops[feat].name);
3090         }
3091
3092         return 0;
3093 }
3094
3095 static struct event_update_event *
3096 event_update_event__new(size_t size, u64 type, u64 id)
3097 {
3098         struct event_update_event *ev;
3099
3100         size += sizeof(*ev);
3101         size  = PERF_ALIGN(size, sizeof(u64));
3102
3103         ev = zalloc(size);
3104         if (ev) {
3105                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3106                 ev->header.size = (u16)size;
3107                 ev->type = type;
3108                 ev->id = id;
3109         }
3110         return ev;
3111 }
3112
3113 int
3114 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3115                                          struct perf_evsel *evsel,
3116                                          perf_event__handler_t process)
3117 {
3118         struct event_update_event *ev;
3119         size_t size = strlen(evsel->unit);
3120         int err;
3121
3122         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3123         if (ev == NULL)
3124                 return -ENOMEM;
3125
3126         strncpy(ev->data, evsel->unit, size);
3127         err = process(tool, (union perf_event *)ev, NULL, NULL);
3128         free(ev);
3129         return err;
3130 }
3131
3132 int
3133 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3134                                           struct perf_evsel *evsel,
3135                                           perf_event__handler_t process)
3136 {
3137         struct event_update_event *ev;
3138         struct event_update_event_scale *ev_data;
3139         int err;
3140
3141         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3142         if (ev == NULL)
3143                 return -ENOMEM;
3144
3145         ev_data = (struct event_update_event_scale *) ev->data;
3146         ev_data->scale = evsel->scale;
3147         err = process(tool, (union perf_event*) ev, NULL, NULL);
3148         free(ev);
3149         return err;
3150 }
3151
3152 int
3153 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3154                                          struct perf_evsel *evsel,
3155                                          perf_event__handler_t process)
3156 {
3157         struct event_update_event *ev;
3158         size_t len = strlen(evsel->name);
3159         int err;
3160
3161         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3162         if (ev == NULL)
3163                 return -ENOMEM;
3164
3165         strncpy(ev->data, evsel->name, len);
3166         err = process(tool, (union perf_event*) ev, NULL, NULL);
3167         free(ev);
3168         return err;
3169 }
3170
3171 int
3172 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3173                                         struct perf_evsel *evsel,
3174                                         perf_event__handler_t process)
3175 {
3176         size_t size = sizeof(struct event_update_event);
3177         struct event_update_event *ev;
3178         int max, err;
3179         u16 type;
3180
3181         if (!evsel->own_cpus)
3182                 return 0;
3183
3184         ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3185         if (!ev)
3186                 return -ENOMEM;
3187
3188         ev->header.type = PERF_RECORD_EVENT_UPDATE;
3189         ev->header.size = (u16)size;
3190         ev->type = PERF_EVENT_UPDATE__CPUS;
3191         ev->id   = evsel->id[0];
3192
3193         cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3194                                  evsel->own_cpus,
3195                                  type, max);
3196
3197         err = process(tool, (union perf_event*) ev, NULL, NULL);
3198         free(ev);
3199         return err;
3200 }
3201
3202 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3203 {
3204         struct event_update_event *ev = &event->event_update;
3205         struct event_update_event_scale *ev_scale;
3206         struct event_update_event_cpus *ev_cpus;
3207         struct cpu_map *map;
3208         size_t ret;
3209
3210         ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3211
3212         switch (ev->type) {
3213         case PERF_EVENT_UPDATE__SCALE:
3214                 ev_scale = (struct event_update_event_scale *) ev->data;
3215                 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3216                 break;
3217         case PERF_EVENT_UPDATE__UNIT:
3218                 ret += fprintf(fp, "... unit:  %s\n", ev->data);
3219                 break;
3220         case PERF_EVENT_UPDATE__NAME:
3221                 ret += fprintf(fp, "... name:  %s\n", ev->data);
3222                 break;
3223         case PERF_EVENT_UPDATE__CPUS:
3224                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3225                 ret += fprintf(fp, "... ");
3226
3227                 map = cpu_map__new_data(&ev_cpus->cpus);
3228                 if (map)
3229                         ret += cpu_map__fprintf(map, fp);
3230                 else
3231                         ret += fprintf(fp, "failed to get cpus\n");
3232                 break;
3233         default:
3234                 ret += fprintf(fp, "... unknown type\n");
3235                 break;
3236         }
3237
3238         return ret;
3239 }
3240
3241 int perf_event__synthesize_attrs(struct perf_tool *tool,
3242                                    struct perf_session *session,
3243                                    perf_event__handler_t process)
3244 {
3245         struct perf_evsel *evsel;
3246         int err = 0;
3247
3248         evlist__for_each_entry(session->evlist, evsel) {
3249                 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3250                                                   evsel->id, process);
3251                 if (err) {
3252                         pr_debug("failed to create perf header attribute\n");
3253                         return err;
3254                 }
3255         }
3256
3257         return err;
3258 }
3259
3260 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3261                              union perf_event *event,
3262                              struct perf_evlist **pevlist)
3263 {
3264         u32 i, ids, n_ids;
3265         struct perf_evsel *evsel;
3266         struct perf_evlist *evlist = *pevlist;
3267
3268         if (evlist == NULL) {
3269                 *pevlist = evlist = perf_evlist__new();
3270                 if (evlist == NULL)
3271                         return -ENOMEM;
3272         }
3273
3274         evsel = perf_evsel__new(&event->attr.attr);
3275         if (evsel == NULL)
3276                 return -ENOMEM;
3277
3278         perf_evlist__add(evlist, evsel);
3279
3280         ids = event->header.size;
3281         ids -= (void *)&event->attr.id - (void *)event;
3282         n_ids = ids / sizeof(u64);
3283         /*
3284          * We don't have the cpu and thread maps on the header, so
3285          * for allocating the perf_sample_id table we fake 1 cpu and
3286          * hattr->ids threads.
3287          */
3288         if (perf_evsel__alloc_id(evsel, 1, n_ids))
3289                 return -ENOMEM;
3290
3291         for (i = 0; i < n_ids; i++) {
3292                 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3293         }
3294
3295         symbol_conf.nr_events = evlist->nr_entries;
3296
3297         return 0;
3298 }
3299
3300 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3301                                      union perf_event *event,
3302                                      struct perf_evlist **pevlist)
3303 {
3304         struct event_update_event *ev = &event->event_update;
3305         struct event_update_event_scale *ev_scale;
3306         struct event_update_event_cpus *ev_cpus;
3307         struct perf_evlist *evlist;
3308         struct perf_evsel *evsel;
3309         struct cpu_map *map;
3310
3311         if (!pevlist || *pevlist == NULL)
3312                 return -EINVAL;
3313
3314         evlist = *pevlist;
3315
3316         evsel = perf_evlist__id2evsel(evlist, ev->id);
3317         if (evsel == NULL)
3318                 return -EINVAL;
3319
3320         switch (ev->type) {
3321         case PERF_EVENT_UPDATE__UNIT:
3322                 evsel->unit = strdup(ev->data);
3323                 break;
3324         case PERF_EVENT_UPDATE__NAME:
3325                 evsel->name = strdup(ev->data);
3326                 break;
3327         case PERF_EVENT_UPDATE__SCALE:
3328                 ev_scale = (struct event_update_event_scale *) ev->data;
3329                 evsel->scale = ev_scale->scale;
3330                 break;
3331         case PERF_EVENT_UPDATE__CPUS:
3332                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3333
3334                 map = cpu_map__new_data(&ev_cpus->cpus);
3335                 if (map)
3336                         evsel->own_cpus = map;
3337                 else
3338                         pr_err("failed to get event_update cpus\n");
3339         default:
3340                 break;
3341         }
3342
3343         return 0;
3344 }
3345
3346 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3347                                         struct perf_evlist *evlist,
3348                                         perf_event__handler_t process)
3349 {
3350         union perf_event ev;
3351         struct tracing_data *tdata;
3352         ssize_t size = 0, aligned_size = 0, padding;
3353         struct feat_fd ff;
3354         int err __maybe_unused = 0;
3355
3356         /*
3357          * We are going to store the size of the data followed
3358          * by the data contents. Since the fd descriptor is a pipe,
3359          * we cannot seek back to store the size of the data once
3360          * we know it. Instead we:
3361          *
3362          * - write the tracing data to the temp file
3363          * - get/write the data size to pipe
3364          * - write the tracing data from the temp file
3365          *   to the pipe
3366          */
3367         tdata = tracing_data_get(&evlist->entries, fd, true);
3368         if (!tdata)
3369                 return -1;
3370
3371         memset(&ev, 0, sizeof(ev));
3372
3373         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3374         size = tdata->size;
3375         aligned_size = PERF_ALIGN(size, sizeof(u64));
3376         padding = aligned_size - size;
3377         ev.tracing_data.header.size = sizeof(ev.tracing_data);
3378         ev.tracing_data.size = aligned_size;
3379
3380         process(tool, &ev, NULL, NULL);
3381
3382         /*
3383          * The put function will copy all the tracing data
3384          * stored in temp file to the pipe.
3385          */
3386         tracing_data_put(tdata);
3387
3388         ff = (struct feat_fd){ .fd = fd };
3389         if (write_padded(&ff, NULL, 0, padding))
3390                 return -1;
3391
3392         return aligned_size;
3393 }
3394
3395 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3396                                      union perf_event *event,
3397                                      struct perf_session *session)
3398 {
3399         ssize_t size_read, padding, size = event->tracing_data.size;
3400         int fd = perf_data__fd(session->data);
3401         off_t offset = lseek(fd, 0, SEEK_CUR);
3402         char buf[BUFSIZ];
3403
3404         /* setup for reading amidst mmap */
3405         lseek(fd, offset + sizeof(struct tracing_data_event),
3406               SEEK_SET);
3407
3408         size_read = trace_report(fd, &session->tevent,
3409                                  session->repipe);
3410         padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3411
3412         if (readn(fd, buf, padding) < 0) {
3413                 pr_err("%s: reading input file", __func__);
3414                 return -1;
3415         }
3416         if (session->repipe) {
3417                 int retw = write(STDOUT_FILENO, buf, padding);
3418                 if (retw <= 0 || retw != padding) {
3419                         pr_err("%s: repiping tracing data padding", __func__);
3420                         return -1;
3421                 }
3422         }
3423
3424         if (size_read + padding != size) {
3425                 pr_err("%s: tracing data size mismatch", __func__);
3426                 return -1;
3427         }
3428
3429         perf_evlist__prepare_tracepoint_events(session->evlist,
3430                                                session->tevent.pevent);
3431
3432         return size_read + padding;
3433 }
3434
3435 int perf_event__synthesize_build_id(struct perf_tool *tool,
3436                                     struct dso *pos, u16 misc,
3437                                     perf_event__handler_t process,
3438                                     struct machine *machine)
3439 {
3440         union perf_event ev;
3441         size_t len;
3442         int err = 0;
3443
3444         if (!pos->hit)
3445                 return err;
3446
3447         memset(&ev, 0, sizeof(ev));
3448
3449         len = pos->long_name_len + 1;
3450         len = PERF_ALIGN(len, NAME_ALIGN);
3451         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3452         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3453         ev.build_id.header.misc = misc;
3454         ev.build_id.pid = machine->pid;
3455         ev.build_id.header.size = sizeof(ev.build_id) + len;
3456         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3457
3458         err = process(tool, &ev, NULL, machine);
3459
3460         return err;
3461 }
3462
3463 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3464                                  union perf_event *event,
3465                                  struct perf_session *session)
3466 {
3467         __event_process_build_id(&event->build_id,
3468                                  event->build_id.filename,
3469                                  session);
3470         return 0;
3471 }