// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
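/*
 * Decompress a PERF_RECORD_COMPRESSED event into an anonymous mmap'd
 * buffer and link it onto the session's decomp list. Any undelivered
 * remainder of the previous decomp buffer is copied to the front of the
 * new one, so events that were split across compressed records are
 * reassembled before delivery.
 */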
static int perf_session__process_compressed_event(struct perf_session *session,
                                                  union perf_event *event, u64 file_offset)
{
        void *src;
        size_t decomp_size, src_size;
        u64 decomp_last_rem = 0;
        size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
        struct decomp *decomp, *decomp_last = session->decomp_last;

        if (decomp_last) {
                decomp_last_rem = decomp_last->size - decomp_last->head;
                decomp_len += decomp_last_rem;
        }

        mmap_len = sizeof(struct decomp) + decomp_len;
        decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (decomp == MAP_FAILED) {
                pr_err("Couldn't allocate memory for decompression\n");
                return -1;
        }

        decomp->file_pos = file_offset;
        decomp->mmap_len = mmap_len;
        decomp->head = 0;

        if (decomp_last_rem) {
                memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
                decomp->size = decomp_last_rem;
        }

        src = (void *)event + sizeof(struct perf_record_compressed);
        src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

        decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
                                &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
        if (!decomp_size) {
                munmap(decomp, mmap_len);
                pr_err("Couldn't decompress data\n");
                return -1;
        }

        decomp->size += decomp_size;

        if (session->decomp == NULL) {
                session->decomp = decomp;
                session->decomp_last = decomp;
        } else {
                session->decomp_last->next = decomp;
                session->decomp_last = decomp;
        }

        pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

        return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset);

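/*
 * Read the on-disk header. For non-pipe, non-stat data also verify that
 * all evsels agree on sample_type, sample_id_all and read_format, since
 * event parsing relies on these being consistent across the file.
 */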
static int perf_session__open(struct perf_session *session)
{
        struct perf_data *data = session->data;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data__is_pipe(data))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->core.attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);

        return perf_session__deliver_event(session, event->event,
                                           session->tool, event->file_offset);
}

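/*
 * Allocate and set up a session: open the perf_data if one is provided,
 * read and validate its header in read mode, and create kernel maps up
 * front in write (or data-less) mode. Returns an ERR_PTR() on failure,
 * so callers must check the result with IS_ERR().
 */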
struct perf_session *perf_session__new(struct perf_data *data,
                                       bool repipe, struct perf_tool *tool)
{
        int ret = -ENOMEM;
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);

        perf_env__init(&session->header.env);
        if (data) {
                ret = perf_data__open(data);
                if (ret < 0)
                        goto out_delete;

                session->data = data;

                if (perf_data__is_read(data)) {
                        ret = perf_session__open(session);
                        if (ret < 0)
                                goto out_delete;

                        /*
                         * Set session attributes that are present in perf.data
                         * but not in pipe-mode.
                         */
                        if (!data->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }

                        evlist__init_trace_event_sample_raw(session->evlist);

                        /* Open the directory data. */
                        if (data->is_dir) {
                                ret = perf_data__open_dir(data);
                                if (ret)
                                        goto out_delete;
                        }

                        if (!symbol_conf.kallsyms_name &&
                            !symbol_conf.vmlinux_name)
                                symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        session->machines.host.single_address_space =
                perf_env__single_address_space(session->machines.host.env);

        if (!data || perf_data__is_write(data)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so evlist__sample_id_all is not meaningful here.
         */
        if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_delete:
        perf_session__delete(session);
 out:
        return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

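/*
 * Unmap every decompressed event buffer chained off the session. The
 * buffers were allocated with mmap() in
 * perf_session__process_compressed_event(), so munmap() is the matching
 * release.
 */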
static void perf_session__release_decomp_events(struct perf_session *session)
{
        struct decomp *next, *decomp;
        size_t mmap_len;

        next = session->decomp;
        do {
                decomp = next;
                if (decomp == NULL)
                        break;
                next = decomp->next;
                mmap_len = decomp->mmap_len;
                munmap(decomp, mmap_len);
        } while (1);
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_session__release_decomp_events(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->data)
                perf_data__close(session->data);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

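/*
 * Consume and discard n bytes from fd. Used to skip payloads we do not
 * handle when reading from a pipe, where lseek() is not available.
 */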
static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
                                       union perf_event *event)
{
        dump_printf(": unhandled!\n");
        if (perf_data__is_pipe(session->data))
                skipn(perf_data__fd(session->data), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
                             union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
                                   union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
                                                       union perf_event *event __maybe_unused,
                                                       u64 file_offset __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

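/*
 * Point every callback the tool left NULL at a sane default: a generic
 * perf_event__process_*() handler where one exists, otherwise a stub
 * that just logs ": unhandled!". After this, event dispatch can invoke
 * any callback without a NULL check.
 */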
void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->cgroup == NULL)
                tool->cgroup = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->ksymbol == NULL)
                tool->ksymbol = perf_event__process_ksymbol;
        if (tool->bpf == NULL)
                tool->bpf = perf_event__process_bpf;
        if (tool->text_poke == NULL)
                tool->text_poke = perf_event__process_text_poke;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_op2_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
        if (tool->compressed == NULL)
                tool->compressed = perf_session__process_compressed_event;
}

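/*
 * Byte-swapping helpers for perf.data files recorded on a machine of
 * the opposite endianness. The trailing sample_id_all block is a
 * sequence of u64s, so it can be swapped in one pass once its start
 * within the record is known.
 */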
static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;

        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);
        event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
        event->text_poke.addr    = bswap_64(event->text_poke.addr);
        event->text_poke.old_len = bswap_16(event->text_poke.old_len);
        event->text_poke.new_len = bswap_16(event->text_poke.new_len);

        if (sample_id_all) {
                size_t len = sizeof(event->text_poke.old_len) +
                             sizeof(event->text_poke.new_len) +
                             event->text_poke.old_len +
                             event->text_poke.new_len;
                void *data = &event->text_poke.old_len;

                data += PERF_ALIGN(len, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
                                        bool sample_id_all)
{
        u64 i;

        event->namespaces.pid           = bswap_32(event->namespaces.pid);
        event->namespaces.tid           = bswap_32(event->namespaces.tid);
        event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

        for (i = 0; i < event->namespaces.nr_namespaces; i++) {
                struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

                ns->dev = bswap_64(ns->dev);
                ns->ino = bswap_64(ns->ino);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
        event->cgroup.id = bswap_64(event->cgroup.id);

        if (sample_id_all) {
                void *data = &event->cgroup.path;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above is byte specific, so we need to reverse each byte of the
 * bitfield. This might also be implementation specific, and the proper
 * fix would be to carry the perf_event_attr bitfield flags in a
 * separate FEAT_ section of the data file. Though this seems to work
 * for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);
        bswap_field_32(aux_sample_size);

        /*
         * After read_format are bitfields. Check read_format because
         * we are unable to use offsetof on bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
        if (event->auxtrace_error.fmt)
                event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct perf_record_cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct perf_record_record_cpu_map *mask;
        unsigned i;

        data->type = bswap_64(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct perf_record_record_cpu_map *)data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        /* event->stat_config.nr is still in file endianness here. */
        size  = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

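/*
 * Per-record-type byte-swap handlers, indexed by event->header.type.
 * A NULL entry means no swapping is done here for that type.
 */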
static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
        [PERF_RECORD_CGROUP]              = perf_event__cgroup_swap,
        [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass over all buffers, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

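/*
 * Queue an event for timestamp-ordered delivery. Flushing the queue
 * hands each event to perf_session__deliver_event() via
 * ordered_events__deliver_event().
 */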
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * The LBR callstack only captures the user call chain:
                 * i is the kernel call chain length and the extra 1 is
                 * the PERF_CONTEXT_USER marker.
                 *
                 * The user call chain is stored in LBR registers.
                 * LBRs are register pairs: the caller is stored in the
                 * "from" register, while the callee is stored in the
                 * "to" register. For example, for a call stack
                 * "A"->"B"->"C"->"D", the LBR registers will record
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to reconstruct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), entries[i].from);
        }
}

static void callchain__printf(struct evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        uint64_t i;

        printf("%s: nr:%" PRIu64 "\n",
                !callstack ? "... branch stack" : "... branch callstack",
                sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &entries[i];

                if (!callstack) {
                        printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                                i, e->from, e->to,
                                (unsigned short)e->flags.cycles,
                                e->flags.mispred ? "M" : " ",
                                e->flags.predicted ? "P" : " ",
                                e->flags.abort ? "A" : " ",
                                e->flags.in_tx ? "T" : " ",
                                (unsigned)e->flags.reserved);
                } else {
                        printf("..... %2"PRIu64": %016" PRIx64 "\n",
                                i, i > 0 ? e->from : e->to);
                }
        }
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%016" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
        u64 sample_type = __evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);
        if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
                evlist->trace_event_sample_raw(evlist, event, sample);

        if (sample)
                evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->core.attr.sample_type;

        if (evsel__has_callchain(evsel))
                callchain__printf(evsel, sample);

        if (evsel__has_br_stack(evsel))
                branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_PHYS_ADDR)
                printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
        struct perf_record_read *read_event = &event->read;
        u64 read_format;

        if (!dump_trace)
                return;

        printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
               evsel__name(evsel), event->read.value);

        if (!evsel)
                return;

        read_format = evsel->core.attr.read_format;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

        if (read_format & PERF_FORMAT_ID)
                printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

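/*
 * Resolve which machine an event belongs to. Guest kernel/user samples
 * are routed to the guest machine keyed by pid, falling back to the
 * default guest machine; everything else goes to the host.
 */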
static struct machine *machines__find_for_cpumode(struct machines *machines,
                                               union perf_event *event,
                                               struct perf_sample *sample)
{
        struct machine *machine;

        if (perf_guest &&
            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = machines__find(machines, pid);
                if (!machine)
                        machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &machines->host;
}

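/*
 * Deliver one value of a PERF_SAMPLE_READ sample. The period is
 * computed as the delta against the previously seen value for this
 * sample id; zero-period samples carry no new information and are
 * dropped.
 */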
static int deliver_sample_value(struct evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
        struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
        struct evsel *evsel;

        if (sid) {
                sample->id     = v->id;
                sample->period = v->value - sid->period;
                sid->period    = v->value;
        }

        if (!sid || sid->evsel == NULL) {
                ++evlist->stats.nr_unknown_id;
                return 0;
        }

        /*
         * There's no reason to deliver sample
         * for zero period, bail out.
         */
        if (!sample->period)
                return 0;

        evsel = container_of(sid->evsel, struct evsel, core);
        return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        int ret = -EINVAL;
        u64 i;

        for (i = 0; i < sample->read.group.nr; i++) {
                ret = deliver_sample_value(evlist, tool, event, sample,
                                           &sample->read.group.values[i],
                                           machine);
                if (ret)
                        break;
        }

        return ret;
}

static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
                                  union perf_event *event, struct perf_sample *sample,
                                  struct evsel *evsel, struct machine *machine)
{
        /* We know evsel != NULL. */
        u64 sample_type = evsel->core.attr.sample_type;
        u64 read_format = evsel->core.attr.read_format;

        /* Standard sample delivery. */
        if (!(sample_type & PERF_SAMPLE_READ))
                return tool->sample(tool, event, sample, evsel, machine);

        /* For PERF_SAMPLE_READ we have either single or group mode. */
        if (read_format & PERF_FORMAT_GROUP)
                return deliver_sample_group(evlist, tool, event, sample,
                                            machine);
        else
                return deliver_sample_value(evlist, tool, event, sample,
                                            &sample->read.one, machine);
}

1431 static int machines__deliver_event(struct machines *machines,
1432                                    struct evlist *evlist,
1433                                    union perf_event *event,
1434                                    struct perf_sample *sample,
1435                                    struct perf_tool *tool, u64 file_offset)
1436 {
1437         struct evsel *evsel;
1438         struct machine *machine;
1439
1440         dump_event(evlist, event, file_offset, sample);
1441
1442         evsel = evlist__id2evsel(evlist, sample->id);
1443
1444         machine = machines__find_for_cpumode(machines, event, sample);
1445
1446         switch (event->header.type) {
1447         case PERF_RECORD_SAMPLE:
1448                 if (evsel == NULL) {
1449                         ++evlist->stats.nr_unknown_id;
1450                         return 0;
1451                 }
1452                 dump_sample(evsel, event, sample);
1453                 if (machine == NULL) {
1454                         ++evlist->stats.nr_unprocessable_samples;
1455                         return 0;
1456                 }
1457                 return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1458         case PERF_RECORD_MMAP:
1459                 return tool->mmap(tool, event, sample, machine);
1460         case PERF_RECORD_MMAP2:
1461                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1462                         ++evlist->stats.nr_proc_map_timeout;
1463                 return tool->mmap2(tool, event, sample, machine);
1464         case PERF_RECORD_COMM:
1465                 return tool->comm(tool, event, sample, machine);
1466         case PERF_RECORD_NAMESPACES:
1467                 return tool->namespaces(tool, event, sample, machine);
1468         case PERF_RECORD_CGROUP:
1469                 return tool->cgroup(tool, event, sample, machine);
1470         case PERF_RECORD_FORK:
1471                 return tool->fork(tool, event, sample, machine);
1472         case PERF_RECORD_EXIT:
1473                 return tool->exit(tool, event, sample, machine);
1474         case PERF_RECORD_LOST:
1475                 if (tool->lost == perf_event__process_lost)
1476                         evlist->stats.total_lost += event->lost.lost;
1477                 return tool->lost(tool, event, sample, machine);
1478         case PERF_RECORD_LOST_SAMPLES:
1479                 if (tool->lost_samples == perf_event__process_lost_samples)
1480                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1481                 return tool->lost_samples(tool, event, sample, machine);
1482         case PERF_RECORD_READ:
1483                 dump_read(evsel, event);
1484                 return tool->read(tool, event, sample, evsel, machine);
1485         case PERF_RECORD_THROTTLE:
1486                 return tool->throttle(tool, event, sample, machine);
1487         case PERF_RECORD_UNTHROTTLE:
1488                 return tool->unthrottle(tool, event, sample, machine);
1489         case PERF_RECORD_AUX:
1490                 if (tool->aux == perf_event__process_aux) {
1491                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1492                                 evlist->stats.total_aux_lost += 1;
1493                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1494                                 evlist->stats.total_aux_partial += 1;
1495                 }
1496                 return tool->aux(tool, event, sample, machine);
1497         case PERF_RECORD_ITRACE_START:
1498                 return tool->itrace_start(tool, event, sample, machine);
1499         case PERF_RECORD_SWITCH:
1500         case PERF_RECORD_SWITCH_CPU_WIDE:
1501                 return tool->context_switch(tool, event, sample, machine);
1502         case PERF_RECORD_KSYMBOL:
1503                 return tool->ksymbol(tool, event, sample, machine);
1504         case PERF_RECORD_BPF_EVENT:
1505                 return tool->bpf(tool, event, sample, machine);
1506         case PERF_RECORD_TEXT_POKE:
1507                 return tool->text_poke(tool, event, sample, machine);
1508         default:
1509                 ++evlist->stats.nr_unknown_events;
1510                 return -1;
1511         }
1512 }
1513
1514 static int perf_session__deliver_event(struct perf_session *session,
1515                                        union perf_event *event,
1516                                        struct perf_tool *tool,
1517                                        u64 file_offset)
1518 {
1519         struct perf_sample sample;
1520         int ret = evlist__parse_sample(session->evlist, event, &sample);
1521
1522         if (ret) {
1523                 pr_err("Can't parse sample, err = %d\n", ret);
1524                 return ret;
1525         }
1526
1527         ret = auxtrace__process_event(session, event, &sample, tool);
1528         if (ret < 0)
1529                 return ret;
1530         if (ret > 0)
1531                 return 0;
1532
1533         ret = machines__deliver_event(&session->machines, session->evlist,
1534                                       event, &sample, tool, file_offset);
1535
1536         if (dump_trace && sample.aux_sample.size)
1537                 auxtrace__dump_auxtrace_sample(session, &sample);
1538
1539         return ret;
1540 }
1541
1542 static s64 perf_session__process_user_event(struct perf_session *session,
1543                                             union perf_event *event,
1544                                             u64 file_offset)
1545 {
1546         struct ordered_events *oe = &session->ordered_events;
1547         struct perf_tool *tool = session->tool;
1548         struct perf_sample sample = { .time = 0, };
1549         int fd = perf_data__fd(session->data);
1550         int err;
1551
1552         if (event->header.type != PERF_RECORD_COMPRESSED ||
1553             tool->compressed == perf_session__process_compressed_event_stub)
1554                 dump_event(session->evlist, event, file_offset, &sample);
1555
1556         /* These events are processed right away */
1557         switch (event->header.type) {
1558         case PERF_RECORD_HEADER_ATTR:
1559                 err = tool->attr(tool, event, &session->evlist);
1560                 if (err == 0) {
1561                         perf_session__set_id_hdr_size(session);
1562                         perf_session__set_comm_exec(session);
1563                 }
1564                 return err;
1565         case PERF_RECORD_EVENT_UPDATE:
1566                 return tool->event_update(tool, event, &session->evlist);
1567         case PERF_RECORD_HEADER_EVENT_TYPE:
1568                 /*
1569                  * Deprecated, but we need to handle it for the sake
1570                  * of old data files created in pipe mode.
1571                  */
1572                 return 0;
1573         case PERF_RECORD_HEADER_TRACING_DATA:
1574                 /*
1575                  * Set up for reading amidst the mmap, but only when
1576                  * we are in 'file' mode; the 'pipe' fd is already in
1577                  * the proper place.
1578                  */
1579                 if (!perf_data__is_pipe(session->data))
1580                         lseek(fd, file_offset, SEEK_SET);
1581                 return tool->tracing_data(session, event);
1582         case PERF_RECORD_HEADER_BUILD_ID:
1583                 return tool->build_id(session, event);
1584         case PERF_RECORD_FINISHED_ROUND:
1585                 return tool->finished_round(tool, event, oe);
1586         case PERF_RECORD_ID_INDEX:
1587                 return tool->id_index(session, event);
1588         case PERF_RECORD_AUXTRACE_INFO:
1589                 return tool->auxtrace_info(session, event);
1590         case PERF_RECORD_AUXTRACE:
1591                 /* set up for reading amidst the mmap */
1592                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1593                 return tool->auxtrace(session, event);
1594         case PERF_RECORD_AUXTRACE_ERROR:
1595                 perf_session__auxtrace_error_inc(session, event);
1596                 return tool->auxtrace_error(session, event);
1597         case PERF_RECORD_THREAD_MAP:
1598                 return tool->thread_map(session, event);
1599         case PERF_RECORD_CPU_MAP:
1600                 return tool->cpu_map(session, event);
1601         case PERF_RECORD_STAT_CONFIG:
1602                 return tool->stat_config(session, event);
1603         case PERF_RECORD_STAT:
1604                 return tool->stat(session, event);
1605         case PERF_RECORD_STAT_ROUND:
1606                 return tool->stat_round(session, event);
1607         case PERF_RECORD_TIME_CONV:
1608                 session->time_conv = event->time_conv;
1609                 return tool->time_conv(session, event);
1610         case PERF_RECORD_HEADER_FEATURE:
1611                 return tool->feature(session, event);
1612         case PERF_RECORD_COMPRESSED:
1613                 err = tool->compressed(session, event, file_offset);
1614                 if (err)
1615                         dump_event(session->evlist, event, file_offset, &sample);
1616                 return err;
1617         default:
1618                 return -EINVAL;
1619         }
1620 }
1621
1622 int perf_session__deliver_synth_event(struct perf_session *session,
1623                                       union perf_event *event,
1624                                       struct perf_sample *sample)
1625 {
1626         struct evlist *evlist = session->evlist;
1627         struct perf_tool *tool = session->tool;
1628
1629         events_stats__inc(&evlist->stats, event->header.type);
1630
1631         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1632                 return perf_session__process_user_event(session, event, 0);
1633
1634         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1635 }
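
/*
 * Editor's note: a minimal, hypothetical sketch of feeding a synthesized
 * COMM event through perf_session__deliver_synth_event(). The layout
 * follows struct perf_record_comm; example_deliver_synth_comm() and the
 * "sketch" comm string are made up.
 */
static int __maybe_unused
example_deliver_synth_comm(struct perf_session *session, pid_t pid)
{
	struct perf_sample sample = { .id = 0, };
	union perf_event event = {
		.comm = {
			.header = {
				.type = PERF_RECORD_COMM,
				.size = sizeof(event.comm),
			},
			.pid  = pid,
			.tid  = pid,
			.comm = "sketch",
		},
	};

	return perf_session__deliver_synth_event(session, &event, &sample);
}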
1636
1637 static void event_swap(union perf_event *event, bool sample_id_all)
1638 {
1639         perf_event__swap_op swap;
1640
1641         swap = perf_event__swap_ops[event->header.type];
1642         if (swap)
1643                 swap(event, sample_id_all);
1644 }
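
/*
 * Editor's note: perf_event__swap_ops is indexed by event type; types with
 * no registered swap routine are deliberately left untouched rather than
 * treated as an error.
 */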
1645
1646 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1647                              void *buf, size_t buf_sz,
1648                              union perf_event **event_ptr,
1649                              struct perf_sample *sample)
1650 {
1651         union perf_event *event;
1652         size_t hdr_sz, rest;
1653         int fd;
1654
1655         if (session->one_mmap && !session->header.needs_swap) {
1656                 event = file_offset - session->one_mmap_offset +
1657                         session->one_mmap_addr;
1658                 goto out_parse_sample;
1659         }
1660
1661         if (perf_data__is_pipe(session->data))
1662                 return -1;
1663
1664         fd = perf_data__fd(session->data);
1665         hdr_sz = sizeof(struct perf_event_header);
1666
1667         if (buf_sz < hdr_sz)
1668                 return -1;
1669
1670         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1671             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1672                 return -1;
1673
1674         event = (union perf_event *)buf;
1675
1676         if (session->header.needs_swap)
1677                 perf_event_header__bswap(&event->header);
1678
1679         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1680                 return -1;
1681
1682         rest = event->header.size - hdr_sz;
1683
1684         if (readn(fd, buf, rest) != (ssize_t)rest)
1685                 return -1;
1686
1687         if (session->header.needs_swap)
1688                 event_swap(event, evlist__sample_id_all(session->evlist));
1689
1690 out_parse_sample:
1691
1692         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1693             evlist__parse_sample(session->evlist, event, sample))
1694                 return -1;
1695
1696         *event_ptr = event;
1697
1698         return 0;
1699 }
1700
1701 int perf_session__peek_events(struct perf_session *session, u64 offset,
1702                               u64 size, peek_events_cb_t cb, void *data)
1703 {
1704         u64 max_offset = offset + size;
1705         char buf[PERF_SAMPLE_MAX_SIZE];
1706         union perf_event *event;
1707         int err;
1708
1709         do {
1710                 err = perf_session__peek_event(session, offset, buf,
1711                                                PERF_SAMPLE_MAX_SIZE, &event,
1712                                                NULL);
1713                 if (err)
1714                         return err;
1715
1716                 err = cb(session, event, offset, data);
1717                 if (err)
1718                         return err;
1719
1720                 offset += event->header.size;
1721                 if (event->header.type == PERF_RECORD_AUXTRACE)
1722                         offset += event->auxtrace.size;
1723
1724         } while (offset < max_offset);
1725
1726         return err;
1727 }
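
/*
 * Editor's note: a hypothetical peek_events_cb_t callback, sketched only to
 * show the expected signature; it counts events via the opaque data pointer
 * and never asks to stop.
 */
static int __maybe_unused
example_count_event(struct perf_session *session __maybe_unused,
		    union perf_event *event __maybe_unused,
		    u64 offset __maybe_unused, void *data)
{
	(*(u64 *)data)++;	/* caller passes a u64 counter as 'data' */
	return 0;
}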
1728
1729 static s64 perf_session__process_event(struct perf_session *session,
1730                                        union perf_event *event, u64 file_offset)
1731 {
1732         struct evlist *evlist = session->evlist;
1733         struct perf_tool *tool = session->tool;
1734         int ret;
1735
1736         if (session->header.needs_swap)
1737                 event_swap(event, evlist__sample_id_all(evlist));
1738
1739         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1740                 return -EINVAL;
1741
1742         events_stats__inc(&evlist->stats, event->header.type);
1743
1744         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1745                 return perf_session__process_user_event(session, event, file_offset);
1746
1747         if (tool->ordered_events) {
1748                 u64 timestamp = -1ULL;
1749
1750                 ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
1751                 if (ret && ret != -1)
1752                         return ret;
1753
1754                 ret = perf_session__queue_event(session, event, timestamp, file_offset);
1755                 if (ret != -ETIME)
1756                         return ret;
1757         }
1758
1759         return perf_session__deliver_event(session, event, tool, file_offset);
1760 }
1761
1762 void perf_event_header__bswap(struct perf_event_header *hdr)
1763 {
1764         hdr->type = bswap_32(hdr->type);
1765         hdr->misc = bswap_16(hdr->misc);
1766         hdr->size = bswap_16(hdr->size);
1767 }
1768
1769 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1770 {
1771         return machine__findnew_thread(&session->machines.host, -1, pid);
1772 }
1773
1774 /*
1775  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1776  * A single thread is created for it here, although in reality there is a
1777  * separate idle task per cpu, so there should be one 'struct thread' per cpu.
1778  * That mismatch causes problems for some tools and requires workarounds, e.g.
1779  * get_idle_thread() in builtin-sched.c and thread_stack__per_cpu().
1780  */
1781 int perf_session__register_idle_thread(struct perf_session *session)
1782 {
1783         struct thread *thread;
1784         int err = 0;
1785
1786         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1787         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1788                 pr_err("problem inserting idle task.\n");
1789                 err = -1;
1790         }
1791
1792         if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1793                 pr_err("problem setting idle task namespaces.\n");
1794                 err = -1;
1795         }
1796
1797         /* machine__findnew_thread() got the thread, so put it */
1798         thread__put(thread);
1799         return err;
1800 }
1801
1802 static void
1803 perf_session__warn_order(const struct perf_session *session)
1804 {
1805         const struct ordered_events *oe = &session->ordered_events;
1806         struct evsel *evsel;
1807         bool should_warn = true;
1808
1809         evlist__for_each_entry(session->evlist, evsel) {
1810                 if (evsel->core.attr.write_backward)
1811                         should_warn = false;
1812         }
1813
1814         if (!should_warn)
1815                 return;
1816         if (oe->nr_unordered_events != 0)
1817                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1818 }
1819
1820 static void perf_session__warn_about_errors(const struct perf_session *session)
1821 {
1822         const struct events_stats *stats = &session->evlist->stats;
1823
1824         if (session->tool->lost == perf_event__process_lost &&
1825             stats->nr_events[PERF_RECORD_LOST] != 0) {
1826                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1827                             "Check IO/CPU overload!\n\n",
1828                             stats->nr_events[0],
1829                             stats->nr_events[PERF_RECORD_LOST]);
1830         }
1831
1832         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1833                 double drop_rate;
1834
1835                 drop_rate = (double)stats->total_lost_samples /
1836                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1837                 if (drop_rate > 0.05) {
1838                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1839                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1840                                     drop_rate * 100.0);
1841                 }
1842         }
1843
1844         if (session->tool->aux == perf_event__process_aux &&
1845             stats->total_aux_lost != 0) {
1846                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1847                             stats->total_aux_lost,
1848                             stats->nr_events[PERF_RECORD_AUX]);
1849         }
1850
1851         if (session->tool->aux == perf_event__process_aux &&
1852             stats->total_aux_partial != 0) {
1853                 bool vmm_exclusive = false;
1854
1855                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1856                                        &vmm_exclusive);
1857
1858                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1859                             "Are you running a KVM guest in the background?%s\n\n",
1860                             stats->total_aux_partial,
1861                             stats->nr_events[PERF_RECORD_AUX],
1862                             vmm_exclusive ?
1863                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1864                             "will reduce the gaps to only guest's timeslices." :
1865                             "");
1866         }
1867
1868         if (stats->nr_unknown_events != 0) {
1869                 ui__warning("Found %u unknown events!\n\n"
1870                             "Is this an older tool processing a perf.data "
1871                             "file generated by a more recent tool?\n\n"
1872                             "If that is not the case, consider "
1873                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1874                             stats->nr_unknown_events);
1875         }
1876
1877         if (stats->nr_unknown_id != 0) {
1878                 ui__warning("%u samples with id not present in the header\n",
1879                             stats->nr_unknown_id);
1880         }
1881
1882         if (stats->nr_invalid_chains != 0) {
1883                 ui__warning("Found invalid callchains!\n\n"
1884                             "%u out of %u events were discarded for this reason.\n\n"
1885                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1886                             stats->nr_invalid_chains,
1887                             stats->nr_events[PERF_RECORD_SAMPLE]);
1888         }
1889
1890         if (stats->nr_unprocessable_samples != 0) {
1891                 ui__warning("%u unprocessable samples recorded.\n"
1892                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1893                             stats->nr_unprocessable_samples);
1894         }
1895
1896         perf_session__warn_order(session);
1897
1898         events_stats__auxtrace_error_warn(stats);
1899
1900         if (stats->nr_proc_map_timeout != 0) {
1901                 ui__warning("%d map information files for pre-existing threads were\n"
1902                             "not processed; if there are samples for those addresses,\n"
1903                             "they will not be resolved. You may find out which threads\n"
1904                             "these are by running with -v and redirecting the output\n"
1905                             "to a file.\n"
1906                             "Is the time limit to process the proc map too short?\n"
1907                             "Increase it with --proc-map-timeout.\n",
1908                             stats->nr_proc_map_timeout);
1909         }
1910 }
1911
1912 static int perf_session__flush_thread_stack(struct thread *thread,
1913                                             void *p __maybe_unused)
1914 {
1915         return thread_stack__flush(thread);
1916 }
1917
1918 static int perf_session__flush_thread_stacks(struct perf_session *session)
1919 {
1920         return machines__for_each_thread(&session->machines,
1921                                          perf_session__flush_thread_stack,
1922                                          NULL);
1923 }
1924
1925 volatile int session_done;
1926
1927 static int __perf_session__process_decomp_events(struct perf_session *session);
1928
1929 static int __perf_session__process_pipe_events(struct perf_session *session)
1930 {
1931         struct ordered_events *oe = &session->ordered_events;
1932         struct perf_tool *tool = session->tool;
1933         union perf_event *event;
1934         uint32_t size, cur_size = 0;
1935         void *buf = NULL;
1936         s64 skip = 0;
1937         u64 head;
1938         ssize_t err;
1939         void *p;
1940
1941         perf_tool__fill_defaults(tool);
1942
1943         head = 0;
1944         cur_size = sizeof(union perf_event);
1945
1946         buf = malloc(cur_size);
1947         if (!buf)
1948                 return -errno;
1949         ordered_events__set_copy_on_queue(oe, true);
1950 more:
1951         event = buf;
1952         err = perf_data__read(session->data, event,
1953                               sizeof(struct perf_event_header));
1954         if (err <= 0) {
1955                 if (err == 0)
1956                         goto done;
1957
1958                 pr_err("failed to read event header\n");
1959                 goto out_err;
1960         }
1961
1962         if (session->header.needs_swap)
1963                 perf_event_header__bswap(&event->header);
1964
1965         size = event->header.size;
1966         if (size < sizeof(struct perf_event_header)) {
1967                 pr_err("bad event header size\n");
1968                 goto out_err;
1969         }
1970
1971         if (size > cur_size) {
1972                 void *new = realloc(buf, size);
1973                 if (!new) {
1974                         pr_err("failed to allocate memory to read event\n");
1975                         goto out_err;
1976                 }
1977                 buf = new;
1978                 cur_size = size;
1979                 event = buf;
1980         }
1981         p = event;
1982         p += sizeof(struct perf_event_header);
1983
1984         if (size - sizeof(struct perf_event_header)) {
1985                 err = perf_data__read(session->data, p,
1986                                       size - sizeof(struct perf_event_header));
1987                 if (err <= 0) {
1988                         if (err == 0) {
1989                                 pr_err("unexpected end of event stream\n");
1990                                 goto done;
1991                         }
1992
1993                         pr_err("failed to read event data\n");
1994                         goto out_err;
1995                 }
1996         }
1997
1998         if ((skip = perf_session__process_event(session, event, head)) < 0) {
1999                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2000                        head, event->header.size, event->header.type);
2001                 err = -EINVAL;
2002                 goto out_err;
2003         }
2004
2005         head += size;
2006
2007         if (skip > 0)
2008                 head += skip;
2009
2010         err = __perf_session__process_decomp_events(session);
2011         if (err)
2012                 goto out_err;
2013
2014         if (!session_done())
2015                 goto more;
2016 done:
2017         /* do the final flush for ordered samples */
2018         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2019         if (err)
2020                 goto out_err;
2021         err = auxtrace__flush_events(session, tool);
2022         if (err)
2023                 goto out_err;
2024         err = perf_session__flush_thread_stacks(session);
2025 out_err:
2026         free(buf);
2027         if (!tool->no_warn)
2028                 perf_session__warn_about_errors(session);
2029         ordered_events__free(&session->ordered_events);
2030         auxtrace__free_events(session);
2031         return err;
2032 }
2033
2034 static union perf_event *
2035 prefetch_event(char *buf, u64 head, size_t mmap_size,
2036                bool needs_swap, union perf_event *error)
2037 {
2038         union perf_event *event;
2039
2040         /*
2041          * Ensure we have enough space remaining to read
2042          * the size of the event in the headers.
2043          */
2044         if (head + sizeof(event->header) > mmap_size)
2045                 return NULL;
2046
2047         event = (union perf_event *)(buf + head);
2048         if (needs_swap)
2049                 perf_event_header__bswap(&event->header);
2050
2051         if (head + event->header.size <= mmap_size)
2052                 return event;
2053
2054         /* We're not fetching the event so swap back again */
2055         if (needs_swap)
2056                 perf_event_header__bswap(&event->header);
2057
2058         pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
2059                  " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);
2060
2061         return error;
2062 }
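
/*
 * Editor's note: worked example of the bounds checks above, assuming an
 * 8-byte struct perf_event_header and mmap_size = 0x1000. With head =
 * 0xffc not even the header fits, so NULL is returned and the caller
 * remaps. With head = 0xfe0 and event->header.size = 0x30, head + size =
 * 0x1010 overruns the mapping, so the header is swapped back (if it was
 * swapped) and the error cookie is returned instead of a truncated event.
 */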
2063
2064 static union perf_event *
2065 fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2066 {
2067         return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
2068 }
2069
2070 static union perf_event *
2071 fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2072 {
2073         return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
2074 }
2075
2076 static int __perf_session__process_decomp_events(struct perf_session *session)
2077 {
2078         s64 skip;
2079         u64 size, file_pos = 0;
2080         struct decomp *decomp = session->decomp_last;
2081
2082         if (!decomp)
2083                 return 0;
2084
2085         while (decomp->head < decomp->size && !session_done()) {
2086                 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2087                                                              session->header.needs_swap);
2088
2089                 if (!event)
2090                         break;
2091
2092                 size = event->header.size;
2093
2094                 if (size < sizeof(struct perf_event_header) ||
2095                     (skip = perf_session__process_event(session, event, file_pos)) < 0) {
2096                         pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2097                                 decomp->file_pos + decomp->head, event->header.size, event->header.type);
2098                         return -EINVAL;
2099                 }
2100
2101                 if (skip)
2102                         size += skip;
2103
2104                 decomp->head += size;
2105         }
2106
2107         return 0;
2108 }
2109
2110 /*
2111  * On 64-bit we can mmap the data file in one go; no need for tiny mmap
2112  * slices. On 32-bit we use 32MB slices.
2113  */
2114 #if BITS_PER_LONG == 64
2115 #define MMAP_SIZE ULLONG_MAX
2116 #define NUM_MMAPS 1
2117 #else
2118 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2119 #define NUM_MMAPS 128
2120 #endif
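
/*
 * Editor's note: NUM_MMAPS must remain a power of two; reader__process_events()
 * wraps the map index with "map_idx & (ARRAY_SIZE(mmaps) - 1)", which only
 * cycles correctly for power-of-two sizes (both 1 and 128 qualify).
 */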
2121
2122 struct reader;
2123
2124 typedef s64 (*reader_cb_t)(struct perf_session *session,
2125                            union perf_event *event,
2126                            u64 file_offset);
2127
2128 struct reader {
2129         int              fd;
2130         u64              data_size;
2131         u64              data_offset;
2132         reader_cb_t      process;
2133 };
2134
2135 static int
2136 reader__process_events(struct reader *rd, struct perf_session *session,
2137                        struct ui_progress *prog)
2138 {
2139         u64 data_size = rd->data_size;
2140         u64 head, page_offset, file_offset, file_pos, size;
2141         int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2142         size_t  mmap_size;
2143         char *buf, *mmaps[NUM_MMAPS];
2144         union perf_event *event;
2145         s64 skip;
2146
2147         page_offset = page_size * (rd->data_offset / page_size);
2148         file_offset = page_offset;
2149         head = rd->data_offset - page_offset;
2150
2151         ui_progress__init_size(prog, data_size, "Processing events...");
2152
2153         data_size += rd->data_offset;
2154
2155         mmap_size = MMAP_SIZE;
2156         if (mmap_size > data_size) {
2157                 mmap_size = data_size;
2158                 session->one_mmap = true;
2159         }
2160
2161         memset(mmaps, 0, sizeof(mmaps));
2162
2163         mmap_prot  = PROT_READ;
2164         mmap_flags = MAP_SHARED;
2165
2166         if (session->header.needs_swap) {
2167                 mmap_prot  |= PROT_WRITE;
2168                 mmap_flags = MAP_PRIVATE;
2169         }
2170 remap:
2171         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2172                    file_offset);
2173         if (buf == MAP_FAILED) {
2174                 pr_err("failed to mmap file\n");
2175                 err = -errno;
2176                 goto out;
2177         }
2178         mmaps[map_idx] = buf;
2179         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2180         file_pos = file_offset + head;
2181         if (session->one_mmap) {
2182                 session->one_mmap_addr = buf;
2183                 session->one_mmap_offset = file_offset;
2184         }
2185
2186 more:
2187         event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
2188         if (IS_ERR(event))
2189                 return PTR_ERR(event);
2190
2191         if (!event) {
2192                 if (mmaps[map_idx]) {
2193                         munmap(mmaps[map_idx], mmap_size);
2194                         mmaps[map_idx] = NULL;
2195                 }
2196
2197                 page_offset = page_size * (head / page_size);
2198                 file_offset += page_offset;
2199                 head -= page_offset;
2200                 goto remap;
2201         }
2202
2203         size = event->header.size;
2204
2205         skip = -EINVAL;
2206
2207         if (size < sizeof(struct perf_event_header) ||
2208             (skip = rd->process(session, event, file_pos)) < 0) {
2209                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2210                        file_offset + head, event->header.size,
2211                        event->header.type, strerror(-skip));
2212                 err = skip;
2213                 goto out;
2214         }
2215
2216         if (skip)
2217                 size += skip;
2218
2219         head += size;
2220         file_pos += size;
2221
2222         err = __perf_session__process_decomp_events(session);
2223         if (err)
2224                 goto out;
2225
2226         ui_progress__update(prog, size);
2227
2228         if (session_done())
2229                 goto out;
2230
2231         if (file_pos < data_size)
2232                 goto more;
2233
2234 out:
2235         return err;
2236 }
2237
2238 static s64 process_simple(struct perf_session *session,
2239                           union perf_event *event,
2240                           u64 file_offset)
2241 {
2242         return perf_session__process_event(session, event, file_offset);
2243 }
2244
2245 static int __perf_session__process_events(struct perf_session *session)
2246 {
2247         struct reader rd = {
2248                 .fd             = perf_data__fd(session->data),
2249                 .data_size      = session->header.data_size,
2250                 .data_offset    = session->header.data_offset,
2251                 .process        = process_simple,
2252         };
2253         struct ordered_events *oe = &session->ordered_events;
2254         struct perf_tool *tool = session->tool;
2255         struct ui_progress prog;
2256         int err;
2257
2258         perf_tool__fill_defaults(tool);
2259
2260         if (rd.data_size == 0)
2261                 return -1;
2262
2263         ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2264
2265         err = reader__process_events(&rd, session, &prog);
2266         if (err)
2267                 goto out_err;
2268         /* do the final flush for ordered samples */
2269         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2270         if (err)
2271                 goto out_err;
2272         err = auxtrace__flush_events(session, tool);
2273         if (err)
2274                 goto out_err;
2275         err = perf_session__flush_thread_stacks(session);
2276 out_err:
2277         ui_progress__finish();
2278         if (!tool->no_warn)
2279                 perf_session__warn_about_errors(session);
2280         /*
2281          * We may be switching perf.data output; make ordered_events
2282          * reusable.
2283          */
2284         ordered_events__reinit(&session->ordered_events);
2285         auxtrace__free_events(session);
2286         session->one_mmap = false;
2287         return err;
2288 }
2289
2290 int perf_session__process_events(struct perf_session *session)
2291 {
2292         if (perf_session__register_idle_thread(session) < 0)
2293                 return -ENOMEM;
2294
2295         if (perf_data__is_pipe(session->data))
2296                 return __perf_session__process_pipe_events(session);
2297
2298         return __perf_session__process_events(session);
2299 }
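
/*
 * Editor's note: a minimal, hypothetical driver around
 * perf_session__process_events(). It leans on the stub handlers installed
 * by perf_tool__fill_defaults(); real tools set their own callbacks.
 * example_process_file() and the exact perf_session__new() signature are
 * assumptions based on this tree.
 */
static int __maybe_unused example_process_file(const char *path)
{
	struct perf_data data = {
		.path = path,
		.mode = PERF_DATA_MODE_READ,
	};
	struct perf_tool tool = { .ordered_events = true, };
	struct perf_session *session = perf_session__new(&data, false, &tool);
	int err;

	if (IS_ERR(session))
		return PTR_ERR(session);

	err = perf_session__process_events(session);
	perf_session__delete(session);
	return err;
}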
2300
2301 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2302 {
2303         struct evsel *evsel;
2304
2305         evlist__for_each_entry(session->evlist, evsel) {
2306                 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2307                         return true;
2308         }
2309
2310         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2311         return false;
2312 }
2313
2314 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2315 {
2316         char *bracket;
2317         struct ref_reloc_sym *ref;
2318         struct kmap *kmap;
2319
2320         ref = zalloc(sizeof(struct ref_reloc_sym));
2321         if (ref == NULL)
2322                 return -ENOMEM;
2323
2324         ref->name = strdup(symbol_name);
2325         if (ref->name == NULL) {
2326                 free(ref);
2327                 return -ENOMEM;
2328         }
2329
2330         bracket = strchr(ref->name, ']');
2331         if (bracket)
2332                 *bracket = '\0';
2333
2334         ref->addr = addr;
2335
2336         kmap = map__kmap(map);
2337         if (kmap)
2338                 kmap->ref_reloc_sym = ref;
2339
2340         return 0;
2341 }
2342
2343 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2344 {
2345         return machines__fprintf_dsos(&session->machines, fp);
2346 }
2347
2348 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2349                                           bool (skip)(struct dso *dso, int parm), int parm)
2350 {
2351         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2352 }
2353
2354 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2355 {
2356         size_t ret;
2357         const char *msg = "";
2358
2359         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2360                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2361
2362         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2363
2364         ret += events_stats__fprintf(&session->evlist->stats, fp);
2365         return ret;
2366 }
2367
2368 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2369 {
2370         /*
2371          * FIXME: Here we have to actually print all the machines in this
2372          * session, not just the host...
2373          */
2374         return machine__fprintf(&session->machines.host, fp);
2375 }
2376
2377 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2378                                               unsigned int type)
2379 {
2380         struct evsel *pos;
2381
2382         evlist__for_each_entry(session->evlist, pos) {
2383                 if (pos->core.attr.type == type)
2384                         return pos;
2385         }
2386         return NULL;
2387 }
2388
2389 int perf_session__cpu_bitmap(struct perf_session *session,
2390                              const char *cpu_list, unsigned long *cpu_bitmap)
2391 {
2392         int i, err = -1;
2393         struct perf_cpu_map *map;
2394         int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);
2395
2396         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2397                 struct evsel *evsel;
2398
2399                 evsel = perf_session__find_first_evtype(session, i);
2400                 if (!evsel)
2401                         continue;
2402
2403                 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2404                         pr_err("File does not contain CPU events. "
2405                                "Remove the -C option to proceed.\n");
2406                         return -1;
2407                 }
2408         }
2409
2410         map = perf_cpu_map__new(cpu_list);
2411         if (map == NULL) {
2412                 pr_err("Invalid cpu_list\n");
2413                 return -1;
2414         }
2415
2416         for (i = 0; i < map->nr; i++) {
2417                 int cpu = map->map[i];
2418
2419                 if (cpu >= nr_cpus) {
2420                         pr_err("Requested CPU %d too large. "
2421                                "Consider raising MAX_NR_CPUS\n", cpu);
2422                         goto out_delete_map;
2423                 }
2424
2425                 set_bit(cpu, cpu_bitmap);
2426         }
2427
2428         err = 0;
2429
2430 out_delete_map:
2431         perf_cpu_map__put(map);
2432         return err;
2433 }
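
/*
 * Editor's note: hypothetical usage of perf_session__cpu_bitmap(); the
 * bitmap must provide MAX_NR_CPUS bits, and cpu_list is typically the
 * user's -C argument. example_filter_cpus() is made up.
 */
static int __maybe_unused
example_filter_cpus(struct perf_session *session, const char *cpu_list)
{
	unsigned long cpu_bitmap[BITS_TO_LONGS(MAX_NR_CPUS)];

	memset(cpu_bitmap, 0, sizeof(cpu_bitmap));
	return perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
}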
2434
2435 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2436                                 bool full)
2437 {
2438         if (session == NULL || fp == NULL)
2439                 return;
2440
2441         fprintf(fp, "# ========\n");
2442         perf_header__fprintf_info(session, fp, full);
2443         fprintf(fp, "# ========\n#\n");
2444 }
2445
2446 int perf_event__process_id_index(struct perf_session *session,
2447                                  union perf_event *event)
2448 {
2449         struct evlist *evlist = session->evlist;
2450         struct perf_record_id_index *ie = &event->id_index;
2451         size_t i, nr, max_nr;
2452
2453         max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
2454                  sizeof(struct id_index_entry);
2455         nr = ie->nr;
2456         if (nr > max_nr)
2457                 return -EINVAL;
2458
2459         if (dump_trace)
2460                 fprintf(stdout, " nr: %zu\n", nr);
2461
2462         for (i = 0; i < nr; i++) {
2463                 struct id_index_entry *e = &ie->entries[i];
2464                 struct perf_sample_id *sid;
2465
2466                 if (dump_trace) {
2467                         fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2468                         fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
2469                         fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
2470                         fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
2471                 }
2472
2473                 sid = evlist__id2sid(evlist, e->id);
2474                 if (!sid)
2475                         return -ENOENT;
2476                 sid->idx = e->idx;
2477                 sid->cpu = e->cpu;
2478                 sid->tid = e->tid;
2479         }
2480         return 0;
2481 }