// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "tsc.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include "units.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
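/*
 * PERF_RECORD_COMPRESSED payloads are decompressed into anonymous mmap'ed
 * buffers that are chained on session->decomp. A compressed record may end
 * in the middle of an event, so any remainder past decomp_last->head is
 * copied to the front of the new buffer before the fresh payload is
 * decompressed after it.
 */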
static int perf_session__process_compressed_event(struct perf_session *session,
                                                  union perf_event *event, u64 file_offset)
{
        void *src;
        size_t decomp_size, src_size;
        u64 decomp_last_rem = 0;
        size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
        struct decomp *decomp, *decomp_last = session->decomp_last;

        if (decomp_last) {
                decomp_last_rem = decomp_last->size - decomp_last->head;
                decomp_len += decomp_last_rem;
        }

        mmap_len = sizeof(struct decomp) + decomp_len;
        decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (decomp == MAP_FAILED) {
                pr_err("Couldn't allocate memory for decompression\n");
                return -1;
        }

        decomp->file_pos = file_offset;
        decomp->mmap_len = mmap_len;
        decomp->head = 0;

        if (decomp_last_rem) {
                memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
                decomp->size = decomp_last_rem;
        }

        src = (void *)event + sizeof(struct perf_record_compressed);
        src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

        decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
                                &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
        if (!decomp_size) {
                munmap(decomp, mmap_len);
                pr_err("Couldn't decompress data\n");
                return -1;
        }

        decomp->size += decomp_size;

        if (session->decomp == NULL) {
                session->decomp = decomp;
                session->decomp_last = decomp;
        } else {
                session->decomp_last->next = decomp;
                session->decomp_last = decomp;
        }
        pr_debug("decomp (B): %zu to %zu\n", src_size, decomp_size);

        return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset);

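/*
 * Read the perf.data header and, for non-pipe data, sanity-check that all
 * events in the file agree on sample_type, sample_id_all and read_format.
 * Files carrying only HEADER_STAT data skip the evlist validation.
 */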
static int perf_session__open(struct perf_session *session)
{
        struct perf_data *data = session->data;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data__is_pipe(data))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->core.attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);

        return perf_session__deliver_event(session, event->event,
                                           session->tool, event->file_offset);
}

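/*
 * Typical read-mode usage (a sketch; error handling is trimmed, 'tool' is a
 * caller-provided struct perf_tool, and the initializer only names the
 * struct perf_data fields this path needs):
 *
 *	struct perf_data data = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 *	...
 *	perf_session__delete(session);
 *
 * Note the ERR_PTR() convention: failures are returned as encoded error
 * pointers, not as NULL.
 */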
struct perf_session *perf_session__new(struct perf_data *data,
                                       bool repipe, struct perf_tool *tool)
{
        int ret = -ENOMEM;
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);

        perf_env__init(&session->header.env);
        if (data) {
                ret = perf_data__open(data);
                if (ret < 0)
                        goto out_delete;

                session->data = data;

                if (perf_data__is_read(data)) {
                        ret = perf_session__open(session);
                        if (ret < 0)
                                goto out_delete;

                        /*
                         * set session attributes that are present in perf.data
                         * but not in pipe-mode.
                         */
                        if (!data->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }

                        evlist__init_trace_event_sample_raw(session->evlist);

                        /* Open the directory data. */
                        if (data->is_dir) {
                                ret = perf_data__open_dir(data);
                                if (ret)
                                        goto out_delete;
                        }

                        if (!symbol_conf.kallsyms_name &&
                            !symbol_conf.vmlinux_name)
                                symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        session->machines.host.single_address_space =
                perf_env__single_address_space(session->machines.host.env);

        if (!data || perf_data__is_write(data)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so evlist__sample_id_all is not meaningful here.
         */
        if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_delete:
        perf_session__delete(session);
 out:
        return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
        struct decomp *next, *decomp;
        size_t mmap_len;

        next = session->decomp;
        do {
                decomp = next;
                if (decomp == NULL)
                        break;
                next = decomp->next;
                mmap_len = decomp->mmap_len;
                munmap(decomp, mmap_len);
        } while (1);
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_session__release_decomp_events(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->data)
                perf_data__close(session->data);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

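/*
 * Consume n bytes from fd. Used to skip over AUXTRACE payloads when reading
 * from a pipe, where lseek() is not available.
 */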
static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session,
                                       union perf_event *event)
{
        dump_printf(": unhandled!\n");
        if (perf_data__is_pipe(session->data))
                skipn(perf_data__fd(session->data), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
                             union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
                                   union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
                                        union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_time_conv(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
                                                       union perf_event *event __maybe_unused,
                                                       u64 file_offset __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

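/*
 * Point every callback the tool left NULL at a sane default: real handlers
 * for events the session layer can process generically (lost, aux, ...),
 * "unhandled" stubs for the rest. Tools therefore only need to set the
 * callbacks they care about.
 */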
void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->cgroup == NULL)
                tool->cgroup = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->ksymbol == NULL)
                tool->ksymbol = perf_event__process_ksymbol;
        if (tool->bpf == NULL)
                tool->bpf = perf_event__process_bpf;
        if (tool->text_poke == NULL)
                tool->text_poke = perf_event__process_text_poke;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_time_conv_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
        if (tool->compressed == NULL)
                tool->compressed = perf_session__process_compressed_event;
}

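/*
 * Byte-swap the trailing sample_id_all block of a non-sample event. 'data'
 * points at the first byte past the event-specific payload; everything from
 * there to the end of the record is an array of u64s.
 */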
static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;

        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                   bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);

        if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
                event->mmap2.maj   = bswap_32(event->mmap2.maj);
                event->mmap2.min   = bswap_32(event->mmap2.min);
                event->mmap2.ino   = bswap_64(event->mmap2.ino);
                event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
        }

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
        event->text_poke.addr    = bswap_64(event->text_poke.addr);
        event->text_poke.old_len = bswap_16(event->text_poke.old_len);
        event->text_poke.new_len = bswap_16(event->text_poke.new_len);

        if (sample_id_all) {
                size_t len = sizeof(event->text_poke.old_len) +
                             sizeof(event->text_poke.new_len) +
                             event->text_poke.old_len +
                             event->text_poke.new_len;
                void *data = &event->text_poke.old_len;

                data += PERF_ALIGN(len, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
                                        bool sample_id_all)
{
        u64 i;

        event->namespaces.pid           = bswap_32(event->namespaces.pid);
        event->namespaces.tid           = bswap_32(event->namespaces.tid);
        event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

        for (i = 0; i < event->namespaces.nr_namespaces; i++) {
                struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

                ns->dev = bswap_64(ns->dev);
                ns->ino = bswap_64(ns->ino);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
        event->cgroup.id = bswap_64(event->cgroup.id);

        if (sample_id_all) {
                void *data = &event->cgroup.path;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

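/*
 * Reverse the bit order of one byte by swapping progressively smaller
 * groups: nibbles, then bit pairs, then adjacent bits. For example,
 * revbyte(0xb1): 10110001 -> 00011011 -> 01001110 -> 10001101 = 0x8d.
 */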
static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above appears to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though this
 * seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

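/*
 * attr->size records how large the perf_event_attr that was written actually
 * is, so bswap_safe() checks that a field (or the n-th element after it)
 * really exists before it is swapped: perf.data files produced by older
 * tools carry shorter attr layouts.
 */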
#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while (0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);
        bswap_field_32(aux_sample_size);

        /*
         * The bitfields come right after read_format. Check against
         * read_format because offsetof() cannot be applied to a bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
        if (event->auxtrace_error.fmt)
                event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct perf_record_cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct perf_record_record_cpu_map *mask;
        unsigned i;

        data->type = bswap_16(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct perf_record_record_cpu_map *)data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
                break;
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

static void perf_event__time_conv_swap(union perf_event *event,
                                       bool sample_id_all __maybe_unused)
{
        event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
        event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
        event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);

        if (event_contains(event->time_conv, time_cycles)) {
                event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
                event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
        }
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
        [PERF_RECORD_CGROUP]              = perf_event__cgroup_swap,
        [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event with a timestamp below 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event with a timestamp below 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

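/*
 * Queue an event for timestamp-ordered delivery; it is handed back through
 * ordered_events__deliver_event() once a flush covers its timestamp.
 */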
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * The LBR call stack only captures the user call chain.
                 * Here i is the number of kernel call chain entries and
                 * 1 accounts for the PERF_CONTEXT_USER marker.
                 *
                 * The user call chain is stored in LBR registers, which
                 * come in pairs: the caller is stored in the "from"
                 * register and the callee in the "to" register.
                 * For example, for a call stack
                 * "A"->"B"->"C"->"D",
                 * the LBR registers will be recorded as
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all of the "from"
                 * registers are needed to reconstruct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), entries[i].from);
        }
}

static void callchain__printf(struct evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        uint64_t i;

        printf("%s: nr:%" PRIu64 "\n",
                !callstack ? "... branch stack" : "... branch callstack",
                sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &entries[i];

                if (!callstack) {
                        printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                                i, e->from, e->to,
                                (unsigned short)e->flags.cycles,
                                e->flags.mispred ? "M" : " ",
                                e->flags.predicted ? "P" : " ",
                                e->flags.abort ? "A" : " ",
                                e->flags.in_tx ? "T" : " ",
                                (unsigned)e->flags.reserved);
                } else {
                        printf("..... %2"PRIu64": %016" PRIx64 "\n",
                                i, i > 0 ? e->from : e->to);
                }
        }
}

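/*
 * 'regs' holds values only for the registers present in 'mask', in mask bit
 * order, so the array index advances once per set bit.
 */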
static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%016" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
        u64 sample_type = __evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);
        if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
                evlist->trace_event_sample_raw(evlist, event, sample);

        if (sample)
                evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

char *get_page_size_name(u64 size, char *str)
{
        if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
                snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");

        return str;
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;
        char str[PAGE_SIZE_NAME_LEN];

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->core.attr.sample_type;

        if (evsel__has_callchain(evsel))
                callchain__printf(evsel, sample);

        if (evsel__has_br_stack(evsel))
                branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
                printf("... weight: %" PRIu64 "", sample->weight);
                if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
                        printf(",0x%"PRIx16"", sample->ins_lat);
                        printf(",0x%"PRIx16"", sample->p_stage_cyc);
                }
                printf("\n");
        }

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_PHYS_ADDR)
                printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

        if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
                printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));

        if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
                printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->core.attr.read_format);
}

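/*
 * evsel may be NULL here if the READ event carries an id we cannot map back
 * to an event; evsel__name() tolerates that, and the attr-based fields below
 * are guarded by the NULL check.
 */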
static void dump_read(struct evsel *evsel, union perf_event *event)
{
        struct perf_record_read *read_event = &event->read;
        u64 read_format;

        if (!dump_trace)
                return;

        printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
               evsel__name(evsel), event->read.value);

        if (!evsel)
                return;

        read_format = evsel->core.attr.read_format;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

        if (read_format & PERF_FORMAT_ID)
                printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

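/*
 * Route guest samples to the machine instance of the guest they came from
 * (keyed by pid); everything else belongs to the host machine.
 */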
1381 static struct machine *machines__find_for_cpumode(struct machines *machines,
1382                                                union perf_event *event,
1383                                                struct perf_sample *sample)
1384 {
1385         if (perf_guest &&
1386             ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1387              (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1388                 u32 pid;
1389
1390                 if (event->header.type == PERF_RECORD_MMAP
1391                     || event->header.type == PERF_RECORD_MMAP2)
1392                         pid = event->mmap.pid;
1393                 else
1394                         pid = sample->pid;
1395
1396                 return machines__find_guest(machines, pid);
1397         }
1398
1399         return &machines->host;
1400 }
1401
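/*
 * PERF_SAMPLE_READ samples carry raw counter values rather than
 * deltas.  Turn the value into a period by subtracting the last value
 * remembered in the perf_sample_id, then stash the new value for the
 * next sample with the same id.
 */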
1402 static int deliver_sample_value(struct evlist *evlist,
1403                                 struct perf_tool *tool,
1404                                 union perf_event *event,
1405                                 struct perf_sample *sample,
1406                                 struct sample_read_value *v,
1407                                 struct machine *machine)
1408 {
1409         struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
1410         struct evsel *evsel;
1411
1412         if (sid) {
1413                 sample->id     = v->id;
1414                 sample->period = v->value - sid->period;
1415                 sid->period    = v->value;
1416         }
1417
1418         if (!sid || sid->evsel == NULL) {
1419                 ++evlist->stats.nr_unknown_id;
1420                 return 0;
1421         }
1422
1423         /*
1424          * There's no reason to deliver a sample
1425          * with a zero period, so bail out.
1426          */
1427         if (!sample->period)
1428                 return 0;
1429
1430         evsel = container_of(sid->evsel, struct evsel, core);
1431         return tool->sample(tool, event, sample, evsel, machine);
1432 }
1433
1434 static int deliver_sample_group(struct evlist *evlist,
1435                                 struct perf_tool *tool,
1436                                 union perf_event *event,
1437                                 struct perf_sample *sample,
1438                                 struct machine *machine)
1439 {
1440         int ret = -EINVAL;
1441         u64 i;
1442
1443         for (i = 0; i < sample->read.group.nr; i++) {
1444                 ret = deliver_sample_value(evlist, tool, event, sample,
1445                                            &sample->read.group.values[i],
1446                                            machine);
1447                 if (ret)
1448                         break;
1449         }
1450
1451         return ret;
1452 }
1453
1454 static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
1455                                   union perf_event *event, struct perf_sample *sample,
1456                                   struct evsel *evsel, struct machine *machine)
1457 {
1458         /* We know evsel != NULL. */
1459         u64 sample_type = evsel->core.attr.sample_type;
1460         u64 read_format = evsel->core.attr.read_format;
1461
1462         /* Standard sample delivery. */
1463         if (!(sample_type & PERF_SAMPLE_READ))
1464                 return tool->sample(tool, event, sample, evsel, machine);
1465
1466         /* For PERF_SAMPLE_READ we have either single or group mode. */
1467         if (read_format & PERF_FORMAT_GROUP)
1468                 return deliver_sample_group(evlist, tool, event, sample,
1469                                             machine);
1470         else
1471                 return deliver_sample_value(evlist, tool, event, sample,
1472                                             &sample->read.one, machine);
1473 }
1474
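/*
 * Central dispatcher: route one parsed event to the matching perf_tool
 * callback, updating evlist statistics (lost events and samples,
 * truncated or partial AUX data, unknown types) along the way.
 */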
1475 static int machines__deliver_event(struct machines *machines,
1476                                    struct evlist *evlist,
1477                                    union perf_event *event,
1478                                    struct perf_sample *sample,
1479                                    struct perf_tool *tool, u64 file_offset)
1480 {
1481         struct evsel *evsel;
1482         struct machine *machine;
1483
1484         dump_event(evlist, event, file_offset, sample);
1485
1486         evsel = evlist__id2evsel(evlist, sample->id);
1487
1488         machine = machines__find_for_cpumode(machines, event, sample);
1489
1490         switch (event->header.type) {
1491         case PERF_RECORD_SAMPLE:
1492                 if (evsel == NULL) {
1493                         ++evlist->stats.nr_unknown_id;
1494                         return 0;
1495                 }
1496                 dump_sample(evsel, event, sample);
1497                 if (machine == NULL) {
1498                         ++evlist->stats.nr_unprocessable_samples;
1499                         return 0;
1500                 }
1501                 return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1502         case PERF_RECORD_MMAP:
1503                 return tool->mmap(tool, event, sample, machine);
1504         case PERF_RECORD_MMAP2:
1505                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1506                         ++evlist->stats.nr_proc_map_timeout;
1507                 return tool->mmap2(tool, event, sample, machine);
1508         case PERF_RECORD_COMM:
1509                 return tool->comm(tool, event, sample, machine);
1510         case PERF_RECORD_NAMESPACES:
1511                 return tool->namespaces(tool, event, sample, machine);
1512         case PERF_RECORD_CGROUP:
1513                 return tool->cgroup(tool, event, sample, machine);
1514         case PERF_RECORD_FORK:
1515                 return tool->fork(tool, event, sample, machine);
1516         case PERF_RECORD_EXIT:
1517                 return tool->exit(tool, event, sample, machine);
1518         case PERF_RECORD_LOST:
1519                 if (tool->lost == perf_event__process_lost)
1520                         evlist->stats.total_lost += event->lost.lost;
1521                 return tool->lost(tool, event, sample, machine);
1522         case PERF_RECORD_LOST_SAMPLES:
1523                 if (tool->lost_samples == perf_event__process_lost_samples)
1524                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1525                 return tool->lost_samples(tool, event, sample, machine);
1526         case PERF_RECORD_READ:
1527                 dump_read(evsel, event);
1528                 return tool->read(tool, event, sample, evsel, machine);
1529         case PERF_RECORD_THROTTLE:
1530                 return tool->throttle(tool, event, sample, machine);
1531         case PERF_RECORD_UNTHROTTLE:
1532                 return tool->unthrottle(tool, event, sample, machine);
1533         case PERF_RECORD_AUX:
1534                 if (tool->aux == perf_event__process_aux) {
1535                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1536                                 evlist->stats.total_aux_lost += 1;
1537                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1538                                 evlist->stats.total_aux_partial += 1;
1539                 }
1540                 return tool->aux(tool, event, sample, machine);
1541         case PERF_RECORD_ITRACE_START:
1542                 return tool->itrace_start(tool, event, sample, machine);
1543         case PERF_RECORD_SWITCH:
1544         case PERF_RECORD_SWITCH_CPU_WIDE:
1545                 return tool->context_switch(tool, event, sample, machine);
1546         case PERF_RECORD_KSYMBOL:
1547                 return tool->ksymbol(tool, event, sample, machine);
1548         case PERF_RECORD_BPF_EVENT:
1549                 return tool->bpf(tool, event, sample, machine);
1550         case PERF_RECORD_TEXT_POKE:
1551                 return tool->text_poke(tool, event, sample, machine);
1552         default:
1553                 ++evlist->stats.nr_unknown_events;
1554                 return -1;
1555         }
1556 }
1557
1558 static int perf_session__deliver_event(struct perf_session *session,
1559                                        union perf_event *event,
1560                                        struct perf_tool *tool,
1561                                        u64 file_offset)
1562 {
1563         struct perf_sample sample;
1564         int ret = evlist__parse_sample(session->evlist, event, &sample);
1565
1566         if (ret) {
1567                 pr_err("Can't parse sample, err = %d\n", ret);
1568                 return ret;
1569         }
1570
1571         ret = auxtrace__process_event(session, event, &sample, tool);
1572         if (ret < 0)
1573                 return ret;
1574         if (ret > 0)
1575                 return 0;
1576
1577         ret = machines__deliver_event(&session->machines, session->evlist,
1578                                       event, &sample, tool, file_offset);
1579
1580         if (dump_trace && sample.aux_sample.size)
1581                 auxtrace__dump_auxtrace_sample(session, &sample);
1582
1583         return ret;
1584 }
1585
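/*
 * User/synthetic events (type >= PERF_RECORD_USER_TYPE_START) describe
 * the session itself rather than profiled activity, so they are
 * processed right away and never pass through the ordered_events queue.
 */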
1586 static s64 perf_session__process_user_event(struct perf_session *session,
1587                                             union perf_event *event,
1588                                             u64 file_offset)
1589 {
1590         struct ordered_events *oe = &session->ordered_events;
1591         struct perf_tool *tool = session->tool;
1592         struct perf_sample sample = { .time = 0, };
1593         int fd = perf_data__fd(session->data);
1594         int err;
1595
1596         if (event->header.type != PERF_RECORD_COMPRESSED ||
1597             tool->compressed == perf_session__process_compressed_event_stub)
1598                 dump_event(session->evlist, event, file_offset, &sample);
1599
1600         /* These events are processed right away */
1601         switch (event->header.type) {
1602         case PERF_RECORD_HEADER_ATTR:
1603                 err = tool->attr(tool, event, &session->evlist);
1604                 if (err == 0) {
1605                         perf_session__set_id_hdr_size(session);
1606                         perf_session__set_comm_exec(session);
1607                 }
1608                 return err;
1609         case PERF_RECORD_EVENT_UPDATE:
1610                 return tool->event_update(tool, event, &session->evlist);
1611         case PERF_RECORD_HEADER_EVENT_TYPE:
1612                 /*
1613                  * Deprecated, but we need to handle it for the sake
1614                  * of old data files created in pipe mode.
1615                  */
1616                 return 0;
1617         case PERF_RECORD_HEADER_TRACING_DATA:
1618                 /*
1619                  * Setup for reading amidst mmap, but only when we
1620                  * are in 'file' mode. The 'pipe' fd is already in
1621                  * the proper place.
1622                  */
1623                 if (!perf_data__is_pipe(session->data))
1624                         lseek(fd, file_offset, SEEK_SET);
1625                 return tool->tracing_data(session, event);
1626         case PERF_RECORD_HEADER_BUILD_ID:
1627                 return tool->build_id(session, event);
1628         case PERF_RECORD_FINISHED_ROUND:
1629                 return tool->finished_round(tool, event, oe);
1630         case PERF_RECORD_ID_INDEX:
1631                 return tool->id_index(session, event);
1632         case PERF_RECORD_AUXTRACE_INFO:
1633                 return tool->auxtrace_info(session, event);
1634         case PERF_RECORD_AUXTRACE:
1635                 /* setup for reading amidst mmap */
1636                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1637                 return tool->auxtrace(session, event);
1638         case PERF_RECORD_AUXTRACE_ERROR:
1639                 perf_session__auxtrace_error_inc(session, event);
1640                 return tool->auxtrace_error(session, event);
1641         case PERF_RECORD_THREAD_MAP:
1642                 return tool->thread_map(session, event);
1643         case PERF_RECORD_CPU_MAP:
1644                 return tool->cpu_map(session, event);
1645         case PERF_RECORD_STAT_CONFIG:
1646                 return tool->stat_config(session, event);
1647         case PERF_RECORD_STAT:
1648                 return tool->stat(session, event);
1649         case PERF_RECORD_STAT_ROUND:
1650                 return tool->stat_round(session, event);
1651         case PERF_RECORD_TIME_CONV:
1652                 session->time_conv = event->time_conv;
1653                 return tool->time_conv(session, event);
1654         case PERF_RECORD_HEADER_FEATURE:
1655                 return tool->feature(session, event);
1656         case PERF_RECORD_COMPRESSED:
1657                 err = tool->compressed(session, event, file_offset);
1658                 if (err)
1659                         dump_event(session->evlist, event, file_offset, &sample);
1660                 return err;
1661         default:
1662                 return -EINVAL;
1663         }
1664 }
1665
1666 int perf_session__deliver_synth_event(struct perf_session *session,
1667                                       union perf_event *event,
1668                                       struct perf_sample *sample)
1669 {
1670         struct evlist *evlist = session->evlist;
1671         struct perf_tool *tool = session->tool;
1672
1673         events_stats__inc(&evlist->stats, event->header.type);
1674
1675         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1676                 return perf_session__process_user_event(session, event, 0);
1677
1678         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1679 }
1680
1681 static void event_swap(union perf_event *event, bool sample_id_all)
1682 {
1683         perf_event__swap_op swap;
1684
1685         swap = perf_event__swap_ops[event->header.type];
1686         if (swap)
1687                 swap(event, sample_id_all);
1688 }
1689
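/*
 * Read a single event at an arbitrary file offset without disturbing
 * the main processing loop.  When the whole file is mapped (one_mmap)
 * the event is referenced in place; otherwise it is read into the
 * caller's buffer, byte swapped if needed, and optionally parsed into
 * *sample.
 */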
1690 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1691                              void *buf, size_t buf_sz,
1692                              union perf_event **event_ptr,
1693                              struct perf_sample *sample)
1694 {
1695         union perf_event *event;
1696         size_t hdr_sz, rest;
1697         int fd;
1698
1699         if (session->one_mmap && !session->header.needs_swap) {
1700                 event = file_offset - session->one_mmap_offset +
1701                         session->one_mmap_addr;
1702                 goto out_parse_sample;
1703         }
1704
1705         if (perf_data__is_pipe(session->data))
1706                 return -1;
1707
1708         fd = perf_data__fd(session->data);
1709         hdr_sz = sizeof(struct perf_event_header);
1710
1711         if (buf_sz < hdr_sz)
1712                 return -1;
1713
1714         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1715             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1716                 return -1;
1717
1718         event = (union perf_event *)buf;
1719
1720         if (session->header.needs_swap)
1721                 perf_event_header__bswap(&event->header);
1722
1723         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1724                 return -1;
1725
1726         rest = event->header.size - hdr_sz;
1727
1728         if (readn(fd, buf, rest) != (ssize_t)rest)
1729                 return -1;
1730
1731         if (session->header.needs_swap)
1732                 event_swap(event, evlist__sample_id_all(session->evlist));
1733
1734 out_parse_sample:
1735
1736         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1737             evlist__parse_sample(session->evlist, event, sample))
1738                 return -1;
1739
1740         *event_ptr = event;
1741
1742         return 0;
1743 }
1744
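/*
 * Walk all events in [offset, offset + size), invoking cb on each one.
 * A minimal usage sketch, assuming a hypothetical callback named
 * count_event (any function matching peek_events_cb_t will do):
 *
 *	static int count_event(struct perf_session *s __maybe_unused,
 *			       union perf_event *ev __maybe_unused,
 *			       u64 offset __maybe_unused, void *data)
 *	{
 *		(*(u64 *)data)++;
 *		return 0;
 *	}
 *
 *	u64 nr = 0;
 *	perf_session__peek_events(session, session->header.data_offset,
 *				  session->header.data_size, count_event, &nr);
 */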
1745 int perf_session__peek_events(struct perf_session *session, u64 offset,
1746                               u64 size, peek_events_cb_t cb, void *data)
1747 {
1748         u64 max_offset = offset + size;
1749         char buf[PERF_SAMPLE_MAX_SIZE];
1750         union perf_event *event;
1751         int err;
1752
1753         do {
1754                 err = perf_session__peek_event(session, offset, buf,
1755                                                PERF_SAMPLE_MAX_SIZE, &event,
1756                                                NULL);
1757                 if (err)
1758                         return err;
1759
1760                 err = cb(session, event, offset, data);
1761                 if (err)
1762                         return err;
1763
1764                 offset += event->header.size;
1765                 if (event->header.type == PERF_RECORD_AUXTRACE)
1766                         offset += event->auxtrace.size;
1767
1768         } while (offset < max_offset);
1769
1770         return err;
1771 }
1772
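/*
 * Top-level handler for one raw event: byte swap if the file endianness
 * differs from the host, account the event type, then either process it
 * immediately (user types) or queue it by timestamp.
 * perf_session__queue_event() returns -ETIME for events that carry no
 * usable timestamp; those are delivered directly instead.
 */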
1773 static s64 perf_session__process_event(struct perf_session *session,
1774                                        union perf_event *event, u64 file_offset)
1775 {
1776         struct evlist *evlist = session->evlist;
1777         struct perf_tool *tool = session->tool;
1778         int ret;
1779
1780         if (session->header.needs_swap)
1781                 event_swap(event, evlist__sample_id_all(evlist));
1782
1783         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1784                 return -EINVAL;
1785
1786         events_stats__inc(&evlist->stats, event->header.type);
1787
1788         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1789                 return perf_session__process_user_event(session, event, file_offset);
1790
1791         if (tool->ordered_events) {
1792                 u64 timestamp = -1ULL;
1793
1794                 ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
1795                 if (ret && ret != -1)
1796                         return ret;
1797
1798                 ret = perf_session__queue_event(session, event, timestamp, file_offset);
1799                 if (ret != -ETIME)
1800                         return ret;
1801         }
1802
1803         return perf_session__deliver_event(session, event, tool, file_offset);
1804 }
1805
1806 void perf_event_header__bswap(struct perf_event_header *hdr)
1807 {
1808         hdr->type = bswap_32(hdr->type);
1809         hdr->misc = bswap_16(hdr->misc);
1810         hdr->size = bswap_16(hdr->size);
1811 }
1812
1813 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1814 {
1815         return machine__findnew_thread(&session->machines.host, -1, pid);
1816 }
1817
1818 int perf_session__register_idle_thread(struct perf_session *session)
1819 {
1820         struct thread *thread = machine__idle_thread(&session->machines.host);
1821
1822         /* machine__idle_thread() got the thread, so put it */
1823         thread__put(thread);
1824         return thread ? 0 : -1;
1825 }
1826
1827 static void
1828 perf_session__warn_order(const struct perf_session *session)
1829 {
1830         const struct ordered_events *oe = &session->ordered_events;
1831         struct evsel *evsel;
1832         bool should_warn = true;
1833
1834         evlist__for_each_entry(session->evlist, evsel) {
1835                 if (evsel->core.attr.write_backward)
1836                         should_warn = false;
1837         }
1838
1839         if (!should_warn)
1840                 return;
1841         if (oe->nr_unordered_events != 0)
1842                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1843 }
1844
1845 static void perf_session__warn_about_errors(const struct perf_session *session)
1846 {
1847         const struct events_stats *stats = &session->evlist->stats;
1848
1849         if (session->tool->lost == perf_event__process_lost &&
1850             stats->nr_events[PERF_RECORD_LOST] != 0) {
1851                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1852                             "Check for IO/CPU overload!\n\n",
1853                             stats->nr_events[0],
1854                             stats->nr_events[PERF_RECORD_LOST]);
1855         }
1856
1857         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1858                 double drop_rate;
1859
1860                 drop_rate = (double)stats->total_lost_samples /
1861                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1862                 if (drop_rate > 0.05) {
1863                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1864                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1865                                     drop_rate * 100.0);
1866                 }
1867         }
1868
1869         if (session->tool->aux == perf_event__process_aux &&
1870             stats->total_aux_lost != 0) {
1871                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1872                             stats->total_aux_lost,
1873                             stats->nr_events[PERF_RECORD_AUX]);
1874         }
1875
1876         if (session->tool->aux == perf_event__process_aux &&
1877             stats->total_aux_partial != 0) {
1878                 bool vmm_exclusive = false;
1879
1880                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1881                                        &vmm_exclusive);
1882
1883                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1884                             "Are you running a KVM guest in the background?%s\n\n",
1885                             stats->total_aux_partial,
1886                             stats->nr_events[PERF_RECORD_AUX],
1887                             vmm_exclusive ?
1888                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1889                             "will reduce the gaps to only the guest's timeslices." :
1890                             "");
1891         }
1892
1893         if (stats->nr_unknown_events != 0) {
1894                 ui__warning("Found %u unknown events!\n\n"
1895                             "Is this an older tool processing a perf.data "
1896                             "file generated by a more recent tool?\n\n"
1897                             "If that is not the case, consider "
1898                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1899                             stats->nr_unknown_events);
1900         }
1901
1902         if (stats->nr_unknown_id != 0) {
1903                 ui__warning("%u samples with id not present in the header\n",
1904                             stats->nr_unknown_id);
1905         }
1906
1907         if (stats->nr_invalid_chains != 0) {
1908                 ui__warning("Found invalid callchains!\n\n"
1909                             "%u out of %u events were discarded for this reason.\n\n"
1910                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1911                             stats->nr_invalid_chains,
1912                             stats->nr_events[PERF_RECORD_SAMPLE]);
1913         }
1914
1915         if (stats->nr_unprocessable_samples != 0) {
1916                 ui__warning("%u unprocessable samples recorded.\n"
1917                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1918                             stats->nr_unprocessable_samples);
1919         }
1920
1921         perf_session__warn_order(session);
1922
1923         events_stats__auxtrace_error_warn(stats);
1924
1925         if (stats->nr_proc_map_timeout != 0) {
1926                 ui__warning("%d map information files for pre-existing threads were\n"
1927                             "not processed. If there are samples for those addresses,\n"
1928                             "they will not be resolved. You can find out which threads\n"
1929                             "these are by running with -v and redirecting the output\n"
1930                             "to a file.\n"
1931                             "The time limit for processing a proc map may be too short;\n"
1932                             "increase it with --proc-map-timeout.\n",
1933                             stats->nr_proc_map_timeout);
1934         }
1935 }
1936
1937 static int perf_session__flush_thread_stack(struct thread *thread,
1938                                             void *p __maybe_unused)
1939 {
1940         return thread_stack__flush(thread);
1941 }
1942
1943 static int perf_session__flush_thread_stacks(struct perf_session *session)
1944 {
1945         return machines__for_each_thread(&session->machines,
1946                                          perf_session__flush_thread_stack,
1947                                          NULL);
1948 }
1949
1950 volatile int session_done;
1951
1952 static int __perf_session__process_decomp_events(struct perf_session *session);
1953
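/*
 * Pipe input cannot be mmapped, so events are read into a heap buffer
 * that is grown on demand: first the fixed-size header, then, once the
 * real record size is known, the rest of the payload.
 */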
1954 static int __perf_session__process_pipe_events(struct perf_session *session)
1955 {
1956         struct ordered_events *oe = &session->ordered_events;
1957         struct perf_tool *tool = session->tool;
1958         union perf_event *event;
1959         uint32_t size, cur_size = 0;
1960         void *buf = NULL;
1961         s64 skip = 0;
1962         u64 head;
1963         ssize_t err;
1964         void *p;
1965
1966         perf_tool__fill_defaults(tool);
1967
1968         head = 0;
1969         cur_size = sizeof(union perf_event);
1970
1971         buf = malloc(cur_size);
1972         if (!buf)
1973                 return -errno;
1974         ordered_events__set_copy_on_queue(oe, true);
1975 more:
1976         event = buf;
1977         err = perf_data__read(session->data, event,
1978                               sizeof(struct perf_event_header));
1979         if (err <= 0) {
1980                 if (err == 0)
1981                         goto done;
1982
1983                 pr_err("failed to read event header\n");
1984                 goto out_err;
1985         }
1986
1987         if (session->header.needs_swap)
1988                 perf_event_header__bswap(&event->header);
1989
1990         size = event->header.size;
1991         if (size < sizeof(struct perf_event_header)) {
1992                 pr_err("bad event header size\n");
1993                 goto out_err;
1994         }
1995
1996         if (size > cur_size) {
1997                 void *new = realloc(buf, size);
1998                 if (!new) {
1999                         pr_err("failed to allocate memory to read event\n");
2000                         goto out_err;
2001                 }
2002                 buf = new;
2003                 cur_size = size;
2004                 event = buf;
2005         }
2006         p = event;
2007         p += sizeof(struct perf_event_header);
2008
2009         if (size - sizeof(struct perf_event_header)) {
2010                 err = perf_data__read(session->data, p,
2011                                       size - sizeof(struct perf_event_header));
2012                 if (err <= 0) {
2013                         if (err == 0) {
2014                                 pr_err("unexpected end of event stream\n");
2015                                 goto done;
2016                         }
2017
2018                         pr_err("failed to read event data\n");
2019                         goto out_err;
2020                 }
2021         }
2022
2023         if ((skip = perf_session__process_event(session, event, head)) < 0) {
2024                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2025                        head, event->header.size, event->header.type);
2026                 err = -EINVAL;
2027                 goto out_err;
2028         }
2029
2030         head += size;
2031
2032         if (skip > 0)
2033                 head += skip;
2034
2035         err = __perf_session__process_decomp_events(session);
2036         if (err)
2037                 goto out_err;
2038
2039         if (!session_done())
2040                 goto more;
2041 done:
2042         /* do the final flush for ordered samples */
2043         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2044         if (err)
2045                 goto out_err;
2046         err = auxtrace__flush_events(session, tool);
2047         if (err)
2048                 goto out_err;
2049         err = perf_session__flush_thread_stacks(session);
2050 out_err:
2051         free(buf);
2052         if (!tool->no_warn)
2053                 perf_session__warn_about_errors(session);
2054         ordered_events__free(&session->ordered_events);
2055         auxtrace__free_events(session);
2056         return err;
2057 }
2058
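/*
 * Return the event at 'head' if it lies completely inside the mapped
 * buffer.  If not even the header fits, return NULL; if the header fits
 * but the payload does not, undo the header byte swap and return
 * 'error'.  fetch_mmaped_event() treats the truncated case as fatal
 * (ERR_PTR(-EINVAL)), while fetch_decomp_event() just stops (NULL).
 */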
2059 static union perf_event *
2060 prefetch_event(char *buf, u64 head, size_t mmap_size,
2061                bool needs_swap, union perf_event *error)
2062 {
2063         union perf_event *event;
2064
2065         /*
2066          * Ensure there is enough space remaining to read the
2067          * size field of the event header.
2068          */
2069         if (head + sizeof(event->header) > mmap_size)
2070                 return NULL;
2071
2072         event = (union perf_event *)(buf + head);
2073         if (needs_swap)
2074                 perf_event_header__bswap(&event->header);
2075
2076         if (head + event->header.size <= mmap_size)
2077                 return event;
2078
2079         /* We're not fetching the event so swap back again */
2080         if (needs_swap)
2081                 perf_event_header__bswap(&event->header);
2082
2083         pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
2084                  " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);
2085
2086         return error;
2087 }
2088
2089 static union perf_event *
2090 fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2091 {
2092         return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
2093 }
2094
2095 static union perf_event *
2096 fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2097 {
2098         return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
2099 }
2100
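/*
 * Drain complete events from the most recently decompressed chunk.
 * Events are consumed in place from decomp->data, with decomp->head
 * tracking progress so that a partial trailing event is left behind
 * for a later pass once more data has been decompressed.
 */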
2101 static int __perf_session__process_decomp_events(struct perf_session *session)
2102 {
2103         s64 skip;
2104         u64 size, file_pos = 0;
2105         struct decomp *decomp = session->decomp_last;
2106
2107         if (!decomp)
2108                 return 0;
2109
2110         while (decomp->head < decomp->size && !session_done()) {
2111                 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2112                                                              session->header.needs_swap);
2113
2114                 if (!event)
2115                         break;
2116
2117                 size = event->header.size;
2118
2119                 if (size < sizeof(struct perf_event_header) ||
2120                     (skip = perf_session__process_event(session, event, file_pos)) < 0) {
2121                         pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2122                                 decomp->file_pos + decomp->head, event->header.size, event->header.type);
2123                         return -EINVAL;
2124                 }
2125
2126                 if (skip)
2127                         size += skip;
2128
2129                 decomp->head += size;
2130         }
2131
2132         return 0;
2133 }
2134
2135 /*
2136  * On 64-bit we can mmap the data file in one go; no need for tiny
2137  * mmap slices. On 32-bit we use 32MB slices.
2138  */
2139 #if BITS_PER_LONG == 64
2140 #define MMAP_SIZE ULLONG_MAX
2141 #define NUM_MMAPS 1
2142 #else
2143 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2144 #define NUM_MMAPS 128
2145 #endif
2146
2147 struct reader;
2148
2149 typedef s64 (*reader_cb_t)(struct perf_session *session,
2150                            union perf_event *event,
2151                            u64 file_offset);
2152
2153 struct reader {
2154         int              fd;
2155         u64              data_size;
2156         u64              data_offset;
2157         reader_cb_t      process;
2158 };
2159
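/*
 * Process the data section window by window: mmap a slice, deliver the
 * complete events inside it, and when an event crosses the end of the
 * slice, unmap and remap starting at the page that contains the partial
 * event.  On 64-bit the whole file fits in a single mapping (one_mmap),
 * which also enables the peek fast path.
 */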
2160 static int
2161 reader__process_events(struct reader *rd, struct perf_session *session,
2162                        struct ui_progress *prog)
2163 {
2164         u64 data_size = rd->data_size;
2165         u64 head, page_offset, file_offset, file_pos, size;
2166         int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2167         size_t  mmap_size;
2168         char *buf, *mmaps[NUM_MMAPS];
2169         union perf_event *event;
2170         s64 skip;
2171
2172         page_offset = page_size * (rd->data_offset / page_size);
2173         file_offset = page_offset;
2174         head = rd->data_offset - page_offset;
2175
2176         ui_progress__init_size(prog, data_size, "Processing events...");
2177
2178         data_size += rd->data_offset;
2179
2180         mmap_size = MMAP_SIZE;
2181         if (mmap_size > data_size) {
2182                 mmap_size = data_size;
2183                 session->one_mmap = true;
2184         }
2185
2186         memset(mmaps, 0, sizeof(mmaps));
2187
2188         mmap_prot  = PROT_READ;
2189         mmap_flags = MAP_SHARED;
2190
2191         if (session->header.needs_swap) {
2192                 mmap_prot  |= PROT_WRITE;
2193                 mmap_flags = MAP_PRIVATE;
2194         }
2195 remap:
2196         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2197                    file_offset);
2198         if (buf == MAP_FAILED) {
2199                 pr_err("failed to mmap file\n");
2200                 err = -errno;
2201                 goto out;
2202         }
2203         mmaps[map_idx] = buf;
2204         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2205         file_pos = file_offset + head;
2206         if (session->one_mmap) {
2207                 session->one_mmap_addr = buf;
2208                 session->one_mmap_offset = file_offset;
2209         }
2210
2211 more:
2212         event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
2213         if (IS_ERR(event))
2214                 return PTR_ERR(event);
2215
2216         if (!event) {
2217                 if (mmaps[map_idx]) {
2218                         munmap(mmaps[map_idx], mmap_size);
2219                         mmaps[map_idx] = NULL;
2220                 }
2221
2222                 page_offset = page_size * (head / page_size);
2223                 file_offset += page_offset;
2224                 head -= page_offset;
2225                 goto remap;
2226         }
2227
2228         size = event->header.size;
2229
2230         skip = -EINVAL;
2231
2232         if (size < sizeof(struct perf_event_header) ||
2233             (skip = rd->process(session, event, file_pos)) < 0) {
2234                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2235                        file_offset + head, event->header.size,
2236                        event->header.type, strerror(-skip));
2237                 err = skip;
2238                 goto out;
2239         }
2240
2241         if (skip)
2242                 size += skip;
2243
2244         head += size;
2245         file_pos += size;
2246
2247         err = __perf_session__process_decomp_events(session);
2248         if (err)
2249                 goto out;
2250
2251         ui_progress__update(prog, size);
2252
2253         if (session_done())
2254                 goto out;
2255
2256         if (file_pos < data_size)
2257                 goto more;
2258
2259 out:
2260         return err;
2261 }
2262
2263 static s64 process_simple(struct perf_session *session,
2264                           union perf_event *event,
2265                           u64 file_offset)
2266 {
2267         return perf_session__process_event(session, event, file_offset);
2268 }
2269
2270 static int __perf_session__process_events(struct perf_session *session)
2271 {
2272         struct reader rd = {
2273                 .fd             = perf_data__fd(session->data),
2274                 .data_size      = session->header.data_size,
2275                 .data_offset    = session->header.data_offset,
2276                 .process        = process_simple,
2277         };
2278         struct ordered_events *oe = &session->ordered_events;
2279         struct perf_tool *tool = session->tool;
2280         struct ui_progress prog;
2281         int err;
2282
2283         perf_tool__fill_defaults(tool);
2284
2285         if (rd.data_size == 0)
2286                 return -1;
2287
2288         ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2289
2290         err = reader__process_events(&rd, session, &prog);
2291         if (err)
2292                 goto out_err;
2293         /* do the final flush for ordered samples */
2294         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2295         if (err)
2296                 goto out_err;
2297         err = auxtrace__flush_events(session, tool);
2298         if (err)
2299                 goto out_err;
2300         err = perf_session__flush_thread_stacks(session);
2301 out_err:
2302         ui_progress__finish();
2303         if (!tool->no_warn)
2304                 perf_session__warn_about_errors(session);
2305         /*
2306          * We may be switching perf.data output, so make
2307          * ordered_events reusable.
2308          */
2309         ordered_events__reinit(&session->ordered_events);
2310         auxtrace__free_events(session);
2311         session->one_mmap = false;
2312         return err;
2313 }
2314
2315 int perf_session__process_events(struct perf_session *session)
2316 {
2317         if (perf_session__register_idle_thread(session) < 0)
2318                 return -ENOMEM;
2319
2320         if (perf_data__is_pipe(session->data))
2321                 return __perf_session__process_pipe_events(session);
2322
2323         return __perf_session__process_events(session);
2324 }
2325
2326 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2327 {
2328         struct evsel *evsel;
2329
2330         evlist__for_each_entry(session->evlist, evsel) {
2331                 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2332                         return true;
2333         }
2334
2335         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2336         return false;
2337 }
2338
2339 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2340 {
2341         char *bracket;
2342         struct ref_reloc_sym *ref;
2343         struct kmap *kmap;
2344
2345         ref = zalloc(sizeof(struct ref_reloc_sym));
2346         if (ref == NULL)
2347                 return -ENOMEM;
2348
2349         ref->name = strdup(symbol_name);
2350         if (ref->name == NULL) {
2351                 free(ref);
2352                 return -ENOMEM;
2353         }
2354
2355         bracket = strchr(ref->name, ']');
2356         if (bracket)
2357                 *bracket = '\0';
2358
2359         ref->addr = addr;
2360
2361         kmap = map__kmap(map);
2362         if (kmap)
2363                 kmap->ref_reloc_sym = ref;
2364
2365         return 0;
2366 }
2367
2368 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2369 {
2370         return machines__fprintf_dsos(&session->machines, fp);
2371 }
2372
2373 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2374                                           bool (skip)(struct dso *dso, int parm), int parm)
2375 {
2376         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2377 }
2378
2379 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
2380                                        bool skip_empty)
2381 {
2382         size_t ret;
2383         const char *msg = "";
2384
2385         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2386                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2387
2388         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2389
2390         ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
2391         return ret;
2392 }
2393
2394 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2395 {
2396         /*
2397          * FIXME: Here we have to actually print all the machines in this
2398          * session, not just the host...
2399          */
2400         return machine__fprintf(&session->machines.host, fp);
2401 }
2402
2403 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2404                                               unsigned int type)
2405 {
2406         struct evsel *pos;
2407
2408         evlist__for_each_entry(session->evlist, pos) {
2409                 if (pos->core.attr.type == type)
2410                         return pos;
2411         }
2412         return NULL;
2413 }
2414
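/*
 * Fill 'cpu_bitmap' with the CPUs named in 'cpu_list', refusing files
 * whose samples do not record the CPU.  A minimal usage sketch; the
 * bitmap must provide MAX_NR_CPUS bits, as callers such as 'perf
 * report' do with DECLARE_BITMAP():
 *
 *	unsigned long cpu_bitmap[BITS_TO_LONGS(MAX_NR_CPUS)];
 *
 *	memset(cpu_bitmap, 0, sizeof(cpu_bitmap));
 *	if (!perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) &&
 *	    test_bit(2, cpu_bitmap))
 *		... CPU 2 was requested ...
 */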
2415 int perf_session__cpu_bitmap(struct perf_session *session,
2416                              const char *cpu_list, unsigned long *cpu_bitmap)
2417 {
2418         int i, err = -1;
2419         struct perf_cpu_map *map;
2420         int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
2421
2422         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2423                 struct evsel *evsel;
2424
2425                 evsel = perf_session__find_first_evtype(session, i);
2426                 if (!evsel)
2427                         continue;
2428
2429                 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2430                         pr_err("File does not contain CPU events. "
2431                                "Remove -C option to proceed.\n");
2432                         return -1;
2433                 }
2434         }
2435
2436         map = perf_cpu_map__new(cpu_list);
2437         if (map == NULL) {
2438                 pr_err("Invalid cpu_list\n");
2439                 return -1;
2440         }
2441
2442         for (i = 0; i < map->nr; i++) {
2443                 int cpu = map->map[i];
2444
2445                 if (cpu >= nr_cpus) {
2446                         pr_err("Requested CPU %d too large. "
2447                                "Consider raising MAX_NR_CPUS\n", cpu);
2448                         goto out_delete_map;
2449                 }
2450
2451                 set_bit(cpu, cpu_bitmap);
2452         }
2453
2454         err = 0;
2455
2456 out_delete_map:
2457         perf_cpu_map__put(map);
2458         return err;
2459 }
2460
2461 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2462                                 bool full)
2463 {
2464         if (session == NULL || fp == NULL)
2465                 return;
2466
2467         fprintf(fp, "# ========\n");
2468         perf_header__fprintf_info(session, fp, full);
2469         fprintf(fp, "# ========\n#\n");
2470 }
2471
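/*
 * An ID_INDEX record maps each sample id to its (idx, cpu, tid) triple,
 * letting tools attribute events without parsing every sample.  'nr' is
 * bounds-checked against what the record size can actually hold before
 * it is trusted.
 */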
2472 int perf_event__process_id_index(struct perf_session *session,
2473                                  union perf_event *event)
2474 {
2475         struct evlist *evlist = session->evlist;
2476         struct perf_record_id_index *ie = &event->id_index;
2477         size_t i, nr, max_nr;
2478
2479         max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
2480                  sizeof(struct id_index_entry);
2481         nr = ie->nr;
2482         if (nr > max_nr)
2483                 return -EINVAL;
2484
2485         if (dump_trace)
2486                 fprintf(stdout, " nr: %zu\n", nr);
2487
2488         for (i = 0; i < nr; i++) {
2489                 struct id_index_entry *e = &ie->entries[i];
2490                 struct perf_sample_id *sid;
2491
2492                 if (dump_trace) {
2493                         fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2494                         fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
2495                         fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
2496                         fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
2497                 }
2498
2499                 sid = evlist__id2sid(evlist, e->id);
2500                 if (!sid)
2501                         return -ENOENT;
2502                 sid->idx = e->idx;
2503                 sid->cpu = e->cpu;
2504                 sid->tid = e->tid;
2505         }
2506         return 0;
2507 }