// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
                                                  union perf_event *event, u64 file_offset)
{
        void *src;
        size_t decomp_size, src_size;
        u64 decomp_last_rem = 0;
        size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
        struct decomp *decomp, *decomp_last = session->decomp_last;

        if (decomp_last) {
                decomp_last_rem = decomp_last->size - decomp_last->head;
                decomp_len += decomp_last_rem;
        }

        mmap_len = sizeof(struct decomp) + decomp_len;
        decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (decomp == MAP_FAILED) {
                pr_err("Couldn't allocate memory for decompression\n");
                return -1;
        }

        decomp->file_pos = file_offset;
        decomp->mmap_len = mmap_len;
        decomp->head = 0;

        if (decomp_last_rem) {
                memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
                decomp->size = decomp_last_rem;
        }

        src = (void *)event + sizeof(struct perf_record_compressed);
        src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

        decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
                                &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
        if (!decomp_size) {
                munmap(decomp, mmap_len);
                pr_err("Couldn't decompress data\n");
                return -1;
        }

        decomp->size += decomp_size;

        if (session->decomp == NULL) {
                session->decomp = decomp;
                session->decomp_last = decomp;
        } else {
                session->decomp_last->next = decomp;
                session->decomp_last = decomp;
        }

        pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

        return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif
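
/*
 * The buffers allocated above are chained into a singly linked list
 * rooted at session->decomp, with 'head' tracking how far a consumer
 * has read into 'data'.  An illustrative walk over that list (a sketch,
 * assuming each buffer holds whole records; deliver() is a hypothetical
 * helper, not a function in this file):
 *
 *	struct decomp *d;
 *
 *	for (d = session->decomp; d; d = d->next) {
 *		while (d->head < d->size) {
 *			union perf_event *ev = (void *)d->data + d->head;
 *
 *			deliver(ev);
 *			d->head += ev->header.size;
 *		}
 *	}
 */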

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
        struct perf_data *data = session->data;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data__is_pipe(data))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!perf_evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!perf_evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!perf_evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->core.attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);

        return perf_session__deliver_event(session, event->event,
                                           session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
                                       bool repipe, struct perf_tool *tool)
{
        int ret = -ENOMEM;
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);

        perf_env__init(&session->header.env);
        if (data) {
                ret = perf_data__open(data);
                if (ret < 0)
                        goto out_delete;

                session->data = data;

                if (perf_data__is_read(data)) {
                        ret = perf_session__open(session);
                        if (ret < 0)
                                goto out_delete;

                        /*
                         * set session attributes that are present in perf.data
                         * but not in pipe-mode.
                         */
                        if (!data->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }

                        perf_evlist__init_trace_event_sample_raw(session->evlist);

                        /* Open the directory data. */
                        if (data->is_dir) {
                                ret = perf_data__open_dir(data);
                                if (ret)
                                        goto out_delete;
                        }
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        session->machines.host.single_address_space =
                perf_env__single_address_space(session->machines.host.env);

        if (!data || perf_data__is_write(data)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so perf_evlist__sample_id_all is not meaningful here.
         */
        if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_delete:
        perf_session__delete(session);
 out:
        return ERR_PTR(ret);
}
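
/*
 * Illustrative read-mode usage (a sketch, not taken from a caller in
 * this tree; error handling trimmed, 'tool' set up by the caller):
 *
 *	struct perf_data data = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 *	...
 *	perf_session__delete(session);
 */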

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
        struct decomp *next, *decomp;
        size_t mmap_len;

        next = session->decomp;
        while (next) {
                decomp = next;
                next = decomp->next;
                mmap_len = decomp->mmap_len;
                munmap(decomp, mmap_len);
        }
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_session__release_decomp_events(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->data)
                perf_data__close(session->data);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session,
                                       union perf_event *event)
{
        dump_printf(": unhandled!\n");
        if (perf_data__is_pipe(session->data))
                skipn(perf_data__fd(session->data), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
                             union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
                                   union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
                                                       union perf_event *event __maybe_unused,
                                                       u64 file_offset __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->ksymbol == NULL)
                tool->ksymbol = perf_event__process_ksymbol;
        if (tool->bpf == NULL)
                tool->bpf = perf_event__process_bpf;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_op2_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
        if (tool->compressed == NULL)
                tool->compressed = perf_session__process_compressed_event;
}
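
/*
 * A minimal tool only sets the callbacks it needs and relies on the
 * stubs filled in above, e.g. (a sketch; my_sample is a hypothetical
 * handler, not defined in this file):
 *
 *	struct perf_tool tool = {
 *		.sample		= my_sample,
 *		.ordered_events	= true,
 *	};
 *
 *	perf_tool__fill_defaults(&tool);
 */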

static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;

        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}
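
/*
 * PERF_ALIGN() above rounds the NUL-terminated string up to the next
 * u64 boundary to locate the sample_id_all trailer: for a comm of
 * "perf" (strlen 4), PERF_ALIGN(4 + 1, 8) == 8, so the trailer starts
 * 8 bytes into comm[].
 */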

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                   bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);
        event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
                                        bool sample_id_all)
{
        u64 i;

        event->namespaces.pid           = bswap_32(event->namespaces.pid);
        event->namespaces.tid           = bswap_32(event->namespaces.tid);
        event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

        for (i = 0; i < event->namespaces.nr_namespaces; i++) {
                struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

                ns->dev = bswap_64(ns->dev);
                ns->ino = bswap_64(ns->ino);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though this
 * seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}
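
/*
 * Worked example: revbyte(0xa1) mirrors the bit order of one byte,
 * 1010 0001 -> 1000 0101, i.e. 0xa1 -> 0x85, and swap_bitfield()
 * applies that to each byte of the flags area independently.
 */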

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while (0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);

        /*
         * After read_format are bitfields. Check read_format because
         * we are unable to use offsetof on a bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
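
/*
 * bswap_safe() matters because attr->size records how much of struct
 * perf_event_attr the producer actually wrote: e.g. a perf.data file
 * written by an older tool may end before sample_max_stack, in which
 * case bswap_field_16(sample_max_stack) above is silently skipped
 * rather than swapping bytes past the recorded attr.
 */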

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
        if (event->auxtrace_error.fmt)
                event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct perf_record_cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct perf_record_record_cpu_map *mask;
        unsigned i;

        data->type = bswap_64(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct perf_record_record_cpu_map *)data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
                break;
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
        size += sizeof(u64); /* the nr field itself (size is in bytes) */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}
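
/*
 * Sketch of how queueing and the round protocol fit together (the
 * flush mirrors process_finished_round() above; the timestamp and
 * offset arguments are illustrative):
 *
 *	perf_session__queue_event(s, ev, sample.time, file_offset);
 *	...
 *	// on PERF_RECORD_FINISHED_ROUND:
 *	ordered_events__flush(&s->ordered_events, OE_FLUSH__ROUND);
 */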

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * LBR callstack can only get the user call chain;
                 * i is the kernel call chain length,
                 * 1 is for PERF_CONTEXT_USER.
                 *
                 * The user call chain is stored in LBR registers.
                 * LBRs are register pairs: the caller is stored
                 * in the "from" register, while the callee is stored
                 * in the "to" register.
                 * For example, for a call stack
                 * "A"->"B"->"C"->"D",
                 * the LBR registers will record
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to construct the whole stack.
                 */
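                /*
                 * Example: with i == 1 (a single kernel IP followed by
                 * the PERF_CONTEXT_USER marker) and lbr_stack->nr == 3,
                 * total_nr = 1 + 1 + 3 + 1 = 6, matching the six lines
                 * printed below.
                 */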
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
        }
}

static void callchain__printf(struct evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (perf_evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
        uint64_t i;

        printf("%s: nr:%" PRIu64 "\n",
                !callstack ? "... branch stack" : "... branch callstack",
                sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &sample->branch_stack->entries[i];

                if (!callstack) {
                        printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                                i, e->from, e->to,
                                (unsigned short)e->flags.cycles,
                                e->flags.mispred ? "M" : " ",
                                e->flags.predicted ? "P" : " ",
                                e->flags.abort ? "A" : " ",
                                e->flags.in_tx ? "T" : " ",
                                (unsigned)e->flags.reserved);
                } else {
                        printf("..... %2"PRIu64": %016" PRIx64 "\n",
                                i, i > 0 ? e->from : e->to);
                }
        }
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}
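
/*
 * regs[] is packed: it carries one value per set bit of mask, in
 * ascending bit order.  E.g. for mask 0x5 (bits 0 and 2 set), regs[0]
 * belongs to register id 0 and regs[1] to register id 2, which is why
 * the loop above advances i only on set bits.
 */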

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
                                      union perf_event *event,
                                      struct perf_sample *sample)
{
        u64 sample_type = __perf_evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !perf_evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);
        if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
                evlist->trace_event_sample_raw(evlist, event, sample);

        if (sample)
                perf_evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->core.attr.sample_type;

        if (evsel__has_callchain(evsel))
                callchain__printf(evsel, sample);

        if (sample_type & PERF_SAMPLE_BRANCH_STACK)
                branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel));

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_PHYS_ADDR)
                printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
        struct perf_record_read *read_event = &event->read;
        u64 read_format;

        if (!dump_trace)
                return;

        printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
               perf_evsel__name(evsel),
               event->read.value);

        if (!evsel)
                return;

        read_format = evsel->core.attr.read_format;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

        if (read_format & PERF_FORMAT_ID)
                printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
                                                  union perf_event *event,
                                                  struct perf_sample *sample)
{
        struct machine *machine;

        if (perf_guest &&
            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = machines__find(machines, pid);
                if (!machine)
                        machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &machines->host;
}

static int deliver_sample_value(struct evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
        struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
        struct evsel *evsel;

        if (sid) {
                sample->id     = v->id;
                sample->period = v->value - sid->period;
                sid->period    = v->value;
        }

        if (!sid || sid->evsel == NULL) {
                ++evlist->stats.nr_unknown_id;
                return 0;
        }

        /*
         * There's no reason to deliver a sample
         * with zero period, so bail out.
         */
        if (!sample->period)
                return 0;

        evsel = container_of(sid->evsel, struct evsel, core);
        return tool->sample(tool, event, sample, evsel, machine);
}
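
/*
 * The period above is a delta against the previous raw counter value
 * seen for this id: e.g. with sid->period == 100 and v->value == 250,
 * the sample is delivered with period 150 and sid->period advances
 * to 250.
 */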
1349
1350 static int deliver_sample_group(struct evlist *evlist,
1351                                 struct perf_tool *tool,
1352                                 union  perf_event *event,
1353                                 struct perf_sample *sample,
1354                                 struct machine *machine)
1355 {
1356         int ret = -EINVAL;
1357         u64 i;
1358
1359         for (i = 0; i < sample->read.group.nr; i++) {
1360                 ret = deliver_sample_value(evlist, tool, event, sample,
1361                                            &sample->read.group.values[i],
1362                                            machine);
1363                 if (ret)
1364                         break;
1365         }
1366
1367         return ret;
1368 }
1369
1370 static int
1371  perf_evlist__deliver_sample(struct evlist *evlist,
1372                              struct perf_tool *tool,
1373                              union  perf_event *event,
1374                              struct perf_sample *sample,
1375                              struct evsel *evsel,
1376                              struct machine *machine)
1377 {
1378         /* We know evsel != NULL. */
1379         u64 sample_type = evsel->core.attr.sample_type;
1380         u64 read_format = evsel->core.attr.read_format;
1381
1382         /* Standard sample delivery. */
1383         if (!(sample_type & PERF_SAMPLE_READ))
1384                 return tool->sample(tool, event, sample, evsel, machine);
1385
1386         /* For PERF_SAMPLE_READ we have either single or group mode. */
1387         if (read_format & PERF_FORMAT_GROUP)
1388                 return deliver_sample_group(evlist, tool, event, sample,
1389                                             machine);
1390         else
1391                 return deliver_sample_value(evlist, tool, event, sample,
1392                                             &sample->read.one, machine);
1393 }
1394
1395 static int machines__deliver_event(struct machines *machines,
1396                                    struct evlist *evlist,
1397                                    union perf_event *event,
1398                                    struct perf_sample *sample,
1399                                    struct perf_tool *tool, u64 file_offset)
1400 {
1401         struct evsel *evsel;
1402         struct machine *machine;
1403
1404         dump_event(evlist, event, file_offset, sample);
1405
1406         evsel = perf_evlist__id2evsel(evlist, sample->id);
1407
1408         machine = machines__find_for_cpumode(machines, event, sample);
1409
1410         switch (event->header.type) {
1411         case PERF_RECORD_SAMPLE:
1412                 if (evsel == NULL) {
1413                         ++evlist->stats.nr_unknown_id;
1414                         return 0;
1415                 }
1416                 dump_sample(evsel, event, sample);
1417                 if (machine == NULL) {
1418                         ++evlist->stats.nr_unprocessable_samples;
1419                         return 0;
1420                 }
1421                 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1422         case PERF_RECORD_MMAP:
1423                 return tool->mmap(tool, event, sample, machine);
1424         case PERF_RECORD_MMAP2:
1425                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1426                         ++evlist->stats.nr_proc_map_timeout;
1427                 return tool->mmap2(tool, event, sample, machine);
1428         case PERF_RECORD_COMM:
1429                 return tool->comm(tool, event, sample, machine);
1430         case PERF_RECORD_NAMESPACES:
1431                 return tool->namespaces(tool, event, sample, machine);
1432         case PERF_RECORD_FORK:
1433                 return tool->fork(tool, event, sample, machine);
1434         case PERF_RECORD_EXIT:
1435                 return tool->exit(tool, event, sample, machine);
1436         case PERF_RECORD_LOST:
1437                 if (tool->lost == perf_event__process_lost)
1438                         evlist->stats.total_lost += event->lost.lost;
1439                 return tool->lost(tool, event, sample, machine);
1440         case PERF_RECORD_LOST_SAMPLES:
1441                 if (tool->lost_samples == perf_event__process_lost_samples)
1442                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1443                 return tool->lost_samples(tool, event, sample, machine);
1444         case PERF_RECORD_READ:
1445                 dump_read(evsel, event);
1446                 return tool->read(tool, event, sample, evsel, machine);
1447         case PERF_RECORD_THROTTLE:
1448                 return tool->throttle(tool, event, sample, machine);
1449         case PERF_RECORD_UNTHROTTLE:
1450                 return tool->unthrottle(tool, event, sample, machine);
1451         case PERF_RECORD_AUX:
1452                 if (tool->aux == perf_event__process_aux) {
1453                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1454                                 evlist->stats.total_aux_lost += 1;
1455                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1456                                 evlist->stats.total_aux_partial += 1;
1457                 }
1458                 return tool->aux(tool, event, sample, machine);
1459         case PERF_RECORD_ITRACE_START:
1460                 return tool->itrace_start(tool, event, sample, machine);
1461         case PERF_RECORD_SWITCH:
1462         case PERF_RECORD_SWITCH_CPU_WIDE:
1463                 return tool->context_switch(tool, event, sample, machine);
1464         case PERF_RECORD_KSYMBOL:
1465                 return tool->ksymbol(tool, event, sample, machine);
1466         case PERF_RECORD_BPF_EVENT:
1467                 return tool->bpf(tool, event, sample, machine);
1468         default:
1469                 ++evlist->stats.nr_unknown_events;
1470                 return -1;
1471         }
1472 }
1473
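/*
 * Parse the sample out of the raw event and give AUX area tracing a chance
 * to consume it before delivering it to the machines layer.
 */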
1474 static int perf_session__deliver_event(struct perf_session *session,
1475                                        union perf_event *event,
1476                                        struct perf_tool *tool,
1477                                        u64 file_offset)
1478 {
1479         struct perf_sample sample;
1480         int ret;
1481
1482         ret = perf_evlist__parse_sample(session->evlist, event, &sample);
1483         if (ret) {
1484                 pr_err("Can't parse sample, err = %d\n", ret);
1485                 return ret;
1486         }
1487
1488         ret = auxtrace__process_event(session, event, &sample, tool);
1489         if (ret < 0)
1490                 return ret;
1491         if (ret > 0)
1492                 return 0;
1493
1494         return machines__deliver_event(&session->machines, session->evlist,
1495                                        event, &sample, tool, file_offset);
1496 }
1497
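/*
 * User (synthesized) events describe the session itself rather than the
 * workload, so they are handled as soon as they are read instead of being
 * queued for ordered delivery.
 */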
1498 static s64 perf_session__process_user_event(struct perf_session *session,
1499                                             union perf_event *event,
1500                                             u64 file_offset)
1501 {
1502         struct ordered_events *oe = &session->ordered_events;
1503         struct perf_tool *tool = session->tool;
1504         struct perf_sample sample = { .time = 0, };
1505         int fd = perf_data__fd(session->data);
1506         int err;
1507
1508         if (event->header.type != PERF_RECORD_COMPRESSED ||
1509             tool->compressed == perf_session__process_compressed_event_stub)
1510                 dump_event(session->evlist, event, file_offset, &sample);
1511
1512         /* These events are processed right away */
1513         switch (event->header.type) {
1514         case PERF_RECORD_HEADER_ATTR:
1515                 err = tool->attr(tool, event, &session->evlist);
1516                 if (err == 0) {
1517                         perf_session__set_id_hdr_size(session);
1518                         perf_session__set_comm_exec(session);
1519                 }
1520                 return err;
1521         case PERF_RECORD_EVENT_UPDATE:
1522                 return tool->event_update(tool, event, &session->evlist);
1523         case PERF_RECORD_HEADER_EVENT_TYPE:
1524                 /*
1525                  * Deprecated, but we need to handle it for the sake
1526                  * of old data files created in pipe mode.
1527                  */
1528                 return 0;
1529         case PERF_RECORD_HEADER_TRACING_DATA:
1530                 /* setup for reading amidst mmap */
1531                 lseek(fd, file_offset, SEEK_SET);
1532                 return tool->tracing_data(session, event);
1533         case PERF_RECORD_HEADER_BUILD_ID:
1534                 return tool->build_id(session, event);
1535         case PERF_RECORD_FINISHED_ROUND:
1536                 return tool->finished_round(tool, event, oe);
1537         case PERF_RECORD_ID_INDEX:
1538                 return tool->id_index(session, event);
1539         case PERF_RECORD_AUXTRACE_INFO:
1540                 return tool->auxtrace_info(session, event);
1541         case PERF_RECORD_AUXTRACE:
1542                 /* setup for reading amidst mmap */
1543                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1544                 return tool->auxtrace(session, event);
1545         case PERF_RECORD_AUXTRACE_ERROR:
1546                 perf_session__auxtrace_error_inc(session, event);
1547                 return tool->auxtrace_error(session, event);
1548         case PERF_RECORD_THREAD_MAP:
1549                 return tool->thread_map(session, event);
1550         case PERF_RECORD_CPU_MAP:
1551                 return tool->cpu_map(session, event);
1552         case PERF_RECORD_STAT_CONFIG:
1553                 return tool->stat_config(session, event);
1554         case PERF_RECORD_STAT:
1555                 return tool->stat(session, event);
1556         case PERF_RECORD_STAT_ROUND:
1557                 return tool->stat_round(session, event);
1558         case PERF_RECORD_TIME_CONV:
1559                 session->time_conv = event->time_conv;
1560                 return tool->time_conv(session, event);
1561         case PERF_RECORD_HEADER_FEATURE:
1562                 return tool->feature(session, event);
1563         case PERF_RECORD_COMPRESSED:
1564                 err = tool->compressed(session, event, file_offset);
1565                 if (err)
1566                         dump_event(session->evlist, event, file_offset, &sample);
1567                 return err;
1568         default:
1569                 return -EINVAL;
1570         }
1571 }
1572
1573 int perf_session__deliver_synth_event(struct perf_session *session,
1574                                       union perf_event *event,
1575                                       struct perf_sample *sample)
1576 {
1577         struct evlist *evlist = session->evlist;
1578         struct perf_tool *tool = session->tool;
1579
1580         events_stats__inc(&evlist->stats, event->header.type);
1581
1582         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1583                 return perf_session__process_user_event(session, event, 0);
1584
1585         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1586 }
1587
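/*
 * Byte swap an event that was recorded on a machine of the opposite
 * endianness, using the per event type swap handler if one exists.
 */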
1588 static void event_swap(union perf_event *event, bool sample_id_all)
1589 {
1590         perf_event__swap_op swap;
1591
1592         swap = perf_event__swap_ops[event->header.type];
1593         if (swap)
1594                 swap(event, sample_id_all);
1595 }
1596
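/*
 * Read the event at file_offset without disturbing the normal processing
 * position: use the single mmap directly when possible, otherwise seek and
 * read into buf, byte swapping and parsing the sample as needed.
 */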
1597 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1598                              void *buf, size_t buf_sz,
1599                              union perf_event **event_ptr,
1600                              struct perf_sample *sample)
1601 {
1602         union perf_event *event;
1603         size_t hdr_sz, rest;
1604         int fd;
1605
1606         if (session->one_mmap && !session->header.needs_swap) {
1607                 event = file_offset - session->one_mmap_offset +
1608                         session->one_mmap_addr;
1609                 goto out_parse_sample;
1610         }
1611
1612         if (perf_data__is_pipe(session->data))
1613                 return -1;
1614
1615         fd = perf_data__fd(session->data);
1616         hdr_sz = sizeof(struct perf_event_header);
1617
1618         if (buf_sz < hdr_sz)
1619                 return -1;
1620
1621         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1622             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1623                 return -1;
1624
1625         event = (union perf_event *)buf;
1626
1627         if (session->header.needs_swap)
1628                 perf_event_header__bswap(&event->header);
1629
1630         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1631                 return -1;
1632
1633         rest = event->header.size - hdr_sz;
1634
1635         if (readn(fd, buf, rest) != (ssize_t)rest)
1636                 return -1;
1637
1638         if (session->header.needs_swap)
1639                 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1640
1641 out_parse_sample:
1642
1643         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1644             perf_evlist__parse_sample(session->evlist, event, sample))
1645                 return -1;
1646
1647         *event_ptr = event;
1648
1649         return 0;
1650 }
1651
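/*
 * Top level event processing: swap if needed, account statistics, handle
 * user events right away, and either queue the event by timestamp for
 * ordered delivery or, failing that, deliver it directly.
 */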
1652 static s64 perf_session__process_event(struct perf_session *session,
1653                                        union perf_event *event, u64 file_offset)
1654 {
1655         struct evlist *evlist = session->evlist;
1656         struct perf_tool *tool = session->tool;
1657         int ret;
1658
1659         if (session->header.needs_swap)
1660                 event_swap(event, perf_evlist__sample_id_all(evlist));
1661
1662         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1663                 return -EINVAL;
1664
1665         events_stats__inc(&evlist->stats, event->header.type);
1666
1667         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1668                 return perf_session__process_user_event(session, event, file_offset);
1669
1670         if (tool->ordered_events) {
1671                 u64 timestamp = -1ULL;
1672
1673                 ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
1674                 if (ret && ret != -1)
1675                         return ret;
1676
1677                 ret = perf_session__queue_event(session, event, timestamp, file_offset);
1678                 if (ret != -ETIME)
1679                         return ret;
1680         }
1681
1682         return perf_session__deliver_event(session, event, tool, file_offset);
1683 }
1684
1685 void perf_event_header__bswap(struct perf_event_header *hdr)
1686 {
1687         hdr->type = bswap_32(hdr->type);
1688         hdr->misc = bswap_16(hdr->misc);
1689         hdr->size = bswap_16(hdr->size);
1690 }
1691
1692 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1693 {
1694         return machine__findnew_thread(&session->machines.host, -1, pid);
1695 }
1696
1697 /*
1698  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1699  * So here a single thread is created to represent it, but in reality there is
1700  * a separate idle task per cpu, so there should be one 'struct thread' per cpu;
1701  * having only one causes problems for some tools, requiring workarounds. For
1702  * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
1703  */
1704 int perf_session__register_idle_thread(struct perf_session *session)
1705 {
1706         struct thread *thread;
1707         int err = 0;
1708
1709         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1710         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1711                 pr_err("problem inserting idle task.\n");
1712                 err = -1;
1713         }
1714
1715         if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1716                 pr_err("problem inserting idle task.\n");
1717                 err = -1;
1718         }
1719
1720         /* machine__findnew_thread() got the thread, so put it */
1721         thread__put(thread);
1722         return err;
1723 }
1724
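/*
 * Out of order events are expected when any evsel uses a write_backward
 * ring buffer, so only warn about them when none does.
 */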
1725 static void
1726 perf_session__warn_order(const struct perf_session *session)
1727 {
1728         const struct ordered_events *oe = &session->ordered_events;
1729         struct evsel *evsel;
1730         bool should_warn = true;
1731
1732         evlist__for_each_entry(session->evlist, evsel) {
1733                 if (evsel->core.attr.write_backward)
1734                         should_warn = false;
1735         }
1736
1737         if (!should_warn)
1738                 return;
1739         if (oe->nr_unordered_events != 0)
1740                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1741 }
1742
1743 static void perf_session__warn_about_errors(const struct perf_session *session)
1744 {
1745         const struct events_stats *stats = &session->evlist->stats;
1746
1747         if (session->tool->lost == perf_event__process_lost &&
1748             stats->nr_events[PERF_RECORD_LOST] != 0) {
1749                 ui__warning("Processed %u events and lost %u chunks!\n\n"
1750                             "Check IO/CPU overload!\n\n",
1751                             stats->nr_events[0],
1752                             stats->nr_events[PERF_RECORD_LOST]);
1753         }
1754
1755         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1756                 double drop_rate;
1757
1758                 drop_rate = (double)stats->total_lost_samples /
1759                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1760                 if (drop_rate > 0.05) {
1761                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1762                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1763                                     drop_rate * 100.0);
1764                 }
1765         }
1766
1767         if (session->tool->aux == perf_event__process_aux &&
1768             stats->total_aux_lost != 0) {
1769                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1770                             stats->total_aux_lost,
1771                             stats->nr_events[PERF_RECORD_AUX]);
1772         }
1773
1774         if (session->tool->aux == perf_event__process_aux &&
1775             stats->total_aux_partial != 0) {
1776                 bool vmm_exclusive = false;
1777
1778                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1779                                        &vmm_exclusive);
1780
1781                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1782                             "Are you running a KVM guest in the background?%s\n\n",
1783                             stats->total_aux_partial,
1784                             stats->nr_events[PERF_RECORD_AUX],
1785                             vmm_exclusive ?
1786                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1787                             "will reduce the gaps to only guest's timeslices." :
1788                             "");
1789         }
1790
1791         if (stats->nr_unknown_events != 0) {
1792                 ui__warning("Found %u unknown events!\n\n"
1793                             "Is this an older tool processing a perf.data "
1794                             "file generated by a more recent tool?\n\n"
1795                             "If that is not the case, consider "
1796                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1797                             stats->nr_unknown_events);
1798         }
1799
1800         if (stats->nr_unknown_id != 0) {
1801                 ui__warning("%u samples with id not present in the header\n",
1802                             stats->nr_unknown_id);
1803         }
1804
1805         if (stats->nr_invalid_chains != 0) {
1806                 ui__warning("Found invalid callchains!\n\n"
1807                             "%u out of %u events were discarded for this reason.\n\n"
1808                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1809                             stats->nr_invalid_chains,
1810                             stats->nr_events[PERF_RECORD_SAMPLE]);
1811         }
1812
1813         if (stats->nr_unprocessable_samples != 0) {
1814                 ui__warning("%u unprocessable samples recorded.\n"
1815                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1816                             stats->nr_unprocessable_samples);
1817         }
1818
1819         perf_session__warn_order(session);
1820
1821         events_stats__auxtrace_error_warn(stats);
1822
1823         if (stats->nr_proc_map_timeout != 0) {
1824                 ui__warning("%u map information files for pre-existing threads were\n"
1825                             "not processed. If there are samples for addresses they\n"
1826                             "will not be resolved. You may find out which these\n"
1827                             "threads are by running with -v and redirecting the\n"
1828                             "output to a file.\n"
1829                             "Is the time limit to process the proc map too short?\n"
1830                             "Increase it with --proc-map-timeout.\n",
1831                             stats->nr_proc_map_timeout);
1832         }
1833 }
1834
1835 static int perf_session__flush_thread_stack(struct thread *thread,
1836                                             void *p __maybe_unused)
1837 {
1838         return thread_stack__flush(thread);
1839 }
1840
1841 static int perf_session__flush_thread_stacks(struct perf_session *session)
1842 {
1843         return machines__for_each_thread(&session->machines,
1844                                          perf_session__flush_thread_stack,
1845                                          NULL);
1846 }
1847
1848 volatile int session_done;
1849
1850 static int __perf_session__process_decomp_events(struct perf_session *session);
1851
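/*
 * Pipe input can't be mmapped or seeked, so read one event at a time into
 * a heap buffer that is grown to fit the largest event seen so far.
 */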
1852 static int __perf_session__process_pipe_events(struct perf_session *session)
1853 {
1854         struct ordered_events *oe = &session->ordered_events;
1855         struct perf_tool *tool = session->tool;
1856         int fd = perf_data__fd(session->data);
1857         union perf_event *event;
1858         uint32_t size, cur_size = 0;
1859         void *buf = NULL;
1860         s64 skip = 0;
1861         u64 head;
1862         ssize_t err;
1863         void *p;
1864
1865         perf_tool__fill_defaults(tool);
1866
1867         head = 0;
1868         cur_size = sizeof(union perf_event);
1869
1870         buf = malloc(cur_size);
1871         if (!buf)
1872                 return -errno;
1873         ordered_events__set_copy_on_queue(oe, true);
1874 more:
1875         event = buf;
1876         err = readn(fd, event, sizeof(struct perf_event_header));
1877         if (err <= 0) {
1878                 if (err == 0)
1879                         goto done;
1880
1881                 pr_err("failed to read event header\n");
1882                 goto out_err;
1883         }
1884
1885         if (session->header.needs_swap)
1886                 perf_event_header__bswap(&event->header);
1887
1888         size = event->header.size;
1889         if (size < sizeof(struct perf_event_header)) {
1890                 pr_err("bad event header size\n");
1891                 goto out_err;
1892         }
1893
1894         if (size > cur_size) {
1895                 void *new = realloc(buf, size);
1896                 if (!new) {
1897                         pr_err("failed to allocate memory to read event\n");
1898                         goto out_err;
1899                 }
1900                 buf = new;
1901                 cur_size = size;
1902                 event = buf;
1903         }
1904         p = event;
1905         p += sizeof(struct perf_event_header);
1906
1907         if (size - sizeof(struct perf_event_header)) {
1908                 err = readn(fd, p, size - sizeof(struct perf_event_header));
1909                 if (err <= 0) {
1910                         if (err == 0) {
1911                                 pr_err("unexpected end of event stream\n");
1912                                 goto done;
1913                         }
1914
1915                         pr_err("failed to read event data\n");
1916                         goto out_err;
1917                 }
1918         }
1919
1920         if ((skip = perf_session__process_event(session, event, head)) < 0) {
1921                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1922                        head, event->header.size, event->header.type);
1923                 err = -EINVAL;
1924                 goto out_err;
1925         }
1926
1927         head += size;
1928
1929         if (skip > 0)
1930                 head += skip;
1931
1932         err = __perf_session__process_decomp_events(session);
1933         if (err)
1934                 goto out_err;
1935
1936         if (!session_done())
1937                 goto more;
1938 done:
1939         /* do the final flush for ordered samples */
1940         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1941         if (err)
1942                 goto out_err;
1943         err = auxtrace__flush_events(session, tool);
1944         if (err)
1945                 goto out_err;
1946         err = perf_session__flush_thread_stacks(session);
1947 out_err:
1948         free(buf);
1949         if (!tool->no_warn)
1950                 perf_session__warn_about_errors(session);
1951         ordered_events__free(&session->ordered_events);
1952         auxtrace__free_events(session);
1953         return err;
1954 }
1955
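/*
 * Return the event at 'head' in the mapped buffer, NULL when there isn't
 * even room for an event header before 'mmap_size', or an ERR_PTR when the
 * event's declared size runs past the end of the mapping.
 */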
1956 static union perf_event *
1957 fetch_mmaped_event(struct perf_session *session,
1958                    u64 head, size_t mmap_size, char *buf)
1959 {
1960         union perf_event *event;
1961
1962         /*
1963          * Ensure we have enough space remaining to read the
1964          * event header, which carries the size of the event.
1965          */
1966         if (head + sizeof(event->header) > mmap_size)
1967                 return NULL;
1968
1969         event = (union perf_event *)(buf + head);
1970
1971         if (session->header.needs_swap)
1972                 perf_event_header__bswap(&event->header);
1973
1974         if (head + event->header.size > mmap_size) {
1975                 /* We're not fetching the event so swap back again */
1976                 if (session->header.needs_swap)
1977                         perf_event_header__bswap(&event->header);
1978                 pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx: fuzzed perf.data?\n",
1979                          __func__, head, event->header.size, mmap_size);
1980                 return ERR_PTR(-EINVAL);
1981         }
1982
1983         return event;
1984 }
1985
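/*
 * Deliver the events accumulated in the most recently decompressed buffer,
 * advancing its head past each one; trailing bytes that don't form a
 * complete event are carried over into the next decompressed chunk.
 */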
1986 static int __perf_session__process_decomp_events(struct perf_session *session)
1987 {
1988         s64 skip;
1989         u64 size, file_pos = 0;
1990         struct decomp *decomp = session->decomp_last;
1991
1992         if (!decomp)
1993                 return 0;
1994
1995         while (decomp->head < decomp->size && !session_done()) {
1996                 union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
1997
1998                 if (IS_ERR(event))
1999                         return PTR_ERR(event);
2000
2001                 if (!event)
2002                         break;
2003
2004                 size = event->header.size;
2005
2006                 if (size < sizeof(struct perf_event_header) ||
2007                     (skip = perf_session__process_event(session, event, file_pos)) < 0) {
2008                         pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2009                                 decomp->file_pos + decomp->head, event->header.size, event->header.type);
2010                         return -EINVAL;
2011                 }
2012
2013                 if (skip)
2014                         size += skip;
2015
2016                 decomp->head += size;
2017         }
2018
2019         return 0;
2020 }
2021
2022 /*
2023  * On 64-bit we can mmap the data file in one go. No need for tiny mmap
2024  * slices. On 32-bit we use 32MB slices.
2025  */
2026 #if BITS_PER_LONG == 64
2027 #define MMAP_SIZE ULLONG_MAX
2028 #define NUM_MMAPS 1
2029 #else
2030 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2031 #define NUM_MMAPS 128
2032 #endif
2033
2034 struct reader;
2035
2036 typedef s64 (*reader_cb_t)(struct perf_session *session,
2037                            union perf_event *event,
2038                            u64 file_offset);
2039
2040 struct reader {
2041         int              fd;
2042         u64              data_size;
2043         u64              data_offset;
2044         reader_cb_t      process;
2045 };
2046
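/*
 * Map the data file one window at a time and process every event in it,
 * remapping at a page aligned boundary whenever the next event crosses the
 * end of the current window.
 */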
2047 static int
2048 reader__process_events(struct reader *rd, struct perf_session *session,
2049                        struct ui_progress *prog)
2050 {
2051         u64 data_size = rd->data_size;
2052         u64 head, page_offset, file_offset, file_pos, size;
2053         int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2054         size_t  mmap_size;
2055         char *buf, *mmaps[NUM_MMAPS];
2056         union perf_event *event;
2057         s64 skip;
2058
2059         page_offset = page_size * (rd->data_offset / page_size);
2060         file_offset = page_offset;
2061         head = rd->data_offset - page_offset;
2062
2063         ui_progress__init_size(prog, data_size, "Processing events...");
2064
2065         data_size += rd->data_offset;
2066
2067         mmap_size = MMAP_SIZE;
2068         if (mmap_size > data_size) {
2069                 mmap_size = data_size;
2070                 session->one_mmap = true;
2071         }
2072
2073         memset(mmaps, 0, sizeof(mmaps));
2074
2075         mmap_prot  = PROT_READ;
2076         mmap_flags = MAP_SHARED;
2077
2078         if (session->header.needs_swap) {
2079                 mmap_prot  |= PROT_WRITE;
2080                 mmap_flags = MAP_PRIVATE;
2081         }
2082 remap:
2083         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2084                    file_offset);
2085         if (buf == MAP_FAILED) {
2086                 pr_err("failed to mmap file\n");
2087                 err = -errno;
2088                 goto out;
2089         }
2090         mmaps[map_idx] = buf;
2091         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2092         file_pos = file_offset + head;
2093         if (session->one_mmap) {
2094                 session->one_mmap_addr = buf;
2095                 session->one_mmap_offset = file_offset;
2096         }
2097
2098 more:
2099         event = fetch_mmaped_event(session, head, mmap_size, buf);
2100         if (IS_ERR(event))
2101                 return PTR_ERR(event);
2102
2103         if (!event) {
2104                 if (mmaps[map_idx]) {
2105                         munmap(mmaps[map_idx], mmap_size);
2106                         mmaps[map_idx] = NULL;
2107                 }
2108
2109                 page_offset = page_size * (head / page_size);
2110                 file_offset += page_offset;
2111                 head -= page_offset;
2112                 goto remap;
2113         }
2114
2115         size = event->header.size;
2116
2117         skip = -EINVAL;
2118
2119         if (size < sizeof(struct perf_event_header) ||
2120             (skip = rd->process(session, event, file_pos)) < 0) {
2121                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2122                        file_offset + head, event->header.size,
2123                        event->header.type, strerror(-skip));
2124                 err = skip;
2125                 goto out;
2126         }
2127
2128         if (skip)
2129                 size += skip;
2130
2131         head += size;
2132         file_pos += size;
2133
2134         err = __perf_session__process_decomp_events(session);
2135         if (err)
2136                 goto out;
2137
2138         ui_progress__update(prog, size);
2139
2140         if (session_done())
2141                 goto out;
2142
2143         if (file_pos < data_size)
2144                 goto more;
2145
2146 out:
2147         return err;
2148 }
2149
2150 static s64 process_simple(struct perf_session *session,
2151                           union perf_event *event,
2152                           u64 file_offset)
2153 {
2154         return perf_session__process_event(session, event, file_offset);
2155 }
2156
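/*
 * Process all events in a seekable perf.data file, then do the final flush
 * of ordered samples, AUX area events and per-thread call stacks.
 */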
2157 static int __perf_session__process_events(struct perf_session *session)
2158 {
2159         struct reader rd = {
2160                 .fd             = perf_data__fd(session->data),
2161                 .data_size      = session->header.data_size,
2162                 .data_offset    = session->header.data_offset,
2163                 .process        = process_simple,
2164         };
2165         struct ordered_events *oe = &session->ordered_events;
2166         struct perf_tool *tool = session->tool;
2167         struct ui_progress prog;
2168         int err;
2169
2170         perf_tool__fill_defaults(tool);
2171
2172         if (rd.data_size == 0)
2173                 return -1;
2174
2175         ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2176
2177         err = reader__process_events(&rd, session, &prog);
2178         if (err)
2179                 goto out_err;
2180         /* do the final flush for ordered samples */
2181         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2182         if (err)
2183                 goto out_err;
2184         err = auxtrace__flush_events(session, tool);
2185         if (err)
2186                 goto out_err;
2187         err = perf_session__flush_thread_stacks(session);
2188 out_err:
2189         ui_progress__finish();
2190         if (!tool->no_warn)
2191                 perf_session__warn_about_errors(session);
2192         /*
2193          * We may be switching perf.data output, so make
2194          * ordered_events reusable.
2195          */
2196         ordered_events__reinit(&session->ordered_events);
2197         auxtrace__free_events(session);
2198         session->one_mmap = false;
2199         return err;
2200 }
2201
2202 int perf_session__process_events(struct perf_session *session)
2203 {
2204         if (perf_session__register_idle_thread(session) < 0)
2205                 return -ENOMEM;
2206
2207         if (perf_data__is_pipe(session->data))
2208                 return __perf_session__process_pipe_events(session);
2209
2210         return __perf_session__process_events(session);
2211 }
2212
2213 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2214 {
2215         struct evsel *evsel;
2216
2217         evlist__for_each_entry(session->evlist, evsel) {
2218                 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2219                         return true;
2220         }
2221
2222         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2223         return false;
2224 }
2225
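/*
 * Remember a kallsyms reference symbol and its address on the kernel map,
 * so that kernel symbols can later be relocated relative to it.
 */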
2226 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2227 {
2228         char *bracket;
2229         struct ref_reloc_sym *ref;
2230         struct kmap *kmap;
2231
2232         ref = zalloc(sizeof(struct ref_reloc_sym));
2233         if (ref == NULL)
2234                 return -ENOMEM;
2235
2236         ref->name = strdup(symbol_name);
2237         if (ref->name == NULL) {
2238                 free(ref);
2239                 return -ENOMEM;
2240         }
2241
2242         bracket = strchr(ref->name, ']');
2243         if (bracket)
2244                 *bracket = '\0';
2245
2246         ref->addr = addr;
2247
2248         kmap = map__kmap(map);
2249         if (kmap)
2250                 kmap->ref_reloc_sym = ref;
2251
2252         return 0;
2253 }
2254
2255 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2256 {
2257         return machines__fprintf_dsos(&session->machines, fp);
2258 }
2259
2260 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2261                                           bool (skip)(struct dso *dso, int parm), int parm)
2262 {
2263         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2264 }
2265
2266 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2267 {
2268         size_t ret;
2269         const char *msg = "";
2270
2271         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2272                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2273
2274         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2275
2276         ret += events_stats__fprintf(&session->evlist->stats, fp);
2277         return ret;
2278 }
2279
2280 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2281 {
2282         /*
2283          * FIXME: Here we have to actually print all the machines in this
2284          * session, not just the host...
2285          */
2286         return machine__fprintf(&session->machines.host, fp);
2287 }
2288
2289 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2290                                               unsigned int type)
2291 {
2292         struct evsel *pos;
2293
2294         evlist__for_each_entry(session->evlist, pos) {
2295                 if (pos->core.attr.type == type)
2296                         return pos;
2297         }
2298         return NULL;
2299 }
2300
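/*
 * Turn a user supplied cpu list (e.g. "0-3,6") into a bitmap, after
 * checking that the session's events actually recorded PERF_SAMPLE_CPU.
 */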
2301 int perf_session__cpu_bitmap(struct perf_session *session,
2302                              const char *cpu_list, unsigned long *cpu_bitmap)
2303 {
2304         int i, err = -1;
2305         struct perf_cpu_map *map;
2306         int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);
2307
2308         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2309                 struct evsel *evsel;
2310
2311                 evsel = perf_session__find_first_evtype(session, i);
2312                 if (!evsel)
2313                         continue;
2314
2315                 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2316                         pr_err("File does not contain CPU events. "
2317                                "Remove -C option to proceed.\n");
2318                         return -1;
2319                 }
2320         }
2321
2322         map = perf_cpu_map__new(cpu_list);
2323         if (map == NULL) {
2324                 pr_err("Invalid cpu_list\n");
2325                 return -1;
2326         }
2327
2328         for (i = 0; i < map->nr; i++) {
2329                 int cpu = map->map[i];
2330
2331                 if (cpu >= nr_cpus) {
2332                         pr_err("Requested CPU %d too large. "
2333                                "Consider raising MAX_NR_CPUS\n", cpu);
2334                         goto out_delete_map;
2335                 }
2336
2337                 set_bit(cpu, cpu_bitmap);
2338         }
2339
2340         err = 0;
2341
2342 out_delete_map:
2343         perf_cpu_map__put(map);
2344         return err;
2345 }
2346
2347 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2348                                 bool full)
2349 {
2350         if (session == NULL || fp == NULL)
2351                 return;
2352
2353         fprintf(fp, "# ========\n");
2354         perf_header__fprintf_info(session, fp, full);
2355         fprintf(fp, "# ========\n#\n");
2356 }
2357
2359 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2360                                              const struct evsel_str_handler *assocs,
2361                                              size_t nr_assocs)
2362 {
2363         struct evsel *evsel;
2364         size_t i;
2365         int err;
2366
2367         for (i = 0; i < nr_assocs; i++) {
2368                 /*
2369                  * If asked to add a handler for an event that is not
2370                  * in the session, just ignore it.
2371                  */
2372                 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2373                 if (evsel == NULL)
2374                         continue;
2375
2376                 err = -EEXIST;
2377                 if (evsel->handler != NULL)
2378                         goto out;
2379                 evsel->handler = assocs[i].handler;
2380         }
2381
2382         err = 0;
2383 out:
2384         return err;
2385 }
2386
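/*
 * Apply an ID index event: record the idx, cpu and tid for each sample id,
 * so that later lookups by id can tell where a sample came from.
 */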
2387 int perf_event__process_id_index(struct perf_session *session,
2388                                  union perf_event *event)
2389 {
2390         struct evlist *evlist = session->evlist;
2391         struct perf_record_id_index *ie = &event->id_index;
2392         size_t i, nr, max_nr;
2393
2394         max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
2395                  sizeof(struct id_index_entry);
2396         nr = ie->nr;
2397         if (nr > max_nr)
2398                 return -EINVAL;
2399
2400         if (dump_trace)
2401                 fprintf(stdout, " nr: %zu\n", nr);
2402
2403         for (i = 0; i < nr; i++) {
2404                 struct id_index_entry *e = &ie->entries[i];
2405                 struct perf_sample_id *sid;
2406
2407                 if (dump_trace) {
2408                         fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2409                         fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
2410                         fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
2411                         fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
2412                 }
2413
2414                 sid = perf_evlist__id2sid(evlist, e->id);
2415                 if (!sid)
2416                         return -ENOENT;
2417                 sid->idx = e->idx;
2418                 sid->cpu = e->cpu;
2419                 sid->tid = e->tid;
2420         }
2421         return 0;
2422 }