// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include "units.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
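/*
 * Decompress a PERF_RECORD_COMPRESSED event into a freshly mmap'd buffer
 * that is chained on the session's decomp list. Any trailing bytes of a
 * record left over from the previous decompression buffer are copied to
 * the front of the new one so that records never get split.
 */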
static int perf_session__process_compressed_event(struct perf_session *session,
                                                  union perf_event *event, u64 file_offset)
{
        void *src;
        size_t decomp_size, src_size;
        u64 decomp_last_rem = 0;
        size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
        struct decomp *decomp, *decomp_last = session->decomp_last;

        if (decomp_last) {
                decomp_last_rem = decomp_last->size - decomp_last->head;
                decomp_len += decomp_last_rem;
        }

        mmap_len = sizeof(struct decomp) + decomp_len;
        decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (decomp == MAP_FAILED) {
                pr_err("Couldn't allocate memory for decompression\n");
                return -1;
        }

        decomp->file_pos = file_offset;
        decomp->mmap_len = mmap_len;
        decomp->head = 0;

        if (decomp_last_rem) {
                memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
                decomp->size = decomp_last_rem;
        }

        src = (void *)event + sizeof(struct perf_record_compressed);
        src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

        decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
                                &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
        if (!decomp_size) {
                munmap(decomp, mmap_len);
                pr_err("Couldn't decompress data\n");
                return -1;
        }

        decomp->size += decomp_size;

        if (session->decomp == NULL) {
                session->decomp = decomp;
                session->decomp_last = decomp;
        } else {
                session->decomp_last->next = decomp;
                session->decomp_last = decomp;
        }

        pr_debug("decomp (B): %zu to %zu\n", src_size, decomp_size);

        return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset);

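/*
 * Read and sanity-check the perf.data header. Pipe and HEADER_STAT data
 * carry no event description up front, so the evlist consistency checks
 * only apply to regular files.
 */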
static int perf_session__open(struct perf_session *session)
{
        struct perf_data *data = session->data;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data__is_pipe(data))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->core.attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);

        return perf_session__deliver_event(session, event->event,
                                           session->tool, event->file_offset);
}

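/*
 * Allocate and set up a session. On any failure an ERR_PTR() is returned,
 * so callers must check with IS_ERR(). A minimal read-mode sketch, assuming
 * a struct perf_data already populated with a path and read mode:
 *
 *	struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 *	...
 *	perf_session__delete(session);
 */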
struct perf_session *perf_session__new(struct perf_data *data,
                                       bool repipe, struct perf_tool *tool)
{
        int ret = -ENOMEM;
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);

        perf_env__init(&session->header.env);
        if (data) {
                ret = perf_data__open(data);
                if (ret < 0)
                        goto out_delete;

                session->data = data;

                if (perf_data__is_read(data)) {
                        ret = perf_session__open(session);
                        if (ret < 0)
                                goto out_delete;

                        /*
                         * set session attributes that are present in perf.data
                         * but not in pipe-mode.
                         */
                        if (!data->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }

                        evlist__init_trace_event_sample_raw(session->evlist);

                        /* Open the directory data. */
                        if (data->is_dir) {
                                ret = perf_data__open_dir(data);
                                if (ret)
                                        goto out_delete;
                        }

                        if (!symbol_conf.kallsyms_name &&
                            !symbol_conf.vmlinux_name)
                                symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        session->machines.host.single_address_space =
                perf_env__single_address_space(session->machines.host.env);

        if (!data || perf_data__is_write(data)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so evlist__sample_id_all is not meaningful here.
         */
        if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_delete:
        perf_session__delete(session);
 out:
        return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

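/*
 * Tear down the chain of decompression buffers built up by
 * perf_session__process_compressed_event(), unmapping each one.
 */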
static void perf_session__release_decomp_events(struct perf_session *session)
{
        struct decomp *next, *decomp;
        size_t mmap_len;

        next = session->decomp;
        do {
                decomp = next;
                if (decomp == NULL)
                        break;
                next = decomp->next;
                mmap_len = decomp->mmap_len;
                munmap(decomp, mmap_len);
        } while (1);
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_session__release_decomp_events(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->data)
                perf_data__close(session->data);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
                                       union perf_event *event)
{
        dump_printf(": unhandled!\n");
        if (perf_data__is_pipe(session->data))
                skipn(perf_data__fd(session->data), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
                             union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
                                   union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
                                                       union perf_event *event __maybe_unused,
                                                       u64 file_offset __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

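/*
 * Point every callback the tool left NULL at a sensible default, so that
 * event dispatch can call them unconditionally. Most record types fall
 * back to a "dump and ignore" stub; lost/aux/itrace/switch/ksymbol/bpf/
 * text_poke events get real default handlers.
 */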
void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->cgroup == NULL)
                tool->cgroup = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->ksymbol == NULL)
                tool->ksymbol = perf_event__process_ksymbol;
        if (tool->bpf == NULL)
                tool->bpf = perf_event__process_bpf;
        if (tool->text_poke == NULL)
                tool->text_poke = perf_event__process_text_poke;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_op2_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
        if (tool->compressed == NULL)
                tool->compressed = perf_session__process_compressed_event;
}

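/*
 * Byte-swap the trailing sample_id_all block of a non-sample event;
 * @data points just past the event-specific payload.
 */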
static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;

        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                   bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);
        event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
        event->text_poke.addr    = bswap_64(event->text_poke.addr);
        event->text_poke.old_len = bswap_16(event->text_poke.old_len);
        event->text_poke.new_len = bswap_16(event->text_poke.new_len);

        if (sample_id_all) {
                size_t len = sizeof(event->text_poke.old_len) +
                             sizeof(event->text_poke.new_len) +
                             event->text_poke.old_len +
                             event->text_poke.new_len;
                void *data = &event->text_poke.old_len;

                data += PERF_ALIGN(len, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
                                        bool sample_id_all)
{
        u64 i;

        event->namespaces.pid           = bswap_32(event->namespaces.pid);
        event->namespaces.tid           = bswap_32(event->namespaces.tid);
        event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

        for (i = 0; i < event->namespaces.nr_namespaces; i++) {
                struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

                ns->dev = bswap_64(ns->dev);
                ns->ino = bswap_64(ns->ino);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
        event->cgroup.id = bswap_64(event->cgroup.id);

        if (sample_id_all) {
                void *data = &event->cgroup.path;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

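/* Reverse the bit order within one byte, for swap_bitfield() below. */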
static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though this
 * seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while (0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);
        bswap_field_32(aux_sample_size);

        /*
         * After read_format are bitfields. Check read_format because
         * we are unable to use offsetof on bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
        if (event->auxtrace_error.fmt)
                event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

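/*
 * PERF_RECORD_CPU_MAP data is laid out either as an explicit CPU list
 * (PERF_CPU_MAP__CPUS) or as a bitmask (PERF_CPU_MAP__MASK); swap
 * according to whichever representation was recorded.
 */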
static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct perf_record_cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct perf_record_record_cpu_map *mask;
        unsigned i;

        data->type = bswap_64(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct perf_record_record_cpu_map *)data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
        [PERF_RECORD_CGROUP]              = perf_event__cgroup_swap,
        [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass over all of its buffers, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

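/*
 * Dump a call chain sampled in LBR call-stack mode: the kernel part comes
 * from the regular FP callchain, while the user part is reconstructed from
 * the branch stack as explained in the comment inside the function.
 */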
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * LBR callstack can only get the user call chain;
                 * i is the kernel call chain number,
                 * 1 is PERF_CONTEXT_USER.
                 *
                 * The user call chain is stored in LBR registers.
                 * LBRs are paired registers: the caller is stored
                 * in the "from" register, while the callee is stored
                 * in the "to" register.
                 * For example, the call stack
                 * "A"->"B"->"C"->"D"
                 * will be recorded in the LBR registers like
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to construct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), entries[i].from);
        }
}

static void callchain__printf(struct evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        uint64_t i;

        printf("%s: nr:%" PRIu64 "\n",
                !callstack ? "... branch stack" : "... branch callstack",
                sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &entries[i];

                if (!callstack) {
                        printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                                i, e->from, e->to,
                                (unsigned short)e->flags.cycles,
                                e->flags.mispred ? "M" : " ",
                                e->flags.predicted ? "P" : " ",
                                e->flags.abort ? "A" : " ",
                                e->flags.in_tx ? "T" : " ",
                                (unsigned)e->flags.reserved);
                } else {
                        printf("..... %2"PRIu64": %016" PRIx64 "\n",
                                i, i > 0 ? e->from : e->to);
                }
        }
}

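/*
 * The sampled registers are packed in mask order: the i-th value in
 * regs[] belongs to the i-th bit set in the mask.
 */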
static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%016" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
        u64 sample_type = __evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);
        if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
                evlist->trace_event_sample_raw(evlist, event, sample);

        if (sample)
                evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

char *get_page_size_name(u64 size, char *str)
{
        if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
                snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");

        return str;
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;
        char str[PAGE_SIZE_NAME_LEN];

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->core.attr.sample_type;

        if (evsel__has_callchain(evsel))
                callchain__printf(evsel, sample);

        if (evsel__has_br_stack(evsel))
                branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_PHYS_ADDR)
                printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

        if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
                printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
        struct perf_record_read *read_event = &event->read;
        u64 read_format;

        if (!dump_trace)
                return;

        printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
               evsel__name(evsel), event->read.value);

        if (!evsel)
                return;

        read_format = evsel->core.attr.read_format;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

        if (read_format & PERF_FORMAT_ID)
                printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

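/*
 * Pick the machine an event belongs to: guest samples are routed to the
 * machine keyed by the guest pid (falling back to the default guest
 * machine), everything else goes to the host.
 */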
static struct machine *machines__find_for_cpumode(struct machines *machines,
                                                  union perf_event *event,
                                                  struct perf_sample *sample)
{
        struct machine *machine;

        if (perf_guest &&
            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = machines__find(machines, pid);
                if (!machine)
                        machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &machines->host;
}

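/*
 * PERF_SAMPLE_READ delivery: counters are recorded as running totals, so
 * the per-sample period is derived by subtracting the previous total
 * cached in the sample id.
 */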
1371 static int deliver_sample_value(struct evlist *evlist,
1372                                 struct perf_tool *tool,
1373                                 union perf_event *event,
1374                                 struct perf_sample *sample,
1375                                 struct sample_read_value *v,
1376                                 struct machine *machine)
1377 {
1378         struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
1379         struct evsel *evsel;
1380
1381         if (sid) {
1382                 sample->id     = v->id;
1383                 sample->period = v->value - sid->period;
1384                 sid->period    = v->value;
1385         }
1386
1387         if (!sid || sid->evsel == NULL) {
1388                 ++evlist->stats.nr_unknown_id;
1389                 return 0;
1390         }
1391
1392         /*
1393          * There's no reason to deliver sample
1394          * for zero period, bail out.
1395          */
1396         if (!sample->period)
1397                 return 0;
1398
1399         evsel = container_of(sid->evsel, struct evsel, core);
1400         return tool->sample(tool, event, sample, evsel, machine);
1401 }
1402
1403 static int deliver_sample_group(struct evlist *evlist,
1404                                 struct perf_tool *tool,
1405                                 union  perf_event *event,
1406                                 struct perf_sample *sample,
1407                                 struct machine *machine)
1408 {
1409         int ret = -EINVAL;
1410         u64 i;
1411
1412         for (i = 0; i < sample->read.group.nr; i++) {
1413                 ret = deliver_sample_value(evlist, tool, event, sample,
1414                                            &sample->read.group.values[i],
1415                                            machine);
1416                 if (ret)
1417                         break;
1418         }
1419
1420         return ret;
1421 }
1422
1423 static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
1424                                   union perf_event *event, struct perf_sample *sample,
1425                                   struct evsel *evsel, struct machine *machine)
1426 {
1427         /* We know evsel != NULL. */
1428         u64 sample_type = evsel->core.attr.sample_type;
1429         u64 read_format = evsel->core.attr.read_format;
1430
1431         /* Standard sample delivery. */
1432         if (!(sample_type & PERF_SAMPLE_READ))
1433                 return tool->sample(tool, event, sample, evsel, machine);
1434
1435         /* For PERF_SAMPLE_READ we have either single or group mode. */
1436         if (read_format & PERF_FORMAT_GROUP)
1437                 return deliver_sample_group(evlist, tool, event, sample,
1438                                             machine);
1439         else
1440                 return deliver_sample_value(evlist, tool, event, sample,
1441                                             &sample->read.one, machine);
1442 }
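
/*
 * The dispatch above, tabulated for reference:
 *
 *	PERF_SAMPLE_READ	PERF_FORMAT_GROUP	delivery path
 *	unset			-			tool->sample()
 *	set			unset			deliver_sample_value(&sample->read.one)
 *	set			set			deliver_sample_group()
 */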
1443
1444 static int machines__deliver_event(struct machines *machines,
1445                                    struct evlist *evlist,
1446                                    union perf_event *event,
1447                                    struct perf_sample *sample,
1448                                    struct perf_tool *tool, u64 file_offset)
1449 {
1450         struct evsel *evsel;
1451         struct machine *machine;
1452
1453         dump_event(evlist, event, file_offset, sample);
1454
1455         evsel = evlist__id2evsel(evlist, sample->id);
1456
1457         machine = machines__find_for_cpumode(machines, event, sample);
1458
1459         switch (event->header.type) {
1460         case PERF_RECORD_SAMPLE:
1461                 if (evsel == NULL) {
1462                         ++evlist->stats.nr_unknown_id;
1463                         return 0;
1464                 }
1465                 dump_sample(evsel, event, sample);
1466                 if (machine == NULL) {
1467                         ++evlist->stats.nr_unprocessable_samples;
1468                         return 0;
1469                 }
1470                 return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1471         case PERF_RECORD_MMAP:
1472                 return tool->mmap(tool, event, sample, machine);
1473         case PERF_RECORD_MMAP2:
1474                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1475                         ++evlist->stats.nr_proc_map_timeout;
1476                 return tool->mmap2(tool, event, sample, machine);
1477         case PERF_RECORD_COMM:
1478                 return tool->comm(tool, event, sample, machine);
1479         case PERF_RECORD_NAMESPACES:
1480                 return tool->namespaces(tool, event, sample, machine);
1481         case PERF_RECORD_CGROUP:
1482                 return tool->cgroup(tool, event, sample, machine);
1483         case PERF_RECORD_FORK:
1484                 return tool->fork(tool, event, sample, machine);
1485         case PERF_RECORD_EXIT:
1486                 return tool->exit(tool, event, sample, machine);
1487         case PERF_RECORD_LOST:
1488                 if (tool->lost == perf_event__process_lost)
1489                         evlist->stats.total_lost += event->lost.lost;
1490                 return tool->lost(tool, event, sample, machine);
1491         case PERF_RECORD_LOST_SAMPLES:
1492                 if (tool->lost_samples == perf_event__process_lost_samples)
1493                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1494                 return tool->lost_samples(tool, event, sample, machine);
1495         case PERF_RECORD_READ:
1496                 dump_read(evsel, event);
1497                 return tool->read(tool, event, sample, evsel, machine);
1498         case PERF_RECORD_THROTTLE:
1499                 return tool->throttle(tool, event, sample, machine);
1500         case PERF_RECORD_UNTHROTTLE:
1501                 return tool->unthrottle(tool, event, sample, machine);
1502         case PERF_RECORD_AUX:
1503                 if (tool->aux == perf_event__process_aux) {
1504                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1505                                 evlist->stats.total_aux_lost += 1;
1506                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1507                                 evlist->stats.total_aux_partial += 1;
1508                 }
1509                 return tool->aux(tool, event, sample, machine);
1510         case PERF_RECORD_ITRACE_START:
1511                 return tool->itrace_start(tool, event, sample, machine);
1512         case PERF_RECORD_SWITCH:
1513         case PERF_RECORD_SWITCH_CPU_WIDE:
1514                 return tool->context_switch(tool, event, sample, machine);
1515         case PERF_RECORD_KSYMBOL:
1516                 return tool->ksymbol(tool, event, sample, machine);
1517         case PERF_RECORD_BPF_EVENT:
1518                 return tool->bpf(tool, event, sample, machine);
1519         case PERF_RECORD_TEXT_POKE:
1520                 return tool->text_poke(tool, event, sample, machine);
1521         default:
1522                 ++evlist->stats.nr_unknown_events;
1523                 return -1;
1524         }
1525 }
1526
1527 static int perf_session__deliver_event(struct perf_session *session,
1528                                        union perf_event *event,
1529                                        struct perf_tool *tool,
1530                                        u64 file_offset)
1531 {
1532         struct perf_sample sample;
1533         int ret = evlist__parse_sample(session->evlist, event, &sample);
1534
1535         if (ret) {
1536                 pr_err("Can't parse sample, err = %d\n", ret);
1537                 return ret;
1538         }
1539
1540         ret = auxtrace__process_event(session, event, &sample, tool);
1541         if (ret < 0)
1542                 return ret;
1543         if (ret > 0)
1544                 return 0;
1545
1546         ret = machines__deliver_event(&session->machines, session->evlist,
1547                                       event, &sample, tool, file_offset);
1548
1549         if (dump_trace && sample.aux_sample.size)
1550                 auxtrace__dump_auxtrace_sample(session, &sample);
1551
1552         return ret;
1553 }
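
/*
 * The auxtrace__process_event() return convention relied on above:
 * a negative value is an error, a positive value is taken to mean the
 * event was consumed by the AUX-area code and must not be delivered
 * again, and 0 means the event falls through to
 * machines__deliver_event().
 */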
1554
1555 static s64 perf_session__process_user_event(struct perf_session *session,
1556                                             union perf_event *event,
1557                                             u64 file_offset)
1558 {
1559         struct ordered_events *oe = &session->ordered_events;
1560         struct perf_tool *tool = session->tool;
1561         struct perf_sample sample = { .time = 0, };
1562         int fd = perf_data__fd(session->data);
1563         int err;
1564
1565         if (event->header.type != PERF_RECORD_COMPRESSED ||
1566             tool->compressed == perf_session__process_compressed_event_stub)
1567                 dump_event(session->evlist, event, file_offset, &sample);
1568
1569         /* These events are processed right away */
1570         switch (event->header.type) {
1571         case PERF_RECORD_HEADER_ATTR:
1572                 err = tool->attr(tool, event, &session->evlist);
1573                 if (err == 0) {
1574                         perf_session__set_id_hdr_size(session);
1575                         perf_session__set_comm_exec(session);
1576                 }
1577                 return err;
1578         case PERF_RECORD_EVENT_UPDATE:
1579                 return tool->event_update(tool, event, &session->evlist);
1580         case PERF_RECORD_HEADER_EVENT_TYPE:
1581                 /*
1582                  * Deprecated, but we need to handle it for the sake
1583                  * of old data files created in pipe mode.
1584                  */
1585                 return 0;
1586         case PERF_RECORD_HEADER_TRACING_DATA:
1587                 /*
1588                  * Set up for reading amidst mmap, but only when we
1589                  * are in 'file' mode. The 'pipe' fd is already in
1590                  * the proper place.
1591                  */
1592                 if (!perf_data__is_pipe(session->data))
1593                         lseek(fd, file_offset, SEEK_SET);
1594                 return tool->tracing_data(session, event);
1595         case PERF_RECORD_HEADER_BUILD_ID:
1596                 return tool->build_id(session, event);
1597         case PERF_RECORD_FINISHED_ROUND:
1598                 return tool->finished_round(tool, event, oe);
1599         case PERF_RECORD_ID_INDEX:
1600                 return tool->id_index(session, event);
1601         case PERF_RECORD_AUXTRACE_INFO:
1602                 return tool->auxtrace_info(session, event);
1603         case PERF_RECORD_AUXTRACE:
1604                 /* set up for reading amidst mmap */
1605                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1606                 return tool->auxtrace(session, event);
1607         case PERF_RECORD_AUXTRACE_ERROR:
1608                 perf_session__auxtrace_error_inc(session, event);
1609                 return tool->auxtrace_error(session, event);
1610         case PERF_RECORD_THREAD_MAP:
1611                 return tool->thread_map(session, event);
1612         case PERF_RECORD_CPU_MAP:
1613                 return tool->cpu_map(session, event);
1614         case PERF_RECORD_STAT_CONFIG:
1615                 return tool->stat_config(session, event);
1616         case PERF_RECORD_STAT:
1617                 return tool->stat(session, event);
1618         case PERF_RECORD_STAT_ROUND:
1619                 return tool->stat_round(session, event);
1620         case PERF_RECORD_TIME_CONV:
1621                 session->time_conv = event->time_conv;
1622                 return tool->time_conv(session, event);
1623         case PERF_RECORD_HEADER_FEATURE:
1624                 return tool->feature(session, event);
1625         case PERF_RECORD_COMPRESSED:
1626                 err = tool->compressed(session, event, file_offset);
1627                 if (err)
1628                         dump_event(session->evlist, event, file_offset, &sample);
1629                 return err;
1630         default:
1631                 return -EINVAL;
1632         }
1633 }
1634
1635 int perf_session__deliver_synth_event(struct perf_session *session,
1636                                       union perf_event *event,
1637                                       struct perf_sample *sample)
1638 {
1639         struct evlist *evlist = session->evlist;
1640         struct perf_tool *tool = session->tool;
1641
1642         events_stats__inc(&evlist->stats, event->header.type);
1643
1644         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1645                 return perf_session__process_user_event(session, event, 0);
1646
1647         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1648 }
1649
1650 static void event_swap(union perf_event *event, bool sample_id_all)
1651 {
1652         perf_event__swap_op swap;
1653
1654         swap = perf_event__swap_ops[event->header.type];
1655         if (swap)
1656                 swap(event, sample_id_all);
1657 }
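
/*
 * perf_event__swap_ops is indexed by event->header.type. A minimal
 * sketch of the shape of one entry, for a hypothetical record type
 * carrying a single u64 payload:
 *
 *	static void perf_event__example_swap(union perf_event *event,
 *					     bool sample_id_all __maybe_unused)
 *	{
 *		event->example.value = bswap_64(event->example.value);
 *	}
 *
 * 'example' is not a real member of union perf_event; it only
 * illustrates the callback signature used by event_swap() above.
 */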
1658
1659 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1660                              void *buf, size_t buf_sz,
1661                              union perf_event **event_ptr,
1662                              struct perf_sample *sample)
1663 {
1664         union perf_event *event;
1665         size_t hdr_sz, rest;
1666         int fd;
1667
1668         if (session->one_mmap && !session->header.needs_swap) {
1669                 event = file_offset - session->one_mmap_offset +
1670                         session->one_mmap_addr;
1671                 goto out_parse_sample;
1672         }
1673
1674         if (perf_data__is_pipe(session->data))
1675                 return -1;
1676
1677         fd = perf_data__fd(session->data);
1678         hdr_sz = sizeof(struct perf_event_header);
1679
1680         if (buf_sz < hdr_sz)
1681                 return -1;
1682
1683         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1684             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1685                 return -1;
1686
1687         event = (union perf_event *)buf;
1688
1689         if (session->header.needs_swap)
1690                 perf_event_header__bswap(&event->header);
1691
1692         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1693                 return -1;
1694
1695         rest = event->header.size - hdr_sz;
1696
1697         if (readn(fd, buf, rest) != (ssize_t)rest)
1698                 return -1;
1699
1700         if (session->header.needs_swap)
1701                 event_swap(event, evlist__sample_id_all(session->evlist));
1702
1703 out_parse_sample:
1704
1705         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1706             evlist__parse_sample(session->evlist, event, sample))
1707                 return -1;
1708
1709         *event_ptr = event;
1710
1711         return 0;
1712 }
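
/*
 * A minimal caller sketch (perf_session__peek_events() below is the
 * in-tree user): the buffer must be able to hold the whole event, or
 * the size checks above fail with -1:
 *
 *	char buf[PERF_SAMPLE_MAX_SIZE];
 *	union perf_event *event;
 *
 *	if (perf_session__peek_event(session, offset, buf, sizeof(buf),
 *				     &event, NULL) == 0)
 *		pr_debug("peeked event type %u\n", event->header.type);
 */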
1713
1714 int perf_session__peek_events(struct perf_session *session, u64 offset,
1715                               u64 size, peek_events_cb_t cb, void *data)
1716 {
1717         u64 max_offset = offset + size;
1718         char buf[PERF_SAMPLE_MAX_SIZE];
1719         union perf_event *event;
1720         int err;
1721
1722         do {
1723                 err = perf_session__peek_event(session, offset, buf,
1724                                                PERF_SAMPLE_MAX_SIZE, &event,
1725                                                NULL);
1726                 if (err)
1727                         return err;
1728
1729                 err = cb(session, event, offset, data);
1730                 if (err)
1731                         return err;
1732
1733                 offset += event->header.size;
1734                 if (event->header.type == PERF_RECORD_AUXTRACE)
1735                         offset += event->auxtrace.size;
1736
1737         } while (offset < max_offset);
1738
1739         return err;
1740 }
1741
1742 static s64 perf_session__process_event(struct perf_session *session,
1743                                        union perf_event *event, u64 file_offset)
1744 {
1745         struct evlist *evlist = session->evlist;
1746         struct perf_tool *tool = session->tool;
1747         int ret;
1748
1749         if (session->header.needs_swap)
1750                 event_swap(event, evlist__sample_id_all(evlist));
1751
1752         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1753                 return -EINVAL;
1754
1755         events_stats__inc(&evlist->stats, event->header.type);
1756
1757         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1758                 return perf_session__process_user_event(session, event, file_offset);
1759
1760         if (tool->ordered_events) {
1761                 u64 timestamp = -1ULL;
1762
1763                 ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
1764                 if (ret && ret != -1)
1765                         return ret;
1766
1767                 ret = perf_session__queue_event(session, event, timestamp, file_offset);
1768                 if (ret != -ETIME)
1769                         return ret;
1770         }
1771
1772         return perf_session__deliver_event(session, event, tool, file_offset);
1773 }
1774
1775 void perf_event_header__bswap(struct perf_event_header *hdr)
1776 {
1777         hdr->type = bswap_32(hdr->type);
1778         hdr->misc = bswap_16(hdr->misc);
1779         hdr->size = bswap_16(hdr->size);
1780 }
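
/*
 * Worked example (illustrative values): reading a little-endian file
 * on a big-endian host, header fields stored as type=9, misc=2,
 * size=0x28 are first seen as 0x09000000, 0x0200 and 0x2800; after
 * bswap_32()/bswap_16() above they read back as 9, 2 and 0x28.
 */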
1781
1782 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1783 {
1784         return machine__findnew_thread(&session->machines.host, -1, pid);
1785 }
1786
1787 /*
1788  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1789  * So here a single thread is created to represent it. In reality there is a
1790  * separate idle task per cpu, so ideally there would be one 'struct thread' per
1791  * cpu, but there is only one. That causes problems for some tools, requiring
1792  * workarounds, e.g. get_idle_thread() in builtin-sched.c or thread_stack__per_cpu().
1793  */
1794 int perf_session__register_idle_thread(struct perf_session *session)
1795 {
1796         struct thread *thread;
1797         int err = 0;
1798
1799         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1800         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1801                 pr_err("problem inserting idle task.\n");
1802                 err = -1;
1803         }
1804
1805         if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1806                 pr_err("problem setting namespaces for the idle task.\n");
1807                 err = -1;
1808         }
1809
1810         /* machine__findnew_thread() got the thread, so put it */
1811         thread__put(thread);
1812         return err;
1813 }
1814
1815 static void
1816 perf_session__warn_order(const struct perf_session *session)
1817 {
1818         const struct ordered_events *oe = &session->ordered_events;
1819         struct evsel *evsel;
1820         bool should_warn = true;
1821
1822         evlist__for_each_entry(session->evlist, evsel) {
1823                 if (evsel->core.attr.write_backward)
1824                         should_warn = false;
1825         }
1826
1827         if (!should_warn)
1828                 return;
1829         if (oe->nr_unordered_events != 0)
1830                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1831 }
1832
1833 static void perf_session__warn_about_errors(const struct perf_session *session)
1834 {
1835         const struct events_stats *stats = &session->evlist->stats;
1836
1837         if (session->tool->lost == perf_event__process_lost &&
1838             stats->nr_events[PERF_RECORD_LOST] != 0) {
1839                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1840                             "Check IO/CPU overload!\n\n",
1841                             stats->nr_events[0],
1842                             stats->nr_events[PERF_RECORD_LOST]);
1843         }
1844
1845         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1846                 double drop_rate;
1847
1848                 drop_rate = (double)stats->total_lost_samples /
1849                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1850                 if (drop_rate > 0.05) {
1851                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1852                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1853                                     drop_rate * 100.0);
1854                 }
1855         }
1856
1857         if (session->tool->aux == perf_event__process_aux &&
1858             stats->total_aux_lost != 0) {
1859                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1860                             stats->total_aux_lost,
1861                             stats->nr_events[PERF_RECORD_AUX]);
1862         }
1863
1864         if (session->tool->aux == perf_event__process_aux &&
1865             stats->total_aux_partial != 0) {
1866                 bool vmm_exclusive = false;
1867
1868                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1869                                        &vmm_exclusive);
1870
1871                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1872                             "Are you running a KVM guest in the background?%s\n\n",
1873                             stats->total_aux_partial,
1874                             stats->nr_events[PERF_RECORD_AUX],
1875                             vmm_exclusive ?
1876                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1877                             "will reduce the gaps to only guest's timeslices." :
1878                             "");
1879         }
1880
1881         if (stats->nr_unknown_events != 0) {
1882                 ui__warning("Found %u unknown events!\n\n"
1883                             "Is this an older tool processing a perf.data "
1884                             "file generated by a more recent tool?\n\n"
1885                             "If that is not the case, consider "
1886                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1887                             stats->nr_unknown_events);
1888         }
1889
1890         if (stats->nr_unknown_id != 0) {
1891                 ui__warning("%u samples with id not present in the header\n",
1892                             stats->nr_unknown_id);
1893         }
1894
1895         if (stats->nr_invalid_chains != 0) {
1896                 ui__warning("Found invalid callchains!\n\n"
1897                             "%u out of %u events were discarded for this reason.\n\n"
1898                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1899                             stats->nr_invalid_chains,
1900                             stats->nr_events[PERF_RECORD_SAMPLE]);
1901         }
1902
1903         if (stats->nr_unprocessable_samples != 0) {
1904                 ui__warning("%u unprocessable samples recorded.\n"
1905                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1906                             stats->nr_unprocessable_samples);
1907         }
1908
1909         perf_session__warn_order(session);
1910
1911         events_stats__auxtrace_error_warn(stats);
1912
1913         if (stats->nr_proc_map_timeout != 0) {
1914                 ui__warning("%d map information files for pre-existing threads were\n"
1915                             "not processed; if there are samples for those addresses,\n"
1916                             "they will not be resolved. You may find out which threads\n"
1917                             "these are by running with -v and redirecting the output\n"
1918                             "to a file.\n"
1919                             "The time limit to process proc maps may be too short;\n"
1920                             "increase it with --proc-map-timeout.\n",
1921                             stats->nr_proc_map_timeout);
1922         }
1923 }
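
/*
 * Worked example for the 5% drop-rate threshold above (numbers made
 * up): with 9500 processed samples and 500 lost ones,
 * drop_rate = 500 / (9500 + 500) = 0.05, which is not > 0.05, so no
 * warning is issued; one more lost sample tips it over the threshold.
 */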
1924
1925 static int perf_session__flush_thread_stack(struct thread *thread,
1926                                             void *p __maybe_unused)
1927 {
1928         return thread_stack__flush(thread);
1929 }
1930
1931 static int perf_session__flush_thread_stacks(struct perf_session *session)
1932 {
1933         return machines__for_each_thread(&session->machines,
1934                                          perf_session__flush_thread_stack,
1935                                          NULL);
1936 }
1937
1938 volatile int session_done;
1939
1940 static int __perf_session__process_decomp_events(struct perf_session *session);
1941
1942 static int __perf_session__process_pipe_events(struct perf_session *session)
1943 {
1944         struct ordered_events *oe = &session->ordered_events;
1945         struct perf_tool *tool = session->tool;
1946         union perf_event *event;
1947         uint32_t size, cur_size = 0;
1948         void *buf = NULL;
1949         s64 skip = 0;
1950         u64 head;
1951         ssize_t err;
1952         void *p;
1953
1954         perf_tool__fill_defaults(tool);
1955
1956         head = 0;
1957         cur_size = sizeof(union perf_event);
1958
1959         buf = malloc(cur_size);
1960         if (!buf)
1961                 return -errno;
1962         ordered_events__set_copy_on_queue(oe, true);
1963 more:
1964         event = buf;
1965         err = perf_data__read(session->data, event,
1966                               sizeof(struct perf_event_header));
1967         if (err <= 0) {
1968                 if (err == 0)
1969                         goto done;
1970
1971                 pr_err("failed to read event header\n");
1972                 goto out_err;
1973         }
1974
1975         if (session->header.needs_swap)
1976                 perf_event_header__bswap(&event->header);
1977
1978         size = event->header.size;
1979         if (size < sizeof(struct perf_event_header)) {
1980                 pr_err("bad event header size\n");
1981                 goto out_err;
1982         }
1983
1984         if (size > cur_size) {
1985                 void *new = realloc(buf, size);
1986                 if (!new) {
1987                         pr_err("failed to allocate memory to read event\n");
1988                         goto out_err;
1989                 }
1990                 buf = new;
1991                 cur_size = size;
1992                 event = buf;
1993         }
1994         p = event;
1995         p += sizeof(struct perf_event_header);
1996
1997         if (size - sizeof(struct perf_event_header)) {
1998                 err = perf_data__read(session->data, p,
1999                                       size - sizeof(struct perf_event_header));
2000                 if (err <= 0) {
2001                         if (err == 0) {
2002                                 pr_err("unexpected end of event stream\n");
2003                                 goto done;
2004                         }
2005
2006                         pr_err("failed to read event data\n");
2007                         goto out_err;
2008                 }
2009         }
2010
2011         if ((skip = perf_session__process_event(session, event, head)) < 0) {
2012                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2013                        head, event->header.size, event->header.type);
2014                 err = -EINVAL;
2015                 goto out_err;
2016         }
2017
2018         head += size;
2019
2020         if (skip > 0)
2021                 head += skip;
2022
2023         err = __perf_session__process_decomp_events(session);
2024         if (err)
2025                 goto out_err;
2026
2027         if (!session_done())
2028                 goto more;
2029 done:
2030         /* do the final flush for ordered samples */
2031         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2032         if (err)
2033                 goto out_err;
2034         err = auxtrace__flush_events(session, tool);
2035         if (err)
2036                 goto out_err;
2037         err = perf_session__flush_thread_stacks(session);
2038 out_err:
2039         free(buf);
2040         if (!tool->no_warn)
2041                 perf_session__warn_about_errors(session);
2042         ordered_events__free(&session->ordered_events);
2043         auxtrace__free_events(session);
2044         return err;
2045 }
2046
2047 static union perf_event *
2048 prefetch_event(char *buf, u64 head, size_t mmap_size,
2049                bool needs_swap, union perf_event *error)
2050 {
2051         union perf_event *event;
2052
2053         /*
2054          * Ensure we have enough space remaining to read
2055          * the size of the event in the headers.
2056          */
2057         if (head + sizeof(event->header) > mmap_size)
2058                 return NULL;
2059
2060         event = (union perf_event *)(buf + head);
2061         if (needs_swap)
2062                 perf_event_header__bswap(&event->header);
2063
2064         if (head + event->header.size <= mmap_size)
2065                 return event;
2066
2067         /* We're not fetching the event, so swap back again */
2068         if (needs_swap)
2069                 perf_event_header__bswap(&event->header);
2070
2071         pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
2072                  " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);
2073
2074         return error;
2075 }
2076
2077 static union perf_event *
2078 fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2079 {
2080         return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
2081 }
2082
2083 static union perf_event *
2084 fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2085 {
2086         return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
2087 }
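
/*
 * Boundary example for prefetch_event() (illustrative numbers, with an
 * 8-byte struct perf_event_header): with mmap_size == 0x1000 and
 * head == 0xff8 the header itself still fits, so its size field can be
 * read; if it then claims size == 0x20, head + size == 0x1018 exceeds
 * the mapping and 'error' is returned instead of the event:
 * ERR_PTR(-EINVAL) via fetch_mmaped_event(), NULL via
 * fetch_decomp_event(). Had head been 0xffc, even the header would not
 * fit and NULL is returned unconditionally, making the file reader
 * remap.
 */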
2088
2089 static int __perf_session__process_decomp_events(struct perf_session *session)
2090 {
2091         s64 skip;
2092         u64 size, file_pos = 0;
2093         struct decomp *decomp = session->decomp_last;
2094
2095         if (!decomp)
2096                 return 0;
2097
2098         while (decomp->head < decomp->size && !session_done()) {
2099                 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2100                                                              session->header.needs_swap);
2101
2102                 if (!event)
2103                         break;
2104
2105                 size = event->header.size;
2106
2107                 if (size < sizeof(struct perf_event_header) ||
2108                     (skip = perf_session__process_event(session, event, file_pos)) < 0) {
2109                         pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2110                                 decomp->file_pos + decomp->head, event->header.size, event->header.type);
2111                         return -EINVAL;
2112                 }
2113
2114                 if (skip)
2115                         size += skip;
2116
2117                 decomp->head += size;
2118         }
2119
2120         return 0;
2121 }
2122
2123 /*
2124  * On 64-bit we can mmap the data file in one go. No need for small mmap
2125  * slices. On 32-bit we use 32MB slices.
2126  */
2127 #if BITS_PER_LONG == 64
2128 #define MMAP_SIZE ULLONG_MAX
2129 #define NUM_MMAPS 1
2130 #else
2131 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2132 #define NUM_MMAPS 128
2133 #endif
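
/*
 * Illustrative note: on 32-bit, reader__process_events() below rotates
 * through NUM_MMAPS windows of MMAP_SIZE bytes and wraps the index with
 * "map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1)", which is why
 * NUM_MMAPS must be a power of two.
 */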
2134
2135 struct reader;
2136
2137 typedef s64 (*reader_cb_t)(struct perf_session *session,
2138                            union perf_event *event,
2139                            u64 file_offset);
2140
2141 struct reader {
2142         int              fd;
2143         u64              data_size;
2144         u64              data_offset;
2145         reader_cb_t      process;
2146 };
2147
2148 static int
2149 reader__process_events(struct reader *rd, struct perf_session *session,
2150                        struct ui_progress *prog)
2151 {
2152         u64 data_size = rd->data_size;
2153         u64 head, page_offset, file_offset, file_pos, size;
2154         int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2155         size_t  mmap_size;
2156         char *buf, *mmaps[NUM_MMAPS];
2157         union perf_event *event;
2158         s64 skip;
2159
2160         page_offset = page_size * (rd->data_offset / page_size);
2161         file_offset = page_offset;
2162         head = rd->data_offset - page_offset;
2163
2164         ui_progress__init_size(prog, data_size, "Processing events...");
2165
2166         data_size += rd->data_offset;
2167
2168         mmap_size = MMAP_SIZE;
2169         if (mmap_size > data_size) {
2170                 mmap_size = data_size;
2171                 session->one_mmap = true;
2172         }
2173
2174         memset(mmaps, 0, sizeof(mmaps));
2175
2176         mmap_prot  = PROT_READ;
2177         mmap_flags = MAP_SHARED;
2178
2179         if (session->header.needs_swap) {
2180                 mmap_prot  |= PROT_WRITE;
2181                 mmap_flags = MAP_PRIVATE;
2182         }
2183 remap:
2184         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2185                    file_offset);
2186         if (buf == MAP_FAILED) {
2187                 pr_err("failed to mmap file\n");
2188                 err = -errno;
2189                 goto out;
2190         }
2191         mmaps[map_idx] = buf;
2192         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2193         file_pos = file_offset + head;
2194         if (session->one_mmap) {
2195                 session->one_mmap_addr = buf;
2196                 session->one_mmap_offset = file_offset;
2197         }
2198
2199 more:
2200         event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
2201         if (IS_ERR(event))
2202                 return PTR_ERR(event);
2203
2204         if (!event) {
2205                 if (mmaps[map_idx]) {
2206                         munmap(mmaps[map_idx], mmap_size);
2207                         mmaps[map_idx] = NULL;
2208                 }
2209
2210                 page_offset = page_size * (head / page_size);
2211                 file_offset += page_offset;
2212                 head -= page_offset;
2213                 goto remap;
2214         }
2215
2216         size = event->header.size;
2217
2218         skip = -EINVAL;
2219
2220         if (size < sizeof(struct perf_event_header) ||
2221             (skip = rd->process(session, event, file_pos)) < 0) {
2222                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2223                        file_offset + head, event->header.size,
2224                        event->header.type, strerror(-skip));
2225                 err = skip;
2226                 goto out;
2227         }
2228
2229         if (skip)
2230                 size += skip;
2231
2232         head += size;
2233         file_pos += size;
2234
2235         err = __perf_session__process_decomp_events(session);
2236         if (err)
2237                 goto out;
2238
2239         ui_progress__update(prog, size);
2240
2241         if (session_done())
2242                 goto out;
2243
2244         if (file_pos < data_size)
2245                 goto more;
2246
2247 out:
2248         return err;
2249 }
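
/*
 * The remap arithmetic above, worked through with made-up numbers:
 * suppose page_size == 0x1000, the current window starts at
 * file_offset == 0 and head has advanced to 0x2345 when an event no
 * longer fits. Then page_offset = 0x1000 * (0x2345 / 0x1000) = 0x2000,
 * the next window starts at file_offset += 0x2000 and head becomes
 * 0x345, so file_offset + head still addresses the same byte of the
 * file while the mapping stays page-aligned.
 */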
2250
2251 static s64 process_simple(struct perf_session *session,
2252                           union perf_event *event,
2253                           u64 file_offset)
2254 {
2255         return perf_session__process_event(session, event, file_offset);
2256 }
2257
2258 static int __perf_session__process_events(struct perf_session *session)
2259 {
2260         struct reader rd = {
2261                 .fd             = perf_data__fd(session->data),
2262                 .data_size      = session->header.data_size,
2263                 .data_offset    = session->header.data_offset,
2264                 .process        = process_simple,
2265         };
2266         struct ordered_events *oe = &session->ordered_events;
2267         struct perf_tool *tool = session->tool;
2268         struct ui_progress prog;
2269         int err;
2270
2271         perf_tool__fill_defaults(tool);
2272
2273         if (rd.data_size == 0)
2274                 return -1;
2275
2276         ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2277
2278         err = reader__process_events(&rd, session, &prog);
2279         if (err)
2280                 goto out_err;
2281         /* do the final flush for ordered samples */
2282         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2283         if (err)
2284                 goto out_err;
2285         err = auxtrace__flush_events(session, tool);
2286         if (err)
2287                 goto out_err;
2288         err = perf_session__flush_thread_stacks(session);
2289 out_err:
2290         ui_progress__finish();
2291         if (!tool->no_warn)
2292                 perf_session__warn_about_errors(session);
2293         /*
2294          * We may be switching perf.data output; make ordered_events
2295          * reusable.
2296          */
2297         ordered_events__reinit(&session->ordered_events);
2298         auxtrace__free_events(session);
2299         session->one_mmap = false;
2300         return err;
2301 }
2302
2303 int perf_session__process_events(struct perf_session *session)
2304 {
2305         if (perf_session__register_idle_thread(session) < 0)
2306                 return -ENOMEM;
2307
2308         if (perf_data__is_pipe(session->data))
2309                 return __perf_session__process_pipe_events(session);
2310
2311         return __perf_session__process_events(session);
2312 }
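
/*
 * A hedged usage sketch (hypothetical caller): after setting up a
 * session whose tool callbacks are filled in, a builtin calls the
 * entry point above and lets it pick the pipe or file reader:
 *
 *	int err = perf_session__process_events(session);
 *
 *	if (err)
 *		pr_err("failed to process events: %d\n", err);
 */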
2313
2314 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2315 {
2316         struct evsel *evsel;
2317
2318         evlist__for_each_entry(session->evlist, evsel) {
2319                 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2320                         return true;
2321         }
2322
2323         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2324         return false;
2325 }
2326
2327 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2328 {
2329         char *bracket;
2330         struct ref_reloc_sym *ref;
2331         struct kmap *kmap;
2332
2333         ref = zalloc(sizeof(struct ref_reloc_sym));
2334         if (ref == NULL)
2335                 return -ENOMEM;
2336
2337         ref->name = strdup(symbol_name);
2338         if (ref->name == NULL) {
2339                 free(ref);
2340                 return -ENOMEM;
2341         }
2342
2343         bracket = strchr(ref->name, ']');
2344         if (bracket)
2345                 *bracket = '\0';
2346
2347         ref->addr = addr;
2348
2349         kmap = map__kmap(map);
2350         if (kmap)
2351                 kmap->ref_reloc_sym = ref;
2352
2353         return 0;
2354 }
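
/*
 * Example of the truncation above (hypothetical input): a symbol_name
 * of "_text]extra" is duplicated and cut at the first ']', so ref->name
 * ends up as "_text" while the caller's string is left untouched.
 */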
2355
2356 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2357 {
2358         return machines__fprintf_dsos(&session->machines, fp);
2359 }
2360
2361 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2362                                           bool (skip)(struct dso *dso, int parm), int parm)
2363 {
2364         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2365 }
2366
2367 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2368 {
2369         size_t ret;
2370         const char *msg = "";
2371
2372         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2373                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2374
2375         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2376
2377         ret += events_stats__fprintf(&session->evlist->stats, fp);
2378         return ret;
2379 }
2380
2381 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2382 {
2383         /*
2384          * FIXME: Here we have to actually print all the machines in this
2385          * session, not just the host...
2386          */
2387         return machine__fprintf(&session->machines.host, fp);
2388 }
2389
2390 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2391                                               unsigned int type)
2392 {
2393         struct evsel *pos;
2394
2395         evlist__for_each_entry(session->evlist, pos) {
2396                 if (pos->core.attr.type == type)
2397                         return pos;
2398         }
2399         return NULL;
2400 }
2401
2402 int perf_session__cpu_bitmap(struct perf_session *session,
2403                              const char *cpu_list, unsigned long *cpu_bitmap)
2404 {
2405         int i, err = -1;
2406         struct perf_cpu_map *map;
2407         int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
2408
2409         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2410                 struct evsel *evsel;
2411
2412                 evsel = perf_session__find_first_evtype(session, i);
2413                 if (!evsel)
2414                         continue;
2415
2416                 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2417                         pr_err("File does not contain CPU events. "
2418                                "Remove -C option to proceed.\n");
2419                         return -1;
2420                 }
2421         }
2422
2423         map = perf_cpu_map__new(cpu_list);
2424         if (map == NULL) {
2425                 pr_err("Invalid cpu_list\n");
2426                 return -1;
2427         }
2428
2429         for (i = 0; i < map->nr; i++) {
2430                 int cpu = map->map[i];
2431
2432                 if (cpu >= nr_cpus) {
2433                         pr_err("Requested CPU %d too large. "
2434                                "Consider raising MAX_NR_CPUS\n", cpu);
2435                         goto out_delete_map;
2436                 }
2437
2438                 set_bit(cpu, cpu_bitmap);
2439         }
2440
2441         err = 0;
2442
2443 out_delete_map:
2444         perf_cpu_map__put(map);
2445         return err;
2446 }
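
/*
 * A minimal caller sketch, assuming the bitmap is sized by MAX_NR_CPUS
 * as the bounds check above expects:
 *
 *	unsigned long cpu_bitmap[BITS_TO_LONGS(MAX_NR_CPUS)];
 *
 *	memset(cpu_bitmap, 0, sizeof(cpu_bitmap));
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) == 0)
 *		use_bitmap(cpu_bitmap);	// hypothetical consumer
 */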
2447
2448 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2449                                 bool full)
2450 {
2451         if (session == NULL || fp == NULL)
2452                 return;
2453
2454         fprintf(fp, "# ========\n");
2455         perf_header__fprintf_info(session, fp, full);
2456         fprintf(fp, "# ========\n#\n");
2457 }
2458
2459 int perf_event__process_id_index(struct perf_session *session,
2460                                  union perf_event *event)
2461 {
2462         struct evlist *evlist = session->evlist;
2463         struct perf_record_id_index *ie = &event->id_index;
2464         size_t i, nr, max_nr;
2465
2466         max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
2467                  sizeof(struct id_index_entry);
2468         nr = ie->nr;
2469         if (nr > max_nr)
2470                 return -EINVAL;
2471
2472         if (dump_trace)
2473                 fprintf(stdout, " nr: %zu\n", nr);
2474
2475         for (i = 0; i < nr; i++) {
2476                 struct id_index_entry *e = &ie->entries[i];
2477                 struct perf_sample_id *sid;
2478
2479                 if (dump_trace) {
2480                         fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2481                         fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
2482                         fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
2483                         fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
2484                 }
2485
2486                 sid = evlist__id2sid(evlist, e->id);
2487                 if (!sid)
2488                         return -ENOENT;
2489                 sid->idx = e->idx;
2490                 sid->cpu = e->cpu;
2491                 sid->tid = e->tid;
2492         }
2493         return 0;
2494 }