// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "tsc.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include "units.h"
#include <internal/lib.h>
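
/*
 * PERF_RECORD_COMPRESSED payloads are decompressed into anonymous
 * mappings chained off the session (session->decomp ..
 * session->decomp_last).  A decompressed event may straddle two
 * records, so the undecoded tail of the previous buffer is carried
 * over into the next one (see decomp_last_rem below).
 */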
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
                                                  union perf_event *event, u64 file_offset)
{
        void *src;
        size_t decomp_size, src_size;
        u64 decomp_last_rem = 0;
        size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
        struct decomp *decomp, *decomp_last = session->decomp_last;

        if (decomp_last) {
                decomp_last_rem = decomp_last->size - decomp_last->head;
                decomp_len += decomp_last_rem;
        }

        mmap_len = sizeof(struct decomp) + decomp_len;
        decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (decomp == MAP_FAILED) {
                pr_err("Couldn't allocate memory for decompression\n");
                return -1;
        }

        decomp->file_pos = file_offset;
        decomp->mmap_len = mmap_len;
        decomp->head = 0;

        if (decomp_last_rem) {
                memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
                decomp->size = decomp_last_rem;
        }

        src = (void *)event + sizeof(struct perf_record_compressed);
        src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

        decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
                                &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
        if (!decomp_size) {
                munmap(decomp, mmap_len);
                pr_err("Couldn't decompress data\n");
                return -1;
        }

        decomp->size += decomp_size;

        if (session->decomp == NULL) {
                session->decomp = decomp;
                session->decomp_last = decomp;
        } else {
                session->decomp_last->next = decomp;
                session->decomp_last = decomp;
        }

        pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

        return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
        struct perf_data *data = session->data;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data__is_pipe(data))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->core.attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);

        return perf_session__deliver_event(session, event->event,
                                           session->tool, event->file_offset);
}
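
/*
 * A session is created either around an existing perf.data file (read
 * mode), for writing, or with no data at all (e.g. when only
 * synthesized events will be fed in); only the read path parses the
 * header and the evlist below.
 */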
struct perf_session *perf_session__new(struct perf_data *data,
                                       bool repipe, struct perf_tool *tool)
{
        int ret = -ENOMEM;
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);

        perf_env__init(&session->header.env);
        if (data) {
                ret = perf_data__open(data);
                if (ret < 0)
                        goto out_delete;

                session->data = data;

                if (perf_data__is_read(data)) {
                        ret = perf_session__open(session);
                        if (ret < 0)
                                goto out_delete;

                        /*
                         * set session attributes that are present in perf.data
                         * but not in pipe-mode.
                         */
                        if (!data->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }

                        evlist__init_trace_event_sample_raw(session->evlist);

                        /* Open the directory data. */
                        if (data->is_dir) {
                                ret = perf_data__open_dir(data);
                                if (ret)
                                        goto out_delete;
                        }

                        if (!symbol_conf.kallsyms_name &&
                            !symbol_conf.vmlinux_name)
                                symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        session->machines.host.single_address_space =
                perf_env__single_address_space(session->machines.host.env);

        if (!data || perf_data__is_write(data)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so evlist__sample_id_all is not meaningful here.
         */
        if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_delete:
        perf_session__delete(session);
 out:
        return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
        struct decomp *next, *decomp;
        size_t mmap_len;

        next = session->decomp;
        do {
                decomp = next;
                if (decomp == NULL)
                        break;
                next = decomp->next;
                mmap_len = decomp->mmap_len;
                munmap(decomp, mmap_len);
        } while (1);
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_session__release_decomp_events(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->data) {
                if (perf_data__is_read(session->data))
                        evlist__delete(session->evlist);
                perf_data__close(session->data);
        }
        free(session);
}
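
/*
 * Default handlers: every callback a tool does not provide is filled
 * in by perf_tool__fill_defaults() with one of the stubs below, so
 * event dispatch never has to check for NULL function pointers.
 */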
static int process_event_synth_tracing_data_stub(struct perf_session *session
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
                                       union perf_event *event)
{
        dump_printf(": unhandled!\n");
        if (perf_data__is_pipe(session->data))
                skipn(perf_data__fd(session->data), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
                             union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
                                   union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
                                        union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_time_conv(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
                                                       union perf_event *event __maybe_unused,
                                                       u64 file_offset __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}
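
/*
 * Fill every callback the tool left NULL: a few (lost, lost_samples,
 * aux, itrace_start, ...) default to the generic perf_event__process_*
 * handlers, the rest fall back to the no-op stubs above.
 */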
void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->cgroup == NULL)
                tool->cgroup = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->ksymbol == NULL)
                tool->ksymbol = perf_event__process_ksymbol;
        if (tool->bpf == NULL)
                tool->bpf = perf_event__process_bpf;
        if (tool->text_poke == NULL)
                tool->text_poke = perf_event__process_text_poke;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_time_conv_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
        if (tool->compressed == NULL)
                tool->compressed = perf_session__process_compressed_event;
}
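
/*
 * Byte-swapping helpers for perf.data files recorded on a machine of
 * the opposite endianness.  Each PERF_RECORD_* type gets its own
 * routine; the trailing sample_id_all block, when present, is swapped
 * separately via swap_sample_id_all().
 */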
static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;

        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                   bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);

        if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
                event->mmap2.maj   = bswap_32(event->mmap2.maj);
                event->mmap2.min   = bswap_32(event->mmap2.min);
                event->mmap2.ino   = bswap_64(event->mmap2.ino);
                event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
        }

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
        event->text_poke.addr    = bswap_64(event->text_poke.addr);
        event->text_poke.old_len = bswap_16(event->text_poke.old_len);
        event->text_poke.new_len = bswap_16(event->text_poke.new_len);

        if (sample_id_all) {
                size_t len = sizeof(event->text_poke.old_len) +
                             sizeof(event->text_poke.new_len) +
                             event->text_poke.old_len +
                             event->text_poke.new_len;
                void *data = &event->text_poke.old_len;

                data += PERF_ALIGN(len, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
                                        bool sample_id_all)
{
        u64 i;

        event->namespaces.pid           = bswap_32(event->namespaces.pid);
        event->namespaces.tid           = bswap_32(event->namespaces.tid);
        event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

        for (i = 0; i < event->namespaces.nr_namespaces; i++) {
                struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

                ns->dev = bswap_64(ns->dev);
                ns->ino = bswap_64(ns->ino);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
        event->cgroup.id = bswap_64(event->cgroup.id);

        if (sample_id_all) {
                void *data = &event->cgroup.path;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and to carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
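/*
 * Example: revbyte(0xb1) == 0x8d, i.e. 1011 0001 -> 1000 1101;
 * bit i of the input ends up in bit (7 - i) of the result.
 */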
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type = bswap_32(attr->type);
        attr->size = bswap_32(attr->size);

#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +   \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);
        bswap_field_32(aux_sample_size);

        /*
         * After read_format are bitfields. Check read_format because
         * we are unable to use offsetof on bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field_16
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
        if (event->auxtrace_error.fmt)
                event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct perf_record_cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct perf_record_record_cpu_map *mask;
        unsigned i;

        data->type = bswap_16(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct perf_record_record_cpu_map *)data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

static void perf_event__time_conv_swap(union perf_event *event,
                                       bool sample_id_all __maybe_unused)
{
        event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
        event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
        event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);

        if (event_contains(event->time_conv, time_cycles)) {
                event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
                event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
        }
}
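
/*
 * Dispatch table, indexed by event->header.type.  A NULL entry means
 * no swapping is done for that record type.
 */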
typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
        [PERF_RECORD_CGROUP]              = perf_event__cgroup_swap,
        [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * LBR callstack can only get user call chain,
                 * i is kernel call chain number,
                 * 1 is PERF_CONTEXT_USER.
                 *
                 * The user call chain is stored in LBR registers.
                 * LBR are pair registers. The caller is stored
                 * in "from" register, while the callee is stored
                 * in "to" register.
                 * For example, there is a call stack
                 * "A"->"B"->"C"->"D".
                 * The LBR registers will be recorded like
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to construct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), entries[i].from);
        }
}

static void callchain__printf(struct evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        uint64_t i;

        printf("%s: nr:%" PRIu64 "\n",
                !callstack ? "... branch stack" : "... branch callstack",
                sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &entries[i];

                if (!callstack) {
                        printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                                i, e->from, e->to,
                                (unsigned short)e->flags.cycles,
                                e->flags.mispred ? "M" : " ",
                                e->flags.predicted ? "P" : " ",
                                e->flags.abort ? "A" : " ",
                                e->flags.in_tx ? "T" : " ",
                                (unsigned)e->flags.reserved);
                } else {
                        printf("..... %2"PRIu64": %016" PRIx64 "\n",
                                i, i > 0 ? e->from : e->to);
                }
        }
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%016" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
        u64 sample_type = __evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);
        if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
                evlist->trace_event_sample_raw(evlist, event, sample);

        if (sample)
                evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

char *get_page_size_name(u64 size, char *str)
{
        if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
                snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");

        return str;
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;
        char str[PAGE_SIZE_NAME_LEN];

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->core.attr.sample_type;

        if (evsel__has_callchain(evsel))
                callchain__printf(evsel, sample);

        if (evsel__has_br_stack(evsel))
                branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
                printf("... weight: %" PRIu64 "", sample->weight);
                if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
                        printf(",0x%"PRIx16"", sample->ins_lat);
                        printf(",0x%"PRIx16"", sample->p_stage_cyc);
                }
                printf("\n");
        }

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_PHYS_ADDR)
                printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

        if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
                printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));

        if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
                printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
        struct perf_record_read *read_event = &event->read;
        u64 read_format;

        if (!dump_trace)
                return;

        printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
               evsel__name(evsel), event->read.value);

        if (!evsel)
                return;

        read_format = evsel->core.attr.read_format;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

        if (read_format & PERF_FORMAT_ID)
                printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
                                                  union perf_event *event,
                                                  struct perf_sample *sample)
{
        if (perf_guest &&
            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                return machines__find_guest(machines, pid);
        }

        return &machines->host;
}
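
/*
 * With PERF_SAMPLE_READ the sample carries raw counter values rather
 * than a period, so the period is reconstructed as the delta against
 * the previous value cached in the sample id (sid->period).
 */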
static int deliver_sample_value(struct evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
        struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
        struct evsel *evsel;

        if (sid) {
                sample->id     = v->id;
                sample->period = v->value - sid->period;
                sid->period    = v->value;
        }

        if (!sid || sid->evsel == NULL) {
                ++evlist->stats.nr_unknown_id;
                return 0;
        }

        /*
         * There's no reason to deliver sample
         * for zero period, bail out.
         */
        if (!sample->period)
                return 0;

        evsel = container_of(sid->evsel, struct evsel, core);
        return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        int ret = -EINVAL;
        u64 i;

        for (i = 0; i < sample->read.group.nr; i++) {
                ret = deliver_sample_value(evlist, tool, event, sample,
                                           &sample->read.group.values[i],
                                           machine);
                if (ret)
                        break;
        }

        return ret;
}

static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
                                  union perf_event *event, struct perf_sample *sample,
                                  struct evsel *evsel, struct machine *machine)
{
        /* We know evsel != NULL. */
        u64 sample_type = evsel->core.attr.sample_type;
        u64 read_format = evsel->core.attr.read_format;

        /* Standard sample delivery. */
        if (!(sample_type & PERF_SAMPLE_READ))
                return tool->sample(tool, event, sample, evsel, machine);

        /* For PERF_SAMPLE_READ we have either single or group mode. */
        if (read_format & PERF_FORMAT_GROUP)
                return deliver_sample_group(evlist, tool, event, sample,
                                            machine);
        else
                return deliver_sample_value(evlist, tool, event, sample,
                                            &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
                                   struct evlist *evlist,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct perf_tool *tool, u64 file_offset)
{
        struct evsel *evsel;
        struct machine *machine;

        dump_event(evlist, event, file_offset, sample);

        evsel = evlist__id2evsel(evlist, sample->id);

        machine = machines__find_for_cpumode(machines, event, sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                if (evsel == NULL) {
                        ++evlist->stats.nr_unknown_id;
                        return 0;
                }
                dump_sample(evsel, event, sample);
                if (machine == NULL) {
                        ++evlist->stats.nr_unprocessable_samples;
                        return 0;
                }
                return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_MMAP2:
                if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
                        ++evlist->stats.nr_proc_map_timeout;
                return tool->mmap2(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
        case PERF_RECORD_NAMESPACES:
                return tool->namespaces(tool, event, sample, machine);
        case PERF_RECORD_CGROUP:
                return tool->cgroup(tool, event, sample, machine);
        case PERF_RECORD_FORK:
                return tool->fork(tool, event, sample, machine);
        case PERF_RECORD_EXIT:
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
                        evlist->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_LOST_SAMPLES:
                if (tool->lost_samples == perf_event__process_lost_samples)
                        evlist->stats.total_lost_samples += event->lost_samples.lost;
                return tool->lost_samples(tool, event, sample, machine);
        case PERF_RECORD_READ:
                dump_read(evsel, event);
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        case PERF_RECORD_AUX:
                if (tool->aux == perf_event__process_aux) {
                        if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
                                evlist->stats.total_aux_lost += 1;
                        if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
                                evlist->stats.total_aux_partial += 1;
                }
                return tool->aux(tool, event, sample, machine);
        case PERF_RECORD_ITRACE_START:
                return tool->itrace_start(tool, event, sample, machine);
        case PERF_RECORD_SWITCH:
        case PERF_RECORD_SWITCH_CPU_WIDE:
                return tool->context_switch(tool, event, sample, machine);
        case PERF_RECORD_KSYMBOL:
                return tool->ksymbol(tool, event, sample, machine);
        case PERF_RECORD_BPF_EVENT:
                return tool->bpf(tool, event, sample, machine);
        case PERF_RECORD_TEXT_POKE:
                return tool->text_poke(tool, event, sample, machine);
        default:
                ++evlist->stats.nr_unknown_events;
                return -1;
        }
}
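
/*
 * Deliver a single kernel event: parse the sample, give auxtrace a
 * chance to consume (or queue) it, then hand it to the per-machine
 * dispatcher above.
 */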
static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
        struct perf_sample sample;
        int ret = evlist__parse_sample(session->evlist, event, &sample);

        if (ret) {
                pr_err("Can't parse sample, err = %d\n", ret);
                return ret;
        }

        ret = auxtrace__process_event(session, event, &sample, tool);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return 0;

        ret = machines__deliver_event(&session->machines, session->evlist,
                                      event, &sample, tool, file_offset);

        if (dump_trace && sample.aux_sample.size)
                auxtrace__dump_auxtrace_sample(session, &sample);

        return ret;
}

static s64 perf_session__process_user_event(struct perf_session *session,
                                            union perf_event *event,
                                            u64 file_offset)
{
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
        struct perf_sample sample = { .time = 0, };
        int fd = perf_data__fd(session->data);
        int err;

        if (event->header.type != PERF_RECORD_COMPRESSED ||
            tool->compressed == perf_session__process_compressed_event_stub)
                dump_event(session->evlist, event, file_offset, &sample);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                err = tool->attr(tool, event, &session->evlist);
                if (err == 0) {
                        perf_session__set_id_hdr_size(session);
                        perf_session__set_comm_exec(session);
                }
                return err;
        case PERF_RECORD_EVENT_UPDATE:
                return tool->event_update(tool, event, &session->evlist);
        case PERF_RECORD_HEADER_EVENT_TYPE:
                /*
                 * Deprecated, but we need to handle it for the sake
                 * of old data files created in pipe mode.
                 */
                return 0;
        case PERF_RECORD_HEADER_TRACING_DATA:
                /*
                 * Setup for reading amidst mmap, but only when we
                 * are in 'file' mode. The 'pipe' fd is in the proper
                 * place already.
                 */
                if (!perf_data__is_pipe(session->data))
                        lseek(fd, file_offset, SEEK_SET);
                return tool->tracing_data(session, event);
        case PERF_RECORD_HEADER_BUILD_ID:
                return tool->build_id(session, event);
        case PERF_RECORD_FINISHED_ROUND:
                return tool->finished_round(tool, event, oe);
        case PERF_RECORD_ID_INDEX:
                return tool->id_index(session, event);
        case PERF_RECORD_AUXTRACE_INFO:
                return tool->auxtrace_info(session, event);
        case PERF_RECORD_AUXTRACE:
                /* setup for reading amidst mmap */
                lseek(fd, file_offset + event->header.size, SEEK_SET);
                return tool->auxtrace(session, event);
        case PERF_RECORD_AUXTRACE_ERROR:
                perf_session__auxtrace_error_inc(session, event);
                return tool->auxtrace_error(session, event);
        case PERF_RECORD_THREAD_MAP:
                return tool->thread_map(session, event);
        case PERF_RECORD_CPU_MAP:
                return tool->cpu_map(session, event);
        case PERF_RECORD_STAT_CONFIG:
                return tool->stat_config(session, event);
        case PERF_RECORD_STAT:
                return tool->stat(session, event);
        case PERF_RECORD_STAT_ROUND:
                return tool->stat_round(session, event);
        case PERF_RECORD_TIME_CONV:
                session->time_conv = event->time_conv;
                return tool->time_conv(session, event);
        case PERF_RECORD_HEADER_FEATURE:
                return tool->feature(session, event);
        case PERF_RECORD_COMPRESSED:
                err = tool->compressed(session, event, file_offset);
                if (err)
                        dump_event(session->evlist, event, file_offset, &sample);
                return err;
        default:
                return -EINVAL;
        }
}

int perf_session__deliver_synth_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample)
{
        struct evlist *evlist = session->evlist;
        struct perf_tool *tool = session->tool;

        events_stats__inc(&evlist->stats, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, 0);

        return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
        perf_event__swap_op swap;

        swap = perf_event__swap_ops[event->header.type];
        if (swap)
                swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
                             void *buf, size_t buf_sz,
                             union perf_event **event_ptr,
                             struct perf_sample *sample)
{
        union perf_event *event;
        size_t hdr_sz, rest;
        int fd;

        if (session->one_mmap && !session->header.needs_swap) {
                event = file_offset - session->one_mmap_offset +
                        session->one_mmap_addr;
                goto out_parse_sample;
        }

        if (perf_data__is_pipe(session->data))
                return -1;

        fd = perf_data__fd(session->data);
        hdr_sz = sizeof(struct perf_event_header);

        if (buf_sz < hdr_sz)
                return -1;

        if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
            readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
                return -1;

        event = (union perf_event *)buf;

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        if (event->header.size < hdr_sz || event->header.size > buf_sz)
                return -1;

        buf += hdr_sz;
        rest = event->header.size - hdr_sz;

        if (readn(fd, buf, rest) != (ssize_t)rest)
                return -1;

        if (session->header.needs_swap)
                event_swap(event, evlist__sample_id_all(session->evlist));

out_parse_sample:

        if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
            evlist__parse_sample(session->evlist, event, sample))
                return -1;

        *event_ptr = event;

        return 0;
}

int perf_session__peek_events(struct perf_session *session, u64 offset,
                              u64 size, peek_events_cb_t cb, void *data)
{
        u64 max_offset = offset + size;
        char buf[PERF_SAMPLE_MAX_SIZE];
        union perf_event *event;
        int err;

        do {
                err = perf_session__peek_event(session, offset, buf,
                                               PERF_SAMPLE_MAX_SIZE, &event,
                                               NULL);
                if (err)
                        return err;

                err = cb(session, event, offset, data);
                if (err)
                        return err;

                offset += event->header.size;
                if (event->header.type == PERF_RECORD_AUXTRACE)
                        offset += event->auxtrace.size;

        } while (offset < max_offset);

        return err;
}

static s64 perf_session__process_event(struct perf_session *session,
                                       union perf_event *event, u64 file_offset)
{
        struct evlist *evlist = session->evlist;
        struct perf_tool *tool = session->tool;
        int ret;

        if (session->header.needs_swap)
                event_swap(event, evlist__sample_id_all(evlist));

        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;

        events_stats__inc(&evlist->stats, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, file_offset);

        if (tool->ordered_events) {
                u64 timestamp = -1ULL;

                ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
                if (ret && ret != -1)
                        return ret;

                ret = perf_session__queue_event(session, event, timestamp, file_offset);
                if (ret != -ETIME)
                        return ret;
        }

        return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
        hdr->type = bswap_32(hdr->type);
        hdr->misc = bswap_16(hdr->misc);
        hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
        return machine__findnew_thread(&session->machines.host, -1, pid);
}

int perf_session__register_idle_thread(struct perf_session *session)
{
        struct thread *thread = machine__idle_thread(&session->machines.host);

        /* machine__idle_thread() got the thread, so put it */
        thread__put(thread);
        return thread ? 0 : -1;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
        const struct ordered_events *oe = &session->ordered_events;
        struct evsel *evsel;
        bool should_warn = true;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->core.attr.write_backward)
                        should_warn = false;
        }

        if (!should_warn)
                return;

        if (oe->nr_unordered_events != 0)
                ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
        const struct events_stats *stats = &session->evlist->stats;

        if (session->tool->lost == perf_event__process_lost &&
            stats->nr_events[PERF_RECORD_LOST] != 0) {
                ui__warning("Processed %d events and lost %d chunks!\n\n"
                            "Check IO/CPU overload!\n\n",
                            stats->nr_events[0],
                            stats->nr_events[PERF_RECORD_LOST]);
        }

        if (session->tool->lost_samples == perf_event__process_lost_samples) {
                double drop_rate;

                drop_rate = (double)stats->total_lost_samples /
                            (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
                if (drop_rate > 0.05) {
                        ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
                                    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
                                    drop_rate * 100.0);
                }
        }

        if (session->tool->aux == perf_event__process_aux &&
            stats->total_aux_lost != 0) {
                ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
                            stats->total_aux_lost,
                            stats->nr_events[PERF_RECORD_AUX]);
        }

        if (session->tool->aux == perf_event__process_aux &&
            stats->total_aux_partial != 0) {
                bool vmm_exclusive = false;

                (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
                                       &vmm_exclusive);

                ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
                            "Are you running a KVM guest in the background?%s\n\n",
                            stats->total_aux_partial,
                            stats->nr_events[PERF_RECORD_AUX],
                            vmm_exclusive ?
                            "\nReloading kvm_intel module with vmm_exclusive=0\n"
                            "will reduce the gaps to only guest's timeslices." :
                            "");
        }

        if (stats->nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
                            "file generated by a more recent tool?\n\n"
                            "If that is not the case, consider "
                            "reporting to linux-kernel@vger.kernel.org.\n\n",
                            stats->nr_unknown_events);
        }

        if (stats->nr_unknown_id != 0) {
                ui__warning("%u samples with id not present in the header\n",
                            stats->nr_unknown_id);
        }

        if (stats->nr_invalid_chains != 0) {
                ui__warning("Found invalid callchains!\n\n"
                            "%u out of %u events were discarded for this reason.\n\n"
                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
                            stats->nr_invalid_chains,
                            stats->nr_events[PERF_RECORD_SAMPLE]);
        }

        if (stats->nr_unprocessable_samples != 0) {
                ui__warning("%u unprocessable samples recorded.\n"
                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
                            stats->nr_unprocessable_samples);
        }

        perf_session__warn_order(session);

        events_stats__auxtrace_error_warn(stats);

        if (stats->nr_proc_map_timeout != 0) {
                ui__warning("%d map information files for pre-existing threads were\n"
                            "not processed, if there are samples for addresses they\n"
                            "will not be resolved, you may find out which are these\n"
                            "threads by running with -v and redirecting the output\n"
                            "to a file.\n"
                            "The time limit to process proc map is too short?\n"
                            "Increase it by --proc-map-timeout\n",
                            stats->nr_proc_map_timeout);
        }
}

static int perf_session__flush_thread_stack(struct thread *thread,
                                            void *p __maybe_unused)
{
        return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
        return machines__for_each_thread(&session->machines,
                                         perf_session__flush_thread_stack,
                                         NULL);
}

volatile int session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);
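
/*
 * Pipe mode: events cannot be mmapped, so read them one at a time
 * into a heap buffer that is grown on demand to the largest event
 * size seen so far.
 */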
static int __perf_session__process_pipe_events(struct perf_session *session)
{
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
        union perf_event *event;
        uint32_t size, cur_size = 0;
        void *buf = NULL;
        s64 skip = 0;
        u64 head;
        ssize_t err;
        void *p;

        perf_tool__fill_defaults(tool);

        head = 0;
        cur_size = sizeof(union perf_event);

        buf = malloc(cur_size);
        if (!buf)
                return -errno;
        ordered_events__set_copy_on_queue(oe, true);
more:
        event = buf;
        err = perf_data__read(session->data, event,
                              sizeof(struct perf_event_header));
        if (err <= 0) {
                if (err == 0)
                        goto done;

                pr_err("failed to read event header\n");
                goto out_err;
        }

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        size = event->header.size;
        if (size < sizeof(struct perf_event_header)) {
                pr_err("bad event header size\n");
                goto out_err;
        }

        if (size > cur_size) {
                void *new = realloc(buf, size);
                if (!new) {
                        pr_err("failed to allocate memory to read event\n");
                        goto out_err;
                }
                buf = new;
                cur_size = size;
                event = buf;
        }
        p = event;
        p += sizeof(struct perf_event_header);

        if (size - sizeof(struct perf_event_header)) {
                err = perf_data__read(session->data, p,
                                      size - sizeof(struct perf_event_header));
                if (err <= 0) {
                        if (err == 0) {
                                pr_err("unexpected end of event stream\n");
                                goto done;
                        }

                        pr_err("failed to read event data\n");
                        goto out_err;
                }
        }

        if ((skip = perf_session__process_event(session, event, head)) < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                       head, event->header.size, event->header.type);
                err = -EINVAL;
                goto out_err;
        }

        head += size;

        if (skip > 0)
                head += skip;

        err = __perf_session__process_decomp_events(session);
        if (err)
                goto out_err;

        if (!session_done())
                goto more;
done:
        /* do the final flush for ordered samples */
        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
        if (err)
                goto out_err;
        err = auxtrace__flush_events(session, tool);
        if (err)
                goto out_err;
        err = perf_session__flush_thread_stacks(session);
out_err:
        free(buf);
        if (!tool->no_warn)
                perf_session__warn_about_errors(session);
        ordered_events__free(&session->ordered_events);
        auxtrace__free_events(session);
        return err;
}
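
/*
 * prefetch_event() returns NULL when the next event does not fit in
 * the current buffer.  For mmapped data the caller remaps and retries,
 * so a hard failure is signalled with ERR_PTR(-EINVAL) instead;
 * decompressed buffers cannot be remapped, so there NULL simply ends
 * the processing loop.
 */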
static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
               bool needs_swap, union perf_event *error)
{
        union perf_event *event;

        /*
         * Ensure we have enough space remaining to read
         * the size of the event in the headers.
         */
        if (head + sizeof(event->header) > mmap_size)
                return NULL;

        event = (union perf_event *)(buf + head);
        if (needs_swap)
                perf_event_header__bswap(&event->header);

        if (head + event->header.size <= mmap_size)
                return event;

        /* We're not fetching the event so swap back again */
        if (needs_swap)
                perf_event_header__bswap(&event->header);

        pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
                 " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);

        return error;
}

static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
        return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
        return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}

static int __perf_session__process_decomp_events(struct perf_session *session)
{
        s64 skip;
        u64 size, file_pos = 0;
        struct decomp *decomp = session->decomp_last;

        if (!decomp)
                return 0;

        while (decomp->head < decomp->size && !session_done()) {
                union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
                                                             session->header.needs_swap);

                if (!event)
                        break;

                size = event->header.size;

                if (size < sizeof(struct perf_event_header) ||
                    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
                        pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                               decomp->file_pos + decomp->head, event->header.size, event->header.type);
                        return -EINVAL;
                }

                if (skip)
                        size += skip;

                decomp->head += size;
        }

        return 0;
}
/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

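/*
 * A reader bundles what is needed to iterate over the on-disk data section:
 * the file descriptor, the section's offset and size, and the callback to
 * invoke for each fetched event.
 */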
typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset);

struct reader {
	int		 fd;
	u64		 data_size;
	u64		 data_offset;
	reader_cb_t	 process;
	bool		 in_place_update;
};

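/*
 * Core mmap event loop: map a window of the data file, fetch events until one
 * no longer fits, then slide the window forward (remap) and continue until
 * file_pos reaches the end of the data section.
 */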
static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (rd->in_place_update) {
		mmap_prot |= PROT_WRITE;
	} else if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       file_offset + head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}

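/* Default reader callback: hand the event straight to the session dispatcher. */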
static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset)
{
	return perf_session__process_event(session, event, file_offset);
}

static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		 = perf_data__fd(session->data),
		.data_size	 = session->header.data_size,
		.data_offset	 = session->header.data_offset,
		.process	 = process_simple,
		.in_place_update = session->data->in_place_update,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;

	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output, so make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

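/*
 * Entry point for event processing: pick the pipe or mmap reader depending on
 * how the perf.data input was opened.
 */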
int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}

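/*
 * Verify that the file holds at least one tracepoint event; 'msg' names the
 * record command to suggest in the error message.
 */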
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

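/*
 * Attach a kallsyms reference symbol and its address to the kernel map's kmap
 * so that symbol relocation can be computed later.
 */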
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*++bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
				       bool skip_empty)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
	ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}
	return NULL;
}

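/*
 * Parse a user-supplied CPU list (e.g. "0,2-4") into a bitmap, after checking
 * that every event type present in the file actually sampled the CPU.
 */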
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}

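/* Print the header feature information bracketed by '# ========' markers. */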
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

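/*
 * An ID index record maps each sample id to its mmap index, cpu and tid.
 * Validate the advertised entry count against the record size before trusting
 * it, then look up the matching perf_sample_id for each entry.
 */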
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRI_lu64, e->id);
			fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
			fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
		}

		sid = evlist__id2sid(evlist, e->id);