1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * auxtrace.c: AUX area trace support
4 * Copyright (c) 2013-2015, Intel Corporation.
5 */
15 #include <linux/kernel.h>
16 #include <linux/perf_event.h>
17 #include <linux/types.h>
18 #include <linux/bitops.h>
19 #include <linux/log2.h>
20 #include <linux/string.h>
21 #include <linux/time64.h>
23 #include <sys/param.h>
26 #include <linux/list.h>
27 #include <linux/zalloc.h>
34 #include "evsel_config.h"
36 #include "util/perf_api_probe.h"
37 #include "util/synthetic-events.h"
38 #include "thread_map.h"
42 #include <linux/hash.h>
48 #include <subcmd/parse-options.h>
52 #include "intel-bts.h"
54 #include "s390-cpumsf.h"
55 #include "util/mmap.h"
57 #include <linux/ctype.h>
58 #include "symbol/kallsyms.h"
59 #include <internal/lib.h>
61 /*
62 * Make a group from 'leader' to 'last', requiring that the events were not
63 * already grouped to a different leader.
64 */
65 static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last)
70 if (!evsel__is_group_leader(leader))
74 evlist__for_each_entry(evlist, evsel) {
76 if (!(evsel__leader(evsel) == leader ||
77 (evsel__leader(evsel) == evsel &&
78 evsel->core.nr_members <= 1)))
80 } else if (evsel == leader) {
88 evlist__for_each_entry(evlist, evsel) {
90 if (!evsel__has_leader(evsel, leader)) {
91 evsel__set_leader(evsel, leader);
92 if (leader->core.nr_members < 1)
93 leader->core.nr_members = 1;
94 leader->core.nr_members += 1;
96 } else if (evsel == leader) {
106 static bool auxtrace__dont_decode(struct perf_session *session)
108 return !session->itrace_synth_opts ||
109 session->itrace_synth_opts->dont_decode;
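/*
 * The kernel publishes AUX area placement through the perf_event_mmap_page,
 * so userspace maps it in two steps: write the desired aux_offset/aux_size
 * into the user page, then mmap() that range of the event fd. A minimal
 * calling sketch, with error handling elided and 'fd' being the perf event
 * file descriptor:
 *
 *	struct auxtrace_mmap mm = { .base = NULL };
 *	struct auxtrace_mmap_params mp;
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   overwrite);
 *	auxtrace_mmap__mmap(&mm, &mp, userpg, fd);
 */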
112 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
113 struct auxtrace_mmap_params *mp,
114 void *userpg, int fd)
116 struct perf_event_mmap_page *pc = userpg;
118 WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
133 #if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
134 pr_err("Cannot use AUX area tracing mmaps\n");
138 pc->aux_offset = mp->offset;
139 pc->aux_size = mp->len;
141 mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
142 if (mm->base == MAP_FAILED) {
143 pr_debug2("failed to mmap AUX area\n");
151 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
154 munmap(mm->base, mm->len);
159 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
160 off_t auxtrace_offset,
161 unsigned int auxtrace_pages,
162 bool auxtrace_overwrite)
164 if (auxtrace_pages) {
165 mp->offset = auxtrace_offset;
166 mp->len = auxtrace_pages * (size_t)page_size;
167 mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
168 mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
169 pr_debug2("AUX area mmap length %zu\n", mp->len);
175 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
176 struct evlist *evlist, int idx,
182 mp->cpu = evlist->core.cpus->map[idx];
183 if (evlist->core.threads)
184 mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
189 mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
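/*
 * Queues hold the trace data to be decoded: one queue per CPU for per-cpu
 * recording, otherwise one per thread. The queue index comes from the
 * PERF_RECORD_AUXTRACE event, and the array below starts at a small power
 * of 2 and doubles as higher indexes appear.
 */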
193 #define AUXTRACE_INIT_NR_QUEUES 32
195 static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
197 struct auxtrace_queue *queue_array;
198 unsigned int max_nr_queues, i;
200 max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
201 if (nr_queues > max_nr_queues)
204 queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
208 for (i = 0; i < nr_queues; i++) {
209 INIT_LIST_HEAD(&queue_array[i].head);
210 queue_array[i].priv = NULL;
216 int auxtrace_queues__init(struct auxtrace_queues *queues)
218 queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
219 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
220 if (!queues->queue_array)
225 static int auxtrace_queues__grow(struct auxtrace_queues *queues,
226 unsigned int new_nr_queues)
228 unsigned int nr_queues = queues->nr_queues;
229 struct auxtrace_queue *queue_array;
233 nr_queues = AUXTRACE_INIT_NR_QUEUES;
235 while (nr_queues && nr_queues < new_nr_queues)
238 if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
241 queue_array = auxtrace_alloc_queue_array(nr_queues);
245 for (i = 0; i < queues->nr_queues; i++) {
246 list_splice_tail(&queues->queue_array[i].head,
247 &queue_array[i].head);
248 queue_array[i].tid = queues->queue_array[i].tid;
249 queue_array[i].cpu = queues->queue_array[i].cpu;
250 queue_array[i].set = queues->queue_array[i].set;
251 queue_array[i].priv = queues->queue_array[i].priv;
254 queues->nr_queues = nr_queues;
255 queues->queue_array = queue_array;
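/*
 * Example: with 32 queues allocated, a buffer for queue index 100 causes
 * auxtrace_queues__grow(queues, 101) to double 32 -> 64 -> 128, after
 * which the old queues' buffer lists are spliced into the new array.
 */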
260 static void *auxtrace_copy_data(u64 size, struct perf_session *session)
262 int fd = perf_data__fd(session->data);
266 if (size > SSIZE_MAX)
273 ret = readn(fd, p, size);
274 if (ret != (ssize_t)size) {
282 static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
284 struct auxtrace_buffer *buffer)
286 struct auxtrace_queue *queue;
289 if (idx >= queues->nr_queues) {
290 err = auxtrace_queues__grow(queues, idx + 1);
295 queue = &queues->queue_array[idx];
299 queue->tid = buffer->tid;
300 queue->cpu = buffer->cpu;
303 buffer->buffer_nr = queues->next_buffer_nr++;
305 list_add_tail(&buffer->list, &queue->head);
307 queues->new_data = true;
308 queues->populated = true;
313 /* Limit buffers to 32MiB on 32-bit */
314 #define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
316 static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
318 struct auxtrace_buffer *buffer)
320 u64 sz = buffer->size;
321 bool consecutive = false;
322 struct auxtrace_buffer *b;
325 while (sz > BUFFER_LIMIT_FOR_32_BIT) {
326 b = memdup(buffer, sizeof(struct auxtrace_buffer));
329 b->size = BUFFER_LIMIT_FOR_32_BIT;
330 b->consecutive = consecutive;
331 err = auxtrace_queues__queue_buffer(queues, idx, b);
333 auxtrace_buffer__free(b);
336 buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
337 sz -= BUFFER_LIMIT_FOR_32_BIT;
342 buffer->consecutive = consecutive;
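/*
 * Example: on a 32-bit build, a 70MiB buffer is queued as 32MiB + 32MiB +
 * 6MiB pieces; each piece after the first has ->consecutive set so the
 * decoder knows the data continues the previous piece.
 */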
347 static bool filter_cpu(struct perf_session *session, int cpu)
349 unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
351 return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
354 static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
355 struct perf_session *session,
356 unsigned int idx,
357 struct auxtrace_buffer *buffer,
358 struct auxtrace_buffer **buffer_ptr)
362 if (filter_cpu(session, buffer->cpu))
365 buffer = memdup(buffer, sizeof(*buffer));
369 if (session->one_mmap) {
370 buffer->data = buffer->data_offset - session->one_mmap_offset +
371 session->one_mmap_addr;
372 } else if (perf_data__is_pipe(session->data)) {
373 buffer->data = auxtrace_copy_data(buffer->size, session);
376 buffer->data_needs_freeing = true;
377 } else if (BITS_PER_LONG == 32 &&
378 buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
379 err = auxtrace_queues__split_buffer(queues, idx, buffer);
384 err = auxtrace_queues__queue_buffer(queues, idx, buffer);
388 /* FIXME: Doesn't work for split buffer */
390 *buffer_ptr = buffer;
395 auxtrace_buffer__free(buffer);
399 int auxtrace_queues__add_event(struct auxtrace_queues *queues,
400 struct perf_session *session,
401 union perf_event *event, off_t data_offset,
402 struct auxtrace_buffer **buffer_ptr)
404 struct auxtrace_buffer buffer = {
406 .tid = event->auxtrace.tid,
407 .cpu = event->auxtrace.cpu,
408 .data_offset = data_offset,
409 .offset = event->auxtrace.offset,
410 .reference = event->auxtrace.reference,
411 .size = event->auxtrace.size,
413 unsigned int idx = event->auxtrace.idx;
415 return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
419 static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
420 struct perf_session *session,
421 off_t file_offset, size_t sz)
423 union perf_event *event;
425 char buf[PERF_SAMPLE_MAX_SIZE];
427 err = perf_session__peek_event(session, file_offset, buf,
428 PERF_SAMPLE_MAX_SIZE, &event, NULL);
432 if (event->header.type == PERF_RECORD_AUXTRACE) {
433 if (event->header.size < sizeof(struct perf_record_auxtrace) ||
434 event->header.size != sz) {
438 file_offset += event->header.size;
439 err = auxtrace_queues__add_event(queues, session, event,
446 void auxtrace_queues__free(struct auxtrace_queues *queues)
450 for (i = 0; i < queues->nr_queues; i++) {
451 while (!list_empty(&queues->queue_array[i].head)) {
452 struct auxtrace_buffer *buffer;
454 buffer = list_entry(queues->queue_array[i].head.next,
455 struct auxtrace_buffer, list);
456 list_del_init(&buffer->list);
457 auxtrace_buffer__free(buffer);
461 zfree(&queues->queue_array);
462 queues->nr_queues = 0;
465 static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
466 unsigned int pos, unsigned int queue_nr,
472 parent = (pos - 1) >> 1;
473 if (heap_array[parent].ordinal <= ordinal)
475 heap_array[pos] = heap_array[parent];
478 heap_array[pos].queue_nr = queue_nr;
479 heap_array[pos].ordinal = ordinal;
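/*
 * The heap is a min-heap keyed by 'ordinal' (typically a timestamp), so
 * heap_array[0] is always the queue with the earliest pending data.
 * auxtrace_heapify() sifts a new entry up: e.g. inserting ordinal 5 under
 * a parent with ordinal 9 moves the parent down into the child slot and
 * places the new entry above it.
 */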
482 int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
485 struct auxtrace_heap_item *heap_array;
487 if (queue_nr >= heap->heap_sz) {
488 unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;
490 while (heap_sz <= queue_nr)
492 heap_array = realloc(heap->heap_array,
493 heap_sz * sizeof(struct auxtrace_heap_item));
496 heap->heap_array = heap_array;
497 heap->heap_sz = heap_sz;
500 auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);
505 void auxtrace_heap__free(struct auxtrace_heap *heap)
507 zfree(&heap->heap_array);
512 void auxtrace_heap__pop(struct auxtrace_heap *heap)
514 unsigned int pos, last, heap_cnt = heap->heap_cnt;
515 struct auxtrace_heap_item *heap_array;
522 heap_array = heap->heap_array;
526 unsigned int left, right;
528 left = (pos << 1) + 1;
529 if (left >= heap_cnt)
532 if (right >= heap_cnt) {
533 heap_array[pos] = heap_array[left];
536 if (heap_array[left].ordinal < heap_array[right].ordinal) {
537 heap_array[pos] = heap_array[left];
540 heap_array[pos] = heap_array[right];
546 auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
547 heap_array[last].ordinal);
550 size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
551 struct evlist *evlist)
554 return itr->info_priv_size(itr, evlist);
558 static int auxtrace_not_supported(void)
560 pr_err("AUX area tracing is not supported on this architecture\n");
564 int auxtrace_record__info_fill(struct auxtrace_record *itr,
565 struct perf_session *session,
566 struct perf_record_auxtrace_info *auxtrace_info,
570 return itr->info_fill(itr, session, auxtrace_info, priv_size);
571 return auxtrace_not_supported();
574 void auxtrace_record__free(struct auxtrace_record *itr)
580 int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
582 if (itr && itr->snapshot_start)
583 return itr->snapshot_start(itr);
587 int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
589 if (!on_exit && itr && itr->snapshot_finish)
590 return itr->snapshot_finish(itr);
594 int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
595 struct auxtrace_mmap *mm,
596 unsigned char *data, u64 *head, u64 *old)
598 if (itr && itr->find_snapshot)
599 return itr->find_snapshot(itr, idx, mm, data, head, old);
603 int auxtrace_record__options(struct auxtrace_record *itr,
604 struct evlist *evlist,
605 struct record_opts *opts)
608 itr->evlist = evlist;
609 return itr->recording_options(itr, evlist, opts);
614 u64 auxtrace_record__reference(struct auxtrace_record *itr)
617 return itr->reference(itr);
621 int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
622 struct record_opts *opts, const char *str)
627 /* PMU-agnostic options */
630 opts->auxtrace_snapshot_on_exit = true;
637 if (itr && itr->parse_snapshot_options)
638 return itr->parse_snapshot_options(itr, opts, str);
640 pr_err("No AUX area tracing to snapshot\n");
644 int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
648 if (!itr->evlist || !itr->pmu)
651 evlist__for_each_entry(itr->evlist, evsel) {
652 if (evsel->core.attr.type == itr->pmu->type) {
655 return evlist__enable_event_idx(itr->evlist, evsel, idx);
662 * Event record size is 16-bit, which results in a maximum size of about 64KiB.
663 * Allow about 4KiB for the rest of the sample record, to give a maximum
664 * AUX area sample size of 60KiB.
666 #define MAX_AUX_SAMPLE_SIZE (60 * 1024)
668 /* Arbitrary default size if no other default provided */
669 #define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)
671 static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
672 struct record_opts *opts)
675 bool has_aux_leader = false;
678 evlist__for_each_entry(evlist, evsel) {
679 sz = evsel->core.attr.aux_sample_size;
680 if (evsel__is_group_leader(evsel)) {
681 has_aux_leader = evsel__is_aux_event(evsel);
684 pr_err("Cannot add AUX area sampling to an AUX area event\n");
686 pr_err("Cannot add AUX area sampling to a group leader\n");
690 if (sz > MAX_AUX_SAMPLE_SIZE) {
691 pr_err("AUX area sample size %u too big, max. %d\n",
692 sz, MAX_AUX_SAMPLE_SIZE);
696 if (!has_aux_leader) {
697 pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
700 evsel__set_sample_bit(evsel, AUX);
701 opts->auxtrace_sample_mode = true;
703 evsel__reset_sample_bit(evsel, AUX);
707 if (!opts->auxtrace_sample_mode) {
708 pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
712 if (!perf_can_aux_sample()) {
713 pr_err("AUX area sampling is not supported by kernel\n");
720 int auxtrace_parse_sample_options(struct auxtrace_record *itr,
721 struct evlist *evlist,
722 struct record_opts *opts, const char *str)
724 struct evsel_config_term *term;
725 struct evsel *aux_evsel;
726 bool has_aux_sample_size = false;
727 bool has_aux_leader = false;
736 pr_err("No AUX area event to sample\n");
740 sz = strtoul(str, &endptr, 0);
741 if (*endptr || sz > UINT_MAX) {
742 pr_err("Bad AUX area sampling option: '%s'\n", str);
747 sz = itr->default_aux_sample_size;
750 sz = DEFAULT_AUX_SAMPLE_SIZE;
752 /* Set aux_sample_size based on --aux-sample option */
753 evlist__for_each_entry(evlist, evsel) {
754 if (evsel__is_group_leader(evsel)) {
755 has_aux_leader = evsel__is_aux_event(evsel);
756 } else if (has_aux_leader) {
757 evsel->core.attr.aux_sample_size = sz;
762 /* Override with aux_sample_size from config term */
763 evlist__for_each_entry(evlist, evsel) {
764 if (evsel__is_aux_event(evsel))
766 term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
768 has_aux_sample_size = true;
769 evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
770 /* If possible, group with the AUX event */
771 if (aux_evsel && evsel->core.attr.aux_sample_size)
772 evlist__regroup(evlist, aux_evsel, evsel);
776 if (!str && !has_aux_sample_size)
780 pr_err("No AUX area event to sample\n");
784 return auxtrace_validate_aux_sample_size(evlist, opts);
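/*
 * A typical invocation (assuming Intel PT as the AUX area event):
 *
 *	perf record --aux-sample -e '{intel_pt//u,branch-misses:u}'
 *
 * attaches an AUX area sample to each branch-misses sample. A per-event
 * aux_sample_size config term overrides the size given (or defaulted) on
 * the command line.
 */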
787 void auxtrace_regroup_aux_output(struct evlist *evlist)
789 struct evsel *evsel, *aux_evsel = NULL;
790 struct evsel_config_term *term;
792 evlist__for_each_entry(evlist, evsel) {
793 if (evsel__is_aux_event(evsel))
795 term = evsel__get_config_term(evsel, AUX_OUTPUT);
796 /* If possible, group with the AUX event */
797 if (term && aux_evsel)
798 evlist__regroup(evlist, aux_evsel, evsel);
802 struct auxtrace_record *__weak
803 auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
809 static int auxtrace_index__alloc(struct list_head *head)
811 struct auxtrace_index *auxtrace_index;
813 auxtrace_index = malloc(sizeof(struct auxtrace_index));
817 auxtrace_index->nr = 0;
818 INIT_LIST_HEAD(&auxtrace_index->list);
820 list_add_tail(&auxtrace_index->list, head);
825 void auxtrace_index__free(struct list_head *head)
827 struct auxtrace_index *auxtrace_index, *n;
829 list_for_each_entry_safe(auxtrace_index, n, head, list) {
830 list_del_init(&auxtrace_index->list);
831 free(auxtrace_index);
835 static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
837 struct auxtrace_index *auxtrace_index;
840 if (list_empty(head)) {
841 err = auxtrace_index__alloc(head);
846 auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);
848 if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
849 err = auxtrace_index__alloc(head);
852 auxtrace_index = list_entry(head->prev, struct auxtrace_index,
856 return auxtrace_index;
859 int auxtrace_index__auxtrace_event(struct list_head *head,
860 union perf_event *event, off_t file_offset)
862 struct auxtrace_index *auxtrace_index;
865 auxtrace_index = auxtrace_index__last(head);
869 nr = auxtrace_index->nr;
870 auxtrace_index->entries[nr].file_offset = file_offset;
871 auxtrace_index->entries[nr].sz = event->header.size;
872 auxtrace_index->nr += 1;
877 static int auxtrace_index__do_write(int fd,
878 struct auxtrace_index *auxtrace_index)
880 struct auxtrace_index_entry ent;
883 for (i = 0; i < auxtrace_index->nr; i++) {
884 ent.file_offset = auxtrace_index->entries[i].file_offset;
885 ent.sz = auxtrace_index->entries[i].sz;
886 if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
892 int auxtrace_index__write(int fd, struct list_head *head)
894 struct auxtrace_index *auxtrace_index;
898 list_for_each_entry(auxtrace_index, head, list)
899 total += auxtrace_index->nr;
901 if (writen(fd, &total, sizeof(total)) != sizeof(total))
904 list_for_each_entry(auxtrace_index, head, list) {
905 err = auxtrace_index__do_write(fd, auxtrace_index);
913 static int auxtrace_index__process_entry(int fd, struct list_head *head,
916 struct auxtrace_index *auxtrace_index;
917 struct auxtrace_index_entry ent;
920 if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
923 auxtrace_index = auxtrace_index__last(head);
927 nr = auxtrace_index->nr;
929 auxtrace_index->entries[nr].file_offset =
930 bswap_64(ent.file_offset);
931 auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
933 auxtrace_index->entries[nr].file_offset = ent.file_offset;
934 auxtrace_index->entries[nr].sz = ent.sz;
937 auxtrace_index->nr = nr + 1;
942 int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
945 struct list_head *head = &session->auxtrace_index;
948 if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
954 if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
960 err = auxtrace_index__process_entry(fd, head, needs_swap);
968 static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
969 struct perf_session *session,
970 struct auxtrace_index_entry *ent)
972 return auxtrace_queues__add_indexed_event(queues, session,
973 ent->file_offset, ent->sz);
976 int auxtrace_queues__process_index(struct auxtrace_queues *queues,
977 struct perf_session *session)
979 struct auxtrace_index *auxtrace_index;
980 struct auxtrace_index_entry *ent;
984 if (auxtrace__dont_decode(session))
987 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
988 for (i = 0; i < auxtrace_index->nr; i++) {
989 ent = &auxtrace_index->entries[i];
990 err = auxtrace_queues__process_index_entry(queues,
1000 struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
1001 struct auxtrace_buffer *buffer)
1004 if (list_is_last(&buffer->list, &queue->head))
1006 return list_entry(buffer->list.next, struct auxtrace_buffer,
1009 if (list_empty(&queue->head))
1011 return list_entry(queue->head.next, struct auxtrace_buffer,
1016 struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
1017 struct perf_sample *sample,
1018 struct perf_session *session)
1020 struct perf_sample_id *sid;
1028 sid = evlist__id2sid(session->evlist, id);
1034 if (idx >= queues->nr_queues)
1037 return &queues->queue_array[idx];
1040 int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
1041 struct perf_session *session,
1042 struct perf_sample *sample, u64 data_offset,
1045 struct auxtrace_buffer buffer = {
1047 .data_offset = data_offset,
1048 .reference = reference,
1049 .size = sample->aux_sample.size,
1051 struct perf_sample_id *sid;
1052 u64 id = sample->id;
1058 sid = evlist__id2sid(session->evlist, id);
1063 buffer.tid = sid->tid;
1064 buffer.cpu = sid->cpu;
1066 return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
1074 static int auxtrace_queue_data_cb(struct perf_session *session,
1075 union perf_event *event, u64 offset,
1078 struct queue_data *qd = data;
1079 struct perf_sample sample;
1082 if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
1083 if (event->header.size < sizeof(struct perf_record_auxtrace))
1085 offset += event->header.size;
1086 return session->auxtrace->queue_data(session, NULL, event,
1090 if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
1093 err = evlist__parse_sample(session->evlist, event, &sample);
1097 if (!sample.aux_sample.size)
1100 offset += sample.aux_sample.data - (void *)event;
1102 return session->auxtrace->queue_data(session, &sample, NULL, offset);
1105 int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
1107 struct queue_data qd = {
1112 if (auxtrace__dont_decode(session))
1115 if (!session->auxtrace || !session->auxtrace->queue_data)
1118 return perf_session__peek_events(session, session->header.data_offset,
1119 session->header.data_size,
1120 auxtrace_queue_data_cb, &qd);
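/*
 * mmap() requires a page-aligned file offset, so the mapping is widened
 * to the enclosing page boundary. Example with 4KiB pages: data_offset
 * 0x1234 gives adj = 0x234; the file is mapped from offset 0x1000 for
 * size + 0x234 bytes, and ->data points 0x234 bytes into the mapping.
 */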
1123 void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
1125 int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
1126 size_t adj = buffer->data_offset & (page_size - 1);
1127 size_t size = buffer->size + adj;
1128 off_t file_offset = buffer->data_offset - adj;
1132 return buffer->data;
1134 addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
1135 if (addr == MAP_FAILED)
1138 buffer->mmap_addr = addr;
1139 buffer->mmap_size = size;
1141 buffer->data = addr + adj;
1143 return buffer->data;
1146 void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
1148 if (!buffer->data || !buffer->mmap_addr)
1150 munmap(buffer->mmap_addr, buffer->mmap_size);
1151 buffer->mmap_addr = NULL;
1152 buffer->mmap_size = 0;
1153 buffer->data = NULL;
1154 buffer->use_data = NULL;
1157 void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
1159 auxtrace_buffer__put_data(buffer);
1160 if (buffer->data_needs_freeing) {
1161 buffer->data_needs_freeing = false;
1162 zfree(&buffer->data);
1163 buffer->use_data = NULL;
1168 void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
1170 auxtrace_buffer__drop_data(buffer);
1174 void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
1175 int code, int cpu, pid_t pid, pid_t tid, u64 ip,
1176 const char *msg, u64 timestamp)
1180 memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));
1182 auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
1183 auxtrace_error->type = type;
1184 auxtrace_error->code = code;
1185 auxtrace_error->cpu = cpu;
1186 auxtrace_error->pid = pid;
1187 auxtrace_error->tid = tid;
1188 auxtrace_error->fmt = 1;
1189 auxtrace_error->ip = ip;
1190 auxtrace_error->time = timestamp;
1191 strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
1193 size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
1194 strlen(auxtrace_error->msg) + 1;
1195 auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
1198 int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
1199 struct perf_tool *tool,
1200 struct perf_session *session,
1201 perf_event__handler_t process)
1203 union perf_event *ev;
1207 pr_debug2("Synthesizing auxtrace information\n");
1208 priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
1209 ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
1213 ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
1214 ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
1216 err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
1221 err = process(tool, ev, NULL, NULL);
1227 static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
1229 struct evsel *new_leader = NULL;
1230 struct evsel *evsel;
1232 /* Find new leader for the group */
1233 evlist__for_each_entry(evlist, evsel) {
1234 if (!evsel__has_leader(evsel, leader) || evsel == leader)
1238 evsel__set_leader(evsel, new_leader);
1241 /* Update group information */
1243 zfree(&new_leader->group_name);
1244 new_leader->group_name = leader->group_name;
1245 leader->group_name = NULL;
1247 new_leader->core.nr_members = leader->core.nr_members - 1;
1248 leader->core.nr_members = 1;
1252 static void unleader_auxtrace(struct perf_session *session)
1254 struct evsel *evsel;
1256 evlist__for_each_entry(session->evlist, evsel) {
1257 if (auxtrace__evsel_is_auxtrace(session, evsel) &&
1258 evsel__is_group_leader(evsel)) {
1259 unleader_evsel(session->evlist, evsel);
1264 int perf_event__process_auxtrace_info(struct perf_session *session,
1265 union perf_event *event)
1267 enum auxtrace_type type = event->auxtrace_info.type;
1271 fprintf(stdout, " type: %u\n", type);
1274 case PERF_AUXTRACE_INTEL_PT:
1275 err = intel_pt_process_auxtrace_info(event, session);
1277 case PERF_AUXTRACE_INTEL_BTS:
1278 err = intel_bts_process_auxtrace_info(event, session);
1280 case PERF_AUXTRACE_ARM_SPE:
1281 err = arm_spe_process_auxtrace_info(event, session);
1283 case PERF_AUXTRACE_CS_ETM:
1284 err = cs_etm__process_auxtrace_info(event, session);
1286 case PERF_AUXTRACE_S390_CPUMSF:
1287 err = s390_cpumsf_process_auxtrace_info(event, session);
1289 case PERF_AUXTRACE_UNKNOWN:
1297 unleader_auxtrace(session);
1302 s64 perf_event__process_auxtrace(struct perf_session *session,
1303 union perf_event *event)
1308 fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
1309 event->auxtrace.size, event->auxtrace.offset,
1310 event->auxtrace.reference, event->auxtrace.idx,
1311 event->auxtrace.tid, event->auxtrace.cpu);
1313 if (auxtrace__dont_decode(session))
1314 return event->auxtrace.size;
1316 if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
1319 err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
1323 return event->auxtrace.size;
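/*
 * Note the s64 return value: on success the size of the AUX data is
 * returned so that the caller can skip past the trace bytes that follow
 * the PERF_RECORD_AUXTRACE record in the file.
 */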
1326 #define PERF_ITRACE_DEFAULT_PERIOD_TYPE PERF_ITRACE_PERIOD_NANOSECS
1327 #define PERF_ITRACE_DEFAULT_PERIOD 100000
1328 #define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ 16
1329 #define PERF_ITRACE_MAX_CALLCHAIN_SZ 1024
1330 #define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ 64
1331 #define PERF_ITRACE_MAX_LAST_BRANCH_SZ 1024
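/*
 * So, by default, one instructions sample is synthesized per 100000 ns
 * (i.e. every 100us), with 16-deep callchains and 64-entry branch stacks
 * where those features are requested.
 */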
1333 void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
1336 synth_opts->branches = true;
1337 synth_opts->transactions = true;
1338 synth_opts->ptwrites = true;
1339 synth_opts->pwr_events = true;
1340 synth_opts->other_events = true;
1341 synth_opts->errors = true;
1342 synth_opts->flc = true;
1343 synth_opts->llc = true;
1344 synth_opts->tlb = true;
1345 synth_opts->mem = true;
1346 synth_opts->remote_access = true;
1349 synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
1350 synth_opts->period = 1;
1351 synth_opts->calls = true;
1353 synth_opts->instructions = true;
1354 synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
1355 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
1357 synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
1358 synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
1359 synth_opts->initial_skip = 0;
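/*
 * Example --itrace strings for the parser below: "i1000i" synthesizes an
 * instructions event every 1000 instructions, "cr" synthesizes calls and
 * returns, and "e+o" sets bit (1 << ('o' - 'a')) in error_plus_flags,
 * which decoders that support it use to include overflow errors.
 */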
1362 static int get_flag(const char **ptr, unsigned int *flags)
1367 if (c >= 'a' && c <= 'z') {
1368 *flags |= 1 << (c - 'a');
1371 } else if (c == ' ') {
1380 static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
1386 if (get_flag(ptr, plus_flags))
1391 if (get_flag(ptr, minus_flags))
1404 * Please check tools/perf/Documentation/perf-script.txt for information
1405 * about the options parsed here, which are introduced after this cset,
1406 * when support for these options is added to 'perf script'.
1408 int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
1409 const char *str, int unset)
1413 bool period_type_set = false;
1414 bool period_set = false;
1416 synth_opts->set = true;
1419 synth_opts->dont_decode = true;
1424 itrace_synth_opts__set_default(synth_opts,
1425 synth_opts->default_no_sample);
1429 for (p = str; *p;) {
1432 synth_opts->instructions = true;
1433 while (*p == ' ' || *p == ',')
1436 synth_opts->period = strtoull(p, &endptr, 10);
1439 while (*p == ' ' || *p == ',')
1443 synth_opts->period_type =
1444 PERF_ITRACE_PERIOD_INSTRUCTIONS;
1445 period_type_set = true;
1448 synth_opts->period_type =
1449 PERF_ITRACE_PERIOD_TICKS;
1450 period_type_set = true;
1453 synth_opts->period *= 1000;
1456 synth_opts->period *= 1000;
1461 synth_opts->period_type =
1462 PERF_ITRACE_PERIOD_NANOSECS;
1463 period_type_set = true;
1473 synth_opts->branches = true;
1476 synth_opts->transactions = true;
1479 synth_opts->ptwrites = true;
1482 synth_opts->pwr_events = true;
1485 synth_opts->other_events = true;
1488 synth_opts->errors = true;
1489 if (get_flags(&p, &synth_opts->error_plus_flags,
1490 &synth_opts->error_minus_flags))
1494 synth_opts->log = true;
1495 if (get_flags(&p, &synth_opts->log_plus_flags,
1496 &synth_opts->log_minus_flags))
1500 synth_opts->branches = true;
1501 synth_opts->calls = true;
1504 synth_opts->branches = true;
1505 synth_opts->returns = true;
1510 synth_opts->add_callchain = true;
1512 synth_opts->callchain = true;
1513 synth_opts->callchain_sz =
1514 PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
1515 while (*p == ' ' || *p == ',')
1520 val = strtoul(p, &endptr, 10);
1522 if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
1524 synth_opts->callchain_sz = val;
1530 synth_opts->add_last_branch = true;
1532 synth_opts->last_branch = true;
1533 synth_opts->last_branch_sz =
1534 PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
1535 while (*p == ' ' || *p == ',')
1540 val = strtoul(p, &endptr, 10);
1543 val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
1545 synth_opts->last_branch_sz = val;
1549 synth_opts->initial_skip = strtoul(p, &endptr, 10);
1555 synth_opts->flc = true;
1558 synth_opts->llc = true;
1561 synth_opts->tlb = true;
1564 synth_opts->remote_access = true;
1567 synth_opts->mem = true;
1570 synth_opts->quick += 1;
1573 synth_opts->timeless_decoding = true;
1583 if (synth_opts->instructions) {
1584 if (!period_type_set)
1585 synth_opts->period_type =
1586 PERF_ITRACE_DEFAULT_PERIOD_TYPE;
1588 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
1594 pr_err("Bad Instruction Tracing options '%s'\n", str);
1598 int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
1600 return itrace_do_parse_synth_opts(opt->value, str, unset);
1603 static const char * const auxtrace_error_type_name[] = {
1604 [PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
1607 static const char *auxtrace_error_name(int type)
1609 const char *error_type_name = NULL;
1611 if (type < PERF_AUXTRACE_ERROR_MAX)
1612 error_type_name = auxtrace_error_type_name[type];
1613 if (!error_type_name)
1614 error_type_name = "unknown AUX";
1615 return error_type_name;
1618 size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
1620 struct perf_record_auxtrace_error *e = &event->auxtrace_error;
1621 unsigned long long nsecs = e->time;
1622 const char *msg = e->msg;
1625 ret = fprintf(fp, " %s error type %u",
1626 auxtrace_error_name(e->type), e->type);
1628 if (e->fmt && nsecs) {
1629 unsigned long secs = nsecs / NSEC_PER_SEC;
1631 nsecs -= secs * NSEC_PER_SEC;
1632 ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
1634 ret += fprintf(fp, " time 0");
1638 msg = (const char *)&e->time;
1640 ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
1641 e->cpu, e->pid, e->tid, e->ip, e->code, msg);
1645 void perf_session__auxtrace_error_inc(struct perf_session *session,
1646 union perf_event *event)
1648 struct perf_record_auxtrace_error *e = &event->auxtrace_error;
1650 if (e->type < PERF_AUXTRACE_ERROR_MAX)
1651 session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
1654 void events_stats__auxtrace_error_warn(const struct events_stats *stats)
1658 for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
1659 if (!stats->nr_auxtrace_errors[i])
1661 ui__warning("%u %s errors\n",
1662 stats->nr_auxtrace_errors[i],
1663 auxtrace_error_name(i));
1667 int perf_event__process_auxtrace_error(struct perf_session *session,
1668 union perf_event *event)
1670 if (auxtrace__dont_decode(session))
1673 perf_event__fprintf_auxtrace_error(event, stdout);
1677 static int __auxtrace_mmap__read(struct mmap *map,
1678 struct auxtrace_record *itr,
1679 struct perf_tool *tool, process_auxtrace_t fn,
1680 bool snapshot, size_t snapshot_size)
1682 struct auxtrace_mmap *mm = &map->auxtrace_mmap;
1683 u64 head, old = mm->prev, offset, ref;
1684 unsigned char *data = mm->base;
1685 size_t size, head_off, old_off, len1, len2, padding;
1686 union perf_event ev;
1687 void *data1, *data2;
1690 head = auxtrace_mmap__read_snapshot_head(mm);
1691 if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
1695 head = auxtrace_mmap__read_head(mm);
1701 pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
1702 mm->idx, old, head, head - old);
1705 head_off = head & mm->mask;
1706 old_off = old & mm->mask;
1708 head_off = head % mm->len;
1709 old_off = old % mm->len;
1712 if (head_off > old_off)
1713 size = head_off - old_off;
1715 size = mm->len - (old_off - head_off);
1717 if (snapshot && size > snapshot_size)
1718 size = snapshot_size;
1720 ref = auxtrace_record__reference(itr);
1722 if (head > old || size <= head || mm->mask) {
1723 offset = head - size;
1726 * When the buffer size is not a power of 2, 'head' wraps at the
1727 * highest multiple of the buffer size, so we have to subtract
1728 * the remainder here.
1730 u64 rem = (0ULL - mm->len) % mm->len;
1732 offset = head - size - rem;
1735 if (size > head_off) {
1736 len1 = size - head_off;
1737 data1 = &data[mm->len - len1];
1742 data1 = &data[head_off - len1];
1747 if (itr->alignment) {
1748 unsigned int unwanted = len1 % itr->alignment;
1754 /* padding must be written by fn(), e.g. record__process_auxtrace() */
1755 padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
1757 padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
1759 memset(&ev, 0, sizeof(ev));
1760 ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
1761 ev.auxtrace.header.size = sizeof(ev.auxtrace);
1762 ev.auxtrace.size = size + padding;
1763 ev.auxtrace.offset = offset;
1764 ev.auxtrace.reference = ref;
1765 ev.auxtrace.idx = mm->idx;
1766 ev.auxtrace.tid = mm->tid;
1767 ev.auxtrace.cpu = mm->cpu;
1769 if (fn(tool, map, &ev, data1, len1, data2, len2))
1775 auxtrace_mmap__write_tail(mm, head);
1776 if (itr->read_finish) {
1779 err = itr->read_finish(itr, mm->idx);
1788 int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
1789 struct perf_tool *tool, process_auxtrace_t fn)
1791 return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
1794 int auxtrace_mmap__read_snapshot(struct mmap *map,
1795 struct auxtrace_record *itr,
1796 struct perf_tool *tool, process_auxtrace_t fn,
1797 size_t snapshot_size)
1799 return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
1803 * struct auxtrace_cache - hash table to implement a cache
1804 * @hashtable: the hashtable
1805 * @sz: hashtable size (number of hlists)
1806 * @entry_size: size of an entry
1807 * @limit: limit the number of entries to this maximum; when reached, the cache
1808 * is dropped and caching begins again with an empty cache
1809 * @cnt: current number of entries
1810 * @bits: hashtable size (@sz = 2^@bits)
1812 struct auxtrace_cache {
1813 struct hlist_head *hashtable;
1821 struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
1822 unsigned int limit_percent)
1824 struct auxtrace_cache *c;
1825 struct hlist_head *ht;
1828 c = zalloc(sizeof(struct auxtrace_cache));
1834 ht = calloc(sz, sizeof(struct hlist_head));
1838 for (i = 0; i < sz; i++)
1839 INIT_HLIST_HEAD(&ht[i]);
1843 c->entry_size = entry_size;
1844 c->limit = (c->sz * limit_percent) / 100;
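/*
 * Usage sketch: cached entries embed struct auxtrace_cache_entry as their
 * first member ('struct my_entry' and its 'entry' member are illustrative
 * names):
 *
 *	struct auxtrace_cache *c;
 *	struct my_entry *e;
 *
 *	c = auxtrace_cache__new(10, sizeof(struct my_entry), 200);
 *	e = auxtrace_cache__alloc_entry(c);
 *	auxtrace_cache__add(c, key, &e->entry);
 *	e = auxtrace_cache__lookup(c, key);
 */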
1854 static void auxtrace_cache__drop(struct auxtrace_cache *c)
1856 struct auxtrace_cache_entry *entry;
1857 struct hlist_node *tmp;
1863 for (i = 0; i < c->sz; i++) {
1864 hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
1865 hlist_del(&entry->hash);
1866 auxtrace_cache__free_entry(c, entry);
1873 void auxtrace_cache__free(struct auxtrace_cache *c)
1878 auxtrace_cache__drop(c);
1879 zfree(&c->hashtable);
1883 void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
1885 return malloc(c->entry_size);
1888 void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
1894 int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
1895 struct auxtrace_cache_entry *entry)
1897 if (c->limit && ++c->cnt > c->limit)
1898 auxtrace_cache__drop(c);
1901 hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
1906 static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
1909 struct auxtrace_cache_entry *entry;
1910 struct hlist_head *hlist;
1911 struct hlist_node *n;
1916 hlist = &c->hashtable[hash_32(key, c->bits)];
1917 hlist_for_each_entry_safe(entry, n, hlist, hash) {
1918 if (entry->key == key) {
1919 hlist_del(&entry->hash);
1927 void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
1929 struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);
1931 auxtrace_cache__free_entry(c, entry);
1934 void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
1936 struct auxtrace_cache_entry *entry;
1937 struct hlist_head *hlist;
1942 hlist = &c->hashtable[hash_32(key, c->bits)];
1943 hlist_for_each_entry(entry, hlist, hash) {
1944 if (entry->key == key)
1951 static void addr_filter__free_str(struct addr_filter *filt)
1954 filt->action = NULL;
1955 filt->sym_from = NULL;
1956 filt->sym_to = NULL;
1957 filt->filename = NULL;
1960 static struct addr_filter *addr_filter__new(void)
1962 struct addr_filter *filt = zalloc(sizeof(*filt));
1965 INIT_LIST_HEAD(&filt->list);
1970 static void addr_filter__free(struct addr_filter *filt)
1973 addr_filter__free_str(filt);
1977 static void addr_filters__add(struct addr_filters *filts,
1978 struct addr_filter *filt)
1980 list_add_tail(&filt->list, &filts->head);
1984 static void addr_filters__del(struct addr_filters *filts,
1985 struct addr_filter *filt)
1987 list_del_init(&filt->list);
1991 void addr_filters__init(struct addr_filters *filts)
1993 INIT_LIST_HEAD(&filts->head);
1997 void addr_filters__exit(struct addr_filters *filts)
1999 struct addr_filter *filt, *n;
2001 list_for_each_entry_safe(filt, n, &filts->head, list) {
2002 addr_filters__del(filts, filt);
2003 addr_filter__free(filt);
2007 static int parse_num_or_str(char **inp, u64 *num, const char **str,
2008 const char *str_delim)
2010 *inp += strspn(*inp, " ");
2012 if (isdigit(**inp)) {
2018 *num = strtoull(*inp, &endptr, 0);
2029 *inp += strspn(*inp, " ");
2031 n = strcspn(*inp, str_delim);
2043 static int parse_action(struct addr_filter *filt)
2045 if (!strcmp(filt->action, "filter")) {
2048 } else if (!strcmp(filt->action, "start")) {
2050 } else if (!strcmp(filt->action, "stop")) {
2051 filt->start = false;
2052 } else if (!strcmp(filt->action, "tracestop")) {
2053 filt->start = false;
2055 filt->action += 5; /* Change 'tracestop' to 'stop' */
2062 static int parse_sym_idx(char **inp, int *idx)
2066 *inp += strspn(*inp, " ");
2073 if (**inp == 'g' || **inp == 'G') {
2081 num = strtoul(*inp, &endptr, 0);
2084 if (endptr == *inp || num > INT_MAX)
2093 static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
2095 int err = parse_num_or_str(inp, num, str, " ");
2098 err = parse_sym_idx(inp, idx);
2103 static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
2108 filt->str = fstr = strdup(*filter_inp);
2112 err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
2116 err = parse_action(filt);
2120 err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
2121 &filt->sym_from_idx);
2125 fstr += strspn(fstr, " ");
2129 err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
2136 fstr += strspn(fstr, " ");
2140 err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
2145 fstr += strspn(fstr, " ,");
2147 *filter_inp += fstr - filt->str;
2152 addr_filter__free_str(filt);
2157 int addr_filters__parse_bare_filter(struct addr_filters *filts,
2160 struct addr_filter *filt;
2161 const char *fstr = filter;
2165 filt = addr_filter__new();
2166 err = parse_one_filter(filt, &fstr);
2168 addr_filter__free(filt);
2169 addr_filters__exit(filts);
2172 addr_filters__add(filts, filt);
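/*
 * Example filter strings accepted by the parser above, with multiple
 * filters separated by space or comma:
 *
 *	filter main @ /usr/bin/ls	trace only main() in ls
 *	start func1 / 0x100		start tracing at func1 for 0x100 bytes
 *	tracestop 0xffffffff81000000	stop tracing at that kernel address
 *
 * "tracestop" is passed on as "stop" (see parse_action() above).
 */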
2191 static bool kern_sym_match(struct sym_args *args, const char *name, char type)
2193 /* A function with the same name, and either global, the n'th one found, or any */
2194 return kallsyms__is_function(type) &&
2195 !strcmp(name, args->name) &&
2196 ((args->global && isupper(type)) ||
2197 (args->selected && ++(args->cnt) == args->idx) ||
2198 (!args->global && !args->selected));
2201 static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
2203 struct sym_args *args = arg;
2205 if (args->started) {
2207 args->size = start - args->start;
2208 if (args->selected) {
2211 } else if (kern_sym_match(args, name, type)) {
2212 args->duplicate = true;
2215 } else if (kern_sym_match(args, name, type)) {
2216 args->started = true;
2217 args->start = start;
2223 static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
2225 struct sym_args *args = arg;
2227 if (kern_sym_match(args, name, type)) {
2228 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
2229 ++args->cnt, start, type, name);
2231 } else if (args->near) {
2233 pr_err("\t\twhich is near\t\t%s\n", name);
2239 static int sym_not_found_error(const char *sym_name, int idx)
2242 pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
2245 pr_err("Global symbol '%s' not found.\n", sym_name);
2247 pr_err("Symbol '%s' not found.\n", sym_name);
2249 pr_err("Note that symbols must be functions.\n");
2254 static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
2256 struct sym_args args = {
2260 .selected = idx > 0,
2267 err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
2269 pr_err("Failed to parse /proc/kallsyms\n");
2273 if (args.duplicate) {
2274 pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
2276 kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
2277 pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
2279 pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
2283 if (!args.started) {
2284 pr_err("Kernel symbol lookup: ");
2285 return sym_not_found_error(sym_name, idx);
2288 *start = args.start;
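/*
 * Example of disambiguation: if 'foo' appears twice in /proc/kallsyms,
 * "filter foo" fails and lists both candidates, "filter foo #2" selects
 * the second occurrence, and "filter foo #g" (or "#0") selects the global
 * definition.
 */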
2294 static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
2295 char type, u64 start)
2297 struct sym_args *args = arg;
2299 if (!kallsyms__is_function(type))
2302 if (!args->started) {
2303 args->started = true;
2304 args->start = start;
2306 /* Don't know exactly where the kernel ends, so we add a page */
2307 args->size = round_up(start, page_size) + page_size - args->start;
2312 static int addr_filter__entire_kernel(struct addr_filter *filt)
2314 struct sym_args args = { .started = false };
2317 err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
2318 if (err < 0 || !args.started) {
2319 pr_err("Failed to parse /proc/kallsyms\n");
2323 filt->addr = args.start;
2324 filt->size = args.size;
2329 static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
2331 if (start + size >= filt->addr)
2334 if (filt->sym_from) {
2335 pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
2336 filt->sym_to, start, filt->sym_from, filt->addr);
2338 pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64")\n",
2339 filt->sym_to, start, filt->addr);
2345 static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
2347 bool no_size = false;
2351 if (symbol_conf.kptr_restrict) {
2352 pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
2356 if (filt->sym_from && !strcmp(filt->sym_from, "*"))
2357 return addr_filter__entire_kernel(filt);
2359 if (filt->sym_from) {
2360 err = find_kern_sym(filt->sym_from, &start, &size,
2361 filt->sym_from_idx);
2365 if (filt->range && !filt->size && !filt->sym_to) {
2372 err = find_kern_sym(filt->sym_to, &start, &size,
2377 err = check_end_after_start(filt, start, size);
2380 filt->size = start + size - filt->addr;
2384 /* The very last symbol in kallsyms does not imply a particular size */
2386 pr_err("Cannot determine size of symbol '%s'\n",
2387 filt->sym_to ? filt->sym_to : filt->sym_from);
2394 static struct dso *load_dso(const char *name)
2399 map = dso__new_map(name);
2403 if (map__load(map) < 0)
2404 pr_err("File '%s' not found or has no symbols.\n", name);
2406 dso = dso__get(map->dso);
2413 static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
2416 /* Same name, and either global, the n'th one found, or any */
2417 return !arch__compare_symbol_names(name, sym->name) &&
2418 ((!idx && sym->binding == STB_GLOBAL) ||
2419 (idx > 0 && ++*cnt == idx) ||
2423 static void print_duplicate_syms(struct dso *dso, const char *sym_name)
2429 pr_err("Multiple symbols with name '%s'\n", sym_name);
2431 sym = dso__first_symbol(dso);
2433 if (dso_sym_match(sym, sym_name, &cnt, -1)) {
2434 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
2436 sym->binding == STB_GLOBAL ? 'g' :
2437 sym->binding == STB_LOCAL ? 'l' : 'w',
2442 pr_err("\t\twhich is near\t\t%s\n", sym->name);
2444 sym = dso__next_symbol(sym);
2447 pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
2449 pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
2452 static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
2461 sym = dso__first_symbol(dso);
2465 *size = sym->start - *start;
2469 } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
2470 print_duplicate_syms(dso, sym_name);
2473 } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
2474 *start = sym->start;
2475 *size = sym->end - sym->start;
2477 sym = dso__next_symbol(sym);
2481 return sym_not_found_error(sym_name, idx);
2486 static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
2488 if (dso__data_file_size(dso, NULL)) {
2489 pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
2495 filt->size = dso->data.file_size;
2500 static int addr_filter__resolve_syms(struct addr_filter *filt)
2506 if (!filt->sym_from && !filt->sym_to)
2509 if (!filt->filename)
2510 return addr_filter__resolve_kernel_syms(filt);
2512 dso = load_dso(filt->filename);
2514 pr_err("Failed to load symbols from: %s\n", filt->filename);
2518 if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
2519 err = addr_filter__entire_dso(filt, dso);
2523 if (filt->sym_from) {
2524 err = find_dso_sym(dso, filt->sym_from, &start, &size,
2525 filt->sym_from_idx);
2529 if (filt->range && !filt->size && !filt->sym_to)
2534 err = find_dso_sym(dso, filt->sym_to, &start, &size,
2539 err = check_end_after_start(filt, start, size);
2543 filt->size = start + size - filt->addr;
2552 static char *addr_filter__to_str(struct addr_filter *filt)
2554 char filename_buf[PATH_MAX];
2555 const char *at = "";
2556 const char *fn = "";
2560 if (filt->filename) {
2562 fn = realpath(filt->filename, filename_buf);
2568 err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
2569 filt->action, filt->addr, filt->size, at, fn);
2571 err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
2572 filt->action, filt->addr, at, fn);
2575 return err < 0 ? NULL : filter;
2578 static int parse_addr_filter(struct evsel *evsel, const char *filter,
2581 struct addr_filters filts;
2582 struct addr_filter *filt;
2585 addr_filters__init(&filts);
2587 err = addr_filters__parse_bare_filter(&filts, filter);
2591 if (filts.cnt > max_nr) {
2592 pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
2598 list_for_each_entry(filt, &filts.head, list) {
2601 err = addr_filter__resolve_syms(filt);
2605 new_filter = addr_filter__to_str(filt);
2611 if (evsel__append_addr_filter(evsel, new_filter)) {
2618 addr_filters__exit(&filts);
2621 pr_err("Failed to parse address filter: '%s'\n", filter);
2622 pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
2623 pr_err("Where multiple filters are separated by space or comma.\n");
2629 static int evsel__nr_addr_filter(struct evsel *evsel)
2631 struct perf_pmu *pmu = evsel__find_pmu(evsel);
2632 int nr_addr_filters = 0;
2637 perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);
2639 return nr_addr_filters;
2642 int auxtrace_parse_filters(struct evlist *evlist)
2644 struct evsel *evsel;
2648 evlist__for_each_entry(evlist, evsel) {
2649 filter = evsel->filter;
2650 max_nr = evsel__nr_addr_filter(evsel);
2651 if (!filter || !max_nr)
2653 evsel->filter = NULL;
2654 err = parse_addr_filter(evsel, filter, max_nr);
2658 pr_debug("Address filter: %s\n", evsel->filter);
2664 int auxtrace__process_event(struct perf_session *session, union perf_event *event,
2665 struct perf_sample *sample, struct perf_tool *tool)
2667 if (!session->auxtrace)
2670 return session->auxtrace->process_event(session, event, sample, tool);
2673 void auxtrace__dump_auxtrace_sample(struct perf_session *session,
2674 struct perf_sample *sample)
2676 if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
2677 auxtrace__dont_decode(session))
2680 session->auxtrace->dump_auxtrace_sample(session, sample);
2683 int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
2685 if (!session->auxtrace)
2688 return session->auxtrace->flush_events(session, tool);
2691 void auxtrace__free_events(struct perf_session *session)
2693 if (!session->auxtrace)
2696 return session->auxtrace->free_events(session);
2699 void auxtrace__free(struct perf_session *session)
2701 if (!session->auxtrace)
2704 return session->auxtrace->free(session);
2707 bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
2708 struct evsel *evsel)
2710 if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
2713 return session->auxtrace->evsel_is_auxtrace(session, evsel);