// SPDX-License-Identifier: GPL-2.0-only
/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <sys/param.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#include "evsel_config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
#include <linux/hash.h>
#include <subcmd/parse-options.h>
#include "intel-bts.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"
#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>
/*
 * Make a group from 'leader' to 'last', requiring that the events were not
 * already grouped to a different leader.
 */
static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last)
if (!evsel__is_group_leader(leader))
evlist__for_each_entry(evlist, evsel) {
if (!(evsel->leader == leader ||
(evsel->leader == evsel &&
evsel->core.nr_members <= 1)))
} else if (evsel == leader) {
evlist__for_each_entry(evlist, evsel) {
if (evsel->leader != leader) {
evsel->leader = leader;
if (leader->core.nr_members < 1)
leader->core.nr_members = 1;
leader->core.nr_members += 1;
} else if (evsel == leader) {
static bool auxtrace__dont_decode(struct perf_session *session)
return !session->itrace_synth_opts ||
session->itrace_synth_opts->dont_decode;
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
struct auxtrace_mmap_params *mp,
void *userpg, int fd)
struct perf_event_mmap_page *pc = userpg;
WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
pr_err("Cannot use AUX area tracing mmaps\n");
pc->aux_offset = mp->offset;
pc->aux_size = mp->len;
mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
if (mm->base == MAP_FAILED) {
pr_debug2("failed to mmap AUX area\n");
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
munmap(mm->base, mm->len);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
off_t auxtrace_offset,
unsigned int auxtrace_pages,
bool auxtrace_overwrite)
if (auxtrace_pages) {
mp->offset = auxtrace_offset;
mp->len = auxtrace_pages * (size_t)page_size;
mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
pr_debug2("AUX area mmap length %zu\n", mp->len);
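/*
 * Illustrative sketch (not part of the original file): how the mask set up
 * above is meant to be consumed.  For a power-of-2 length the buffer offset
 * is a cheap AND with mp->len - 1; otherwise the mask is 0 and readers fall
 * back to a modulo, as __auxtrace_mmap__read() does below.
 */
#if 0
static size_t example_aux_buf_offset(u64 head, size_t len, size_t mask)
{
	/* e.g. len = 64KiB -> mask = 0xffff; len = 24KiB -> mask = 0 */
	return mask ? (head & mask) : (head % len);
}
#endif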
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
struct evlist *evlist, int idx,
mp->cpu = evlist->core.cpus->map[idx];
if (evlist->core.threads)
mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
#define AUXTRACE_INIT_NR_QUEUES 32
static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
struct auxtrace_queue *queue_array;
unsigned int max_nr_queues, i;
max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
if (nr_queues > max_nr_queues)
queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
for (i = 0; i < nr_queues; i++) {
INIT_LIST_HEAD(&queue_array[i].head);
queue_array[i].priv = NULL;
int auxtrace_queues__init(struct auxtrace_queues *queues)
queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
if (!queues->queue_array)
static int auxtrace_queues__grow(struct auxtrace_queues *queues,
unsigned int new_nr_queues)
unsigned int nr_queues = queues->nr_queues;
struct auxtrace_queue *queue_array;
nr_queues = AUXTRACE_INIT_NR_QUEUES;
while (nr_queues && nr_queues < new_nr_queues)
if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
queue_array = auxtrace_alloc_queue_array(nr_queues);
for (i = 0; i < queues->nr_queues; i++) {
list_splice_tail(&queues->queue_array[i].head,
&queue_array[i].head);
queue_array[i].tid = queues->queue_array[i].tid;
queue_array[i].cpu = queues->queue_array[i].cpu;
queue_array[i].set = queues->queue_array[i].set;
queue_array[i].priv = queues->queue_array[i].priv;
queues->nr_queues = nr_queues;
queues->queue_array = queue_array;
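/*
 * Worked example (illustrative): with the initial 32 queues, queueing a
 * buffer with idx 40 triggers auxtrace_queues__grow(queues, 41); nr_queues
 * doubles from 32 to 64, each old queue's buffer list is spliced into the
 * new array, and queue 40 becomes addressable.
 */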
static void *auxtrace_copy_data(u64 size, struct perf_session *session)
int fd = perf_data__fd(session->data);
if (size > SSIZE_MAX)
ret = readn(fd, p, size);
if (ret != (ssize_t)size) {
static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
struct auxtrace_buffer *buffer)
struct auxtrace_queue *queue;
if (idx >= queues->nr_queues) {
err = auxtrace_queues__grow(queues, idx + 1);
queue = &queues->queue_array[idx];
queue->tid = buffer->tid;
queue->cpu = buffer->cpu;
} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
queue->cpu, queue->tid, buffer->cpu, buffer->tid);
buffer->buffer_nr = queues->next_buffer_nr++;
list_add_tail(&buffer->list, &queue->head);
queues->new_data = true;
queues->populated = true;
/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
struct auxtrace_buffer *buffer)
u64 sz = buffer->size;
bool consecutive = false;
struct auxtrace_buffer *b;
while (sz > BUFFER_LIMIT_FOR_32_BIT) {
b = memdup(buffer, sizeof(struct auxtrace_buffer));
b->size = BUFFER_LIMIT_FOR_32_BIT;
b->consecutive = consecutive;
err = auxtrace_queues__queue_buffer(queues, idx, b);
auxtrace_buffer__free(b);
buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
sz -= BUFFER_LIMIT_FOR_32_BIT;
buffer->consecutive = consecutive;
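/*
 * Worked example (illustrative): on a 32-bit build a 70MiB buffer is queued
 * as 32MiB + 32MiB + 6MiB.  The first chunk keeps consecutive == false and
 * the later chunks get consecutive == true, telling decoders the trace data
 * continues without a gap.
 */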
static bool filter_cpu(struct perf_session *session, int cpu)
unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
struct perf_session *session,
struct auxtrace_buffer *buffer,
struct auxtrace_buffer **buffer_ptr)
if (filter_cpu(session, buffer->cpu))
buffer = memdup(buffer, sizeof(*buffer));
if (session->one_mmap) {
buffer->data = buffer->data_offset - session->one_mmap_offset +
session->one_mmap_addr;
} else if (perf_data__is_pipe(session->data)) {
buffer->data = auxtrace_copy_data(buffer->size, session);
buffer->data_needs_freeing = true;
} else if (BITS_PER_LONG == 32 &&
buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
err = auxtrace_queues__split_buffer(queues, idx, buffer);
err = auxtrace_queues__queue_buffer(queues, idx, buffer);
/* FIXME: Doesn't work for split buffer */
*buffer_ptr = buffer;
auxtrace_buffer__free(buffer);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
struct perf_session *session,
union perf_event *event, off_t data_offset,
struct auxtrace_buffer **buffer_ptr)
struct auxtrace_buffer buffer = {
.tid = event->auxtrace.tid,
.cpu = event->auxtrace.cpu,
.data_offset = data_offset,
.offset = event->auxtrace.offset,
.reference = event->auxtrace.reference,
.size = event->auxtrace.size,
unsigned int idx = event->auxtrace.idx;
return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
struct perf_session *session,
off_t file_offset, size_t sz)
union perf_event *event;
char buf[PERF_SAMPLE_MAX_SIZE];
err = perf_session__peek_event(session, file_offset, buf,
PERF_SAMPLE_MAX_SIZE, &event, NULL);
if (event->header.type == PERF_RECORD_AUXTRACE) {
if (event->header.size < sizeof(struct perf_record_auxtrace) ||
event->header.size != sz) {
file_offset += event->header.size;
err = auxtrace_queues__add_event(queues, session, event,
void auxtrace_queues__free(struct auxtrace_queues *queues)
for (i = 0; i < queues->nr_queues; i++) {
while (!list_empty(&queues->queue_array[i].head)) {
struct auxtrace_buffer *buffer;
buffer = list_entry(queues->queue_array[i].head.next,
struct auxtrace_buffer, list);
list_del_init(&buffer->list);
auxtrace_buffer__free(buffer);
zfree(&queues->queue_array);
queues->nr_queues = 0;
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
unsigned int pos, unsigned int queue_nr,
parent = (pos - 1) >> 1;
if (heap_array[parent].ordinal <= ordinal)
heap_array[pos] = heap_array[parent];
heap_array[pos].queue_nr = queue_nr;
heap_array[pos].ordinal = ordinal;
int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
struct auxtrace_heap_item *heap_array;
if (queue_nr >= heap->heap_sz) {
unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;
while (heap_sz <= queue_nr)
heap_array = realloc(heap->heap_array,
heap_sz * sizeof(struct auxtrace_heap_item));
heap->heap_array = heap_array;
heap->heap_sz = heap_sz;
auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);
void auxtrace_heap__free(struct auxtrace_heap *heap)
zfree(&heap->heap_array);
void auxtrace_heap__pop(struct auxtrace_heap *heap)
unsigned int pos, last, heap_cnt = heap->heap_cnt;
struct auxtrace_heap_item *heap_array;
heap_array = heap->heap_array;
unsigned int left, right;
left = (pos << 1) + 1;
if (left >= heap_cnt)
if (right >= heap_cnt) {
heap_array[pos] = heap_array[left];
if (heap_array[left].ordinal < heap_array[right].ordinal) {
heap_array[pos] = heap_array[left];
heap_array[pos] = heap_array[right];
auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
heap_array[last].ordinal);
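/*
 * Usage sketch (illustrative; process_queue() is a hypothetical helper):
 * the heap is a min-heap keyed by 'ordinal', typically a timestamp, so a
 * decoder can merge many queues in time order.
 */
#if 0
while (heap.heap_cnt) {
	unsigned int queue_nr = heap.heap_array[0].queue_nr;
	u64 next_ts;

	auxtrace_heap__pop(&heap);
	next_ts = process_queue(queue_nr);	/* decode up to the next timestamp */
	if (next_ts)
		auxtrace_heap__add(&heap, queue_nr, next_ts);
}
#endif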
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
struct evlist *evlist)
return itr->info_priv_size(itr, evlist);
static int auxtrace_not_supported(void)
pr_err("AUX area tracing is not supported on this architecture\n");
int auxtrace_record__info_fill(struct auxtrace_record *itr,
struct perf_session *session,
struct perf_record_auxtrace_info *auxtrace_info,
return itr->info_fill(itr, session, auxtrace_info, priv_size);
return auxtrace_not_supported();
void auxtrace_record__free(struct auxtrace_record *itr)
int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
if (itr && itr->snapshot_start)
return itr->snapshot_start(itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
if (!on_exit && itr && itr->snapshot_finish)
return itr->snapshot_finish(itr);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
struct auxtrace_mmap *mm,
unsigned char *data, u64 *head, u64 *old)
if (itr && itr->find_snapshot)
return itr->find_snapshot(itr, idx, mm, data, head, old);
int auxtrace_record__options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
itr->evlist = evlist;
return itr->recording_options(itr, evlist, opts);
u64 auxtrace_record__reference(struct auxtrace_record *itr)
return itr->reference(itr);
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
struct record_opts *opts, const char *str)
/* PMU-agnostic options */
opts->auxtrace_snapshot_on_exit = true;
return itr->parse_snapshot_options(itr, opts, str);
pr_err("No AUX area tracing to snapshot\n");
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
if (!itr->evlist || !itr->pmu)
evlist__for_each_entry(itr->evlist, evsel) {
if (evsel->core.attr.type == itr->pmu->type) {
return evlist__enable_event_idx(itr->evlist, evsel, idx);
/*
 * Event record size is 16-bit which results in a maximum size of about 64KiB.
 * Allow about 4KiB for the rest of the sample record, to give a maximum
 * AUX area sample size of 60KiB.
 */
#define MAX_AUX_SAMPLE_SIZE (60 * 1024)
/* Arbitrary default size if no other default provided */
#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)
static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
struct record_opts *opts)
bool has_aux_leader = false;
evlist__for_each_entry(evlist, evsel) {
sz = evsel->core.attr.aux_sample_size;
if (evsel__is_group_leader(evsel)) {
has_aux_leader = evsel__is_aux_event(evsel);
pr_err("Cannot add AUX area sampling to an AUX area event\n");
pr_err("Cannot add AUX area sampling to a group leader\n");
if (sz > MAX_AUX_SAMPLE_SIZE) {
pr_err("AUX area sample size %u too big, max. %d\n",
sz, MAX_AUX_SAMPLE_SIZE);
if (!has_aux_leader) {
pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
evsel__set_sample_bit(evsel, AUX);
opts->auxtrace_sample_mode = true;
evsel__reset_sample_bit(evsel, AUX);
if (!opts->auxtrace_sample_mode) {
pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
if (!perf_can_aux_sample()) {
pr_err("AUX area sampling is not supported by kernel\n");
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts, const char *str)
struct evsel_config_term *term;
struct evsel *aux_evsel;
bool has_aux_sample_size = false;
bool has_aux_leader = false;
pr_err("No AUX area event to sample\n");
sz = strtoul(str, &endptr, 0);
if (*endptr || sz > UINT_MAX) {
pr_err("Bad AUX area sampling option: '%s'\n", str);
sz = itr->default_aux_sample_size;
sz = DEFAULT_AUX_SAMPLE_SIZE;
/* Set aux_sample_size based on --aux-sample option */
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_group_leader(evsel)) {
has_aux_leader = evsel__is_aux_event(evsel);
} else if (has_aux_leader) {
evsel->core.attr.aux_sample_size = sz;
/* Override with aux_sample_size from config term */
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_aux_event(evsel))
term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
has_aux_sample_size = true;
evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
/* If possible, group with the AUX event */
if (aux_evsel && evsel->core.attr.aux_sample_size)
evlist__regroup(evlist, aux_evsel, evsel);
if (!str && !has_aux_sample_size)
pr_err("No AUX area event to sample\n");
return auxtrace_validate_aux_sample_size(evlist, opts);
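/*
 * Usage sketch (assumed command lines; see the perf-record documentation):
 * AUX area sampling is typically requested as
 *
 *	perf record --aux-sample -e '{intel_pt//u,branch-misses:u}'
 *
 * or per event via the config term, e.g. an AUX event grouped with an
 * event carrying aux-sample-size=8192.
 */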
void auxtrace_regroup_aux_output(struct evlist *evlist)
struct evsel *evsel, *aux_evsel = NULL;
struct evsel_config_term *term;
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_aux_event(evsel))
term = evsel__get_config_term(evsel, AUX_OUTPUT);
/* If possible, group with the AUX event */
if (term && aux_evsel)
evlist__regroup(evlist, aux_evsel, evsel);
struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
static int auxtrace_index__alloc(struct list_head *head)
struct auxtrace_index *auxtrace_index;
auxtrace_index = malloc(sizeof(struct auxtrace_index));
auxtrace_index->nr = 0;
INIT_LIST_HEAD(&auxtrace_index->list);
list_add_tail(&auxtrace_index->list, head);
void auxtrace_index__free(struct list_head *head)
struct auxtrace_index *auxtrace_index, *n;
list_for_each_entry_safe(auxtrace_index, n, head, list) {
list_del_init(&auxtrace_index->list);
free(auxtrace_index);
static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
struct auxtrace_index *auxtrace_index;
if (list_empty(head)) {
err = auxtrace_index__alloc(head);
auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);
if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
err = auxtrace_index__alloc(head);
auxtrace_index = list_entry(head->prev, struct auxtrace_index,
return auxtrace_index;
int auxtrace_index__auxtrace_event(struct list_head *head,
union perf_event *event, off_t file_offset)
struct auxtrace_index *auxtrace_index;
auxtrace_index = auxtrace_index__last(head);
nr = auxtrace_index->nr;
auxtrace_index->entries[nr].file_offset = file_offset;
auxtrace_index->entries[nr].sz = event->header.size;
auxtrace_index->nr += 1;
static int auxtrace_index__do_write(int fd,
struct auxtrace_index *auxtrace_index)
struct auxtrace_index_entry ent;
for (i = 0; i < auxtrace_index->nr; i++) {
ent.file_offset = auxtrace_index->entries[i].file_offset;
ent.sz = auxtrace_index->entries[i].sz;
if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
int auxtrace_index__write(int fd, struct list_head *head)
struct auxtrace_index *auxtrace_index;
list_for_each_entry(auxtrace_index, head, list)
total += auxtrace_index->nr;
if (writen(fd, &total, sizeof(total)) != sizeof(total))
list_for_each_entry(auxtrace_index, head, list) {
err = auxtrace_index__do_write(fd, auxtrace_index);
static int auxtrace_index__process_entry(int fd, struct list_head *head,
struct auxtrace_index *auxtrace_index;
struct auxtrace_index_entry ent;
if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
auxtrace_index = auxtrace_index__last(head);
nr = auxtrace_index->nr;
auxtrace_index->entries[nr].file_offset =
bswap_64(ent.file_offset);
auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
auxtrace_index->entries[nr].file_offset = ent.file_offset;
auxtrace_index->entries[nr].sz = ent.sz;
auxtrace_index->nr = nr + 1;
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
struct list_head *head = &session->auxtrace_index;
if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
err = auxtrace_index__process_entry(fd, head, needs_swap);
static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
struct perf_session *session,
struct auxtrace_index_entry *ent)
return auxtrace_queues__add_indexed_event(queues, session,
ent->file_offset, ent->sz);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
struct perf_session *session)
struct auxtrace_index *auxtrace_index;
struct auxtrace_index_entry *ent;
if (auxtrace__dont_decode(session))
list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
for (i = 0; i < auxtrace_index->nr; i++) {
ent = &auxtrace_index->entries[i];
err = auxtrace_queues__process_index_entry(queues,
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
struct auxtrace_buffer *buffer)
if (list_is_last(&buffer->list, &queue->head))
return list_entry(buffer->list.next, struct auxtrace_buffer,
if (list_empty(&queue->head))
return list_entry(queue->head.next, struct auxtrace_buffer,
struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
struct perf_sample *sample,
struct perf_session *session)
struct perf_sample_id *sid;
sid = evlist__id2sid(session->evlist, id);
if (idx >= queues->nr_queues)
return &queues->queue_array[idx];
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
struct perf_session *session,
struct perf_sample *sample, u64 data_offset,
struct auxtrace_buffer buffer = {
.data_offset = data_offset,
.reference = reference,
.size = sample->aux_sample.size,
struct perf_sample_id *sid;
u64 id = sample->id;
sid = evlist__id2sid(session->evlist, id);
buffer.tid = sid->tid;
buffer.cpu = sid->cpu;
return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
static int auxtrace_queue_data_cb(struct perf_session *session,
union perf_event *event, u64 offset,
struct queue_data *qd = data;
struct perf_sample sample;
if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
if (event->header.size < sizeof(struct perf_record_auxtrace))
offset += event->header.size;
return session->auxtrace->queue_data(session, NULL, event,
if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
err = evlist__parse_sample(session->evlist, event, &sample);
if (!sample.aux_sample.size)
offset += sample.aux_sample.data - (void *)event;
return session->auxtrace->queue_data(session, &sample, NULL, offset);
int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
struct queue_data qd = {
if (auxtrace__dont_decode(session))
if (!session->auxtrace || !session->auxtrace->queue_data)
return perf_session__peek_events(session, session->header.data_offset,
session->header.data_size,
auxtrace_queue_data_cb, &qd);
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
size_t adj = buffer->data_offset & (page_size - 1);
size_t size = buffer->size + adj;
off_t file_offset = buffer->data_offset - adj;
return buffer->data;
addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
if (addr == MAP_FAILED)
buffer->mmap_addr = addr;
buffer->mmap_size = size;
buffer->data = addr + adj;
return buffer->data;
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
if (!buffer->data || !buffer->mmap_addr)
munmap(buffer->mmap_addr, buffer->mmap_size);
buffer->mmap_addr = NULL;
buffer->mmap_size = 0;
buffer->data = NULL;
buffer->use_data = NULL;
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
auxtrace_buffer__put_data(buffer);
if (buffer->data_needs_freeing) {
buffer->data_needs_freeing = false;
zfree(&buffer->data);
buffer->use_data = NULL;
void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
auxtrace_buffer__drop_data(buffer);
void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
const char *msg, u64 timestamp)
memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));
auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
auxtrace_error->type = type;
auxtrace_error->code = code;
auxtrace_error->cpu = cpu;
auxtrace_error->pid = pid;
auxtrace_error->tid = tid;
auxtrace_error->fmt = 1;
auxtrace_error->ip = ip;
auxtrace_error->time = timestamp;
strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
strlen(auxtrace_error->msg) + 1;
auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
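/*
 * Worked example (illustrative): the record size is the offset of 'msg'
 * within the record plus strlen(msg) + 1 for the NUL; PERF_ALIGN() then
 * rounds it up to a multiple of sizeof(u64), e.g. 0x3b -> 0x40.
 */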
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
struct perf_tool *tool,
struct perf_session *session,
perf_event__handler_t process)
union perf_event *ev;
pr_debug2("Synthesizing auxtrace information\n");
priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
err = process(tool, ev, NULL, NULL);
static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
struct evsel *new_leader = NULL;
struct evsel *evsel;
/* Find new leader for the group */
evlist__for_each_entry(evlist, evsel) {
if (evsel->leader != leader || evsel == leader)
evsel->leader = new_leader;
/* Update group information */
zfree(&new_leader->group_name);
new_leader->group_name = leader->group_name;
leader->group_name = NULL;
new_leader->core.nr_members = leader->core.nr_members - 1;
leader->core.nr_members = 1;
static void unleader_auxtrace(struct perf_session *session)
struct evsel *evsel;
evlist__for_each_entry(session->evlist, evsel) {
if (auxtrace__evsel_is_auxtrace(session, evsel) &&
evsel__is_group_leader(evsel)) {
unleader_evsel(session->evlist, evsel);
int perf_event__process_auxtrace_info(struct perf_session *session,
union perf_event *event)
enum auxtrace_type type = event->auxtrace_info.type;
fprintf(stdout, " type: %u\n", type);
case PERF_AUXTRACE_INTEL_PT:
err = intel_pt_process_auxtrace_info(event, session);
case PERF_AUXTRACE_INTEL_BTS:
err = intel_bts_process_auxtrace_info(event, session);
case PERF_AUXTRACE_ARM_SPE:
err = arm_spe_process_auxtrace_info(event, session);
case PERF_AUXTRACE_CS_ETM:
err = cs_etm__process_auxtrace_info(event, session);
case PERF_AUXTRACE_S390_CPUMSF:
err = s390_cpumsf_process_auxtrace_info(event, session);
case PERF_AUXTRACE_UNKNOWN:
unleader_auxtrace(session);
s64 perf_event__process_auxtrace(struct perf_session *session,
union perf_event *event)
fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
event->auxtrace.size, event->auxtrace.offset,
event->auxtrace.reference, event->auxtrace.idx,
event->auxtrace.tid, event->auxtrace.cpu);
if (auxtrace__dont_decode(session))
return event->auxtrace.size;
if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
return event->auxtrace.size;
#define PERF_ITRACE_DEFAULT_PERIOD_TYPE PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD 100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ 16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ 1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ 64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ 1024
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
synth_opts->branches = true;
synth_opts->transactions = true;
synth_opts->ptwrites = true;
synth_opts->pwr_events = true;
synth_opts->other_events = true;
synth_opts->errors = true;
synth_opts->flc = true;
synth_opts->llc = true;
synth_opts->tlb = true;
synth_opts->mem = true;
synth_opts->remote_access = true;
synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
synth_opts->period = 1;
synth_opts->calls = true;
synth_opts->instructions = true;
synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
synth_opts->initial_skip = 0;
static int get_flag(const char **ptr, unsigned int *flags)
if (c >= 'a' && c <= 'z') {
*flags |= 1 << (c - 'a');
} else if (c == ' ') {
static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
if (get_flag(ptr, plus_flags))
if (get_flag(ptr, minus_flags))
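/*
 * Worked example (illustrative): for the string "+a-e", get_flags() sets
 * bit 0 ('a' - 'a') in *plus_flags and bit 4 ('e' - 'a') in *minus_flags,
 * advancing *ptr past the consumed characters.
 */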
/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here, which were introduced together with
 * support for them in 'perf script'.
 */
int itrace_parse_synth_opts(const struct option *opt, const char *str,
struct itrace_synth_opts *synth_opts = opt->value;
bool period_type_set = false;
bool period_set = false;
synth_opts->set = true;
synth_opts->dont_decode = true;
itrace_synth_opts__set_default(synth_opts,
synth_opts->default_no_sample);
for (p = str; *p;) {
synth_opts->instructions = true;
while (*p == ' ' || *p == ',')
synth_opts->period = strtoull(p, &endptr, 10);
while (*p == ' ' || *p == ',')
synth_opts->period_type =
PERF_ITRACE_PERIOD_INSTRUCTIONS;
period_type_set = true;
synth_opts->period_type =
PERF_ITRACE_PERIOD_TICKS;
period_type_set = true;
synth_opts->period *= 1000;
synth_opts->period *= 1000;
synth_opts->period_type =
PERF_ITRACE_PERIOD_NANOSECS;
period_type_set = true;
synth_opts->branches = true;
synth_opts->transactions = true;
synth_opts->ptwrites = true;
synth_opts->pwr_events = true;
synth_opts->other_events = true;
synth_opts->errors = true;
if (get_flags(&p, &synth_opts->error_plus_flags,
&synth_opts->error_minus_flags))
synth_opts->log = true;
if (get_flags(&p, &synth_opts->log_plus_flags,
&synth_opts->log_minus_flags))
synth_opts->branches = true;
synth_opts->calls = true;
synth_opts->branches = true;
synth_opts->returns = true;
synth_opts->add_callchain = true;
synth_opts->callchain = true;
synth_opts->callchain_sz =
PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
while (*p == ' ' || *p == ',')
val = strtoul(p, &endptr, 10);
if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
synth_opts->callchain_sz = val;
synth_opts->add_last_branch = true;
synth_opts->last_branch = true;
synth_opts->last_branch_sz =
PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
while (*p == ' ' || *p == ',')
val = strtoul(p, &endptr, 10);
val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
synth_opts->last_branch_sz = val;
synth_opts->initial_skip = strtoul(p, &endptr, 10);
synth_opts->flc = true;
synth_opts->llc = true;
synth_opts->tlb = true;
synth_opts->remote_access = true;
synth_opts->mem = true;
synth_opts->quick += 1;
if (synth_opts->instructions) {
if (!period_type_set)
synth_opts->period_type =
PERF_ITRACE_DEFAULT_PERIOD_TYPE;
synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
pr_err("Bad Instruction Tracing options '%s'\n", str);
static const char * const auxtrace_error_type_name[] = {
[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
static const char *auxtrace_error_name(int type)
const char *error_type_name = NULL;
if (type < PERF_AUXTRACE_ERROR_MAX)
error_type_name = auxtrace_error_type_name[type];
if (!error_type_name)
error_type_name = "unknown AUX";
return error_type_name;
size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
struct perf_record_auxtrace_error *e = &event->auxtrace_error;
unsigned long long nsecs = e->time;
const char *msg = e->msg;
ret = fprintf(fp, " %s error type %u",
auxtrace_error_name(e->type), e->type);
if (e->fmt && nsecs) {
unsigned long secs = nsecs / NSEC_PER_SEC;
nsecs -= secs * NSEC_PER_SEC;
ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
ret += fprintf(fp, " time 0");
msg = (const char *)&e->time;
ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
e->cpu, e->pid, e->tid, e->ip, e->code, msg);
void perf_session__auxtrace_error_inc(struct perf_session *session,
union perf_event *event)
struct perf_record_auxtrace_error *e = &event->auxtrace_error;
if (e->type < PERF_AUXTRACE_ERROR_MAX)
session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
void events_stats__auxtrace_error_warn(const struct events_stats *stats)
for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
if (!stats->nr_auxtrace_errors[i])
ui__warning("%u %s errors\n",
stats->nr_auxtrace_errors[i],
auxtrace_error_name(i));
int perf_event__process_auxtrace_error(struct perf_session *session,
union perf_event *event)
if (auxtrace__dont_decode(session))
perf_event__fprintf_auxtrace_error(event, stdout);
static int __auxtrace_mmap__read(struct mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
bool snapshot, size_t snapshot_size)
struct auxtrace_mmap *mm = &map->auxtrace_mmap;
u64 head, old = mm->prev, offset, ref;
unsigned char *data = mm->base;
size_t size, head_off, old_off, len1, len2, padding;
union perf_event ev;
void *data1, *data2;
head = auxtrace_mmap__read_snapshot_head(mm);
if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
head = auxtrace_mmap__read_head(mm);
pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
mm->idx, old, head, head - old);
head_off = head & mm->mask;
old_off = old & mm->mask;
head_off = head % mm->len;
old_off = old % mm->len;
if (head_off > old_off)
size = head_off - old_off;
size = mm->len - (old_off - head_off);
if (snapshot && size > snapshot_size)
size = snapshot_size;
ref = auxtrace_record__reference(itr);
if (head > old || size <= head || mm->mask) {
offset = head - size;
/*
 * When the buffer size is not a power of 2, 'head' wraps at the
 * highest multiple of the buffer size, so we have to subtract
 * the remainder here.
 */
u64 rem = (0ULL - mm->len) % mm->len;
offset = head - size - rem;
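/*
 * Worked example (illustrative): for mm->len == 24576 (24KiB, not a power
 * of 2), rem == (0ULL - 24576) % 24576 == 2^64 % 24576 == 16384, so the
 * u64 'head' wraps 16384 short of 2^64, at the highest multiple of the
 * buffer size, and that shortfall must be subtracted from the offset.
 */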
if (size > head_off) {
len1 = size - head_off;
data1 = &data[mm->len - len1];
data1 = &data[head_off - len1];
if (itr->alignment) {
unsigned int unwanted = len1 % itr->alignment;
/* padding must be written by fn() e.g. record__process_auxtrace() */
padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
memset(&ev, 0, sizeof(ev));
ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
ev.auxtrace.header.size = sizeof(ev.auxtrace);
ev.auxtrace.size = size + padding;
ev.auxtrace.offset = offset;
ev.auxtrace.reference = ref;
ev.auxtrace.idx = mm->idx;
ev.auxtrace.tid = mm->tid;
ev.auxtrace.cpu = mm->cpu;
if (fn(tool, map, &ev, data1, len1, data2, len2))
auxtrace_mmap__write_tail(mm, head);
if (itr->read_finish) {
err = itr->read_finish(itr, mm->idx);
int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn)
return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
int auxtrace_mmap__read_snapshot(struct mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size)
return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
struct hlist_head *hashtable;
struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
unsigned int limit_percent)
struct auxtrace_cache *c;
struct hlist_head *ht;
c = zalloc(sizeof(struct auxtrace_cache));
ht = calloc(sz, sizeof(struct hlist_head));
for (i = 0; i < sz; i++)
INIT_HLIST_HEAD(&ht[i]);
c->entry_size = entry_size;
c->limit = (c->sz * limit_percent) / 100;
static void auxtrace_cache__drop(struct auxtrace_cache *c)
struct auxtrace_cache_entry *entry;
struct hlist_node *tmp;
for (i = 0; i < c->sz; i++) {
hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
hlist_del(&entry->hash);
auxtrace_cache__free_entry(c, entry);
void auxtrace_cache__free(struct auxtrace_cache *c)
auxtrace_cache__drop(c);
zfree(&c->hashtable);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
return malloc(c->entry_size);
void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
struct auxtrace_cache_entry *entry)
if (c->limit && ++c->cnt > c->limit)
auxtrace_cache__drop(c);
hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
struct auxtrace_cache_entry *entry;
struct hlist_head *hlist;
struct hlist_node *n;
hlist = &c->hashtable[hash_32(key, c->bits)];
hlist_for_each_entry_safe(entry, n, hlist, hash) {
if (entry->key == key) {
hlist_del(&entry->hash);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);
auxtrace_cache__free_entry(c, entry);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
struct auxtrace_cache_entry *entry;
struct hlist_head *hlist;
hlist = &c->hashtable[hash_32(key, c->bits)];
hlist_for_each_entry(entry, hlist, hash) {
if (entry->key == key)
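/*
 * Usage sketch (illustrative; 'struct my_entry' is hypothetical): cached
 * items embed struct auxtrace_cache_entry as their first member and are
 * keyed by a u32.
 */
#if 0
struct my_entry {
	struct auxtrace_cache_entry entry;	/* must be first */
	u64 insn_cnt;
};

static void example(void)
{
	struct auxtrace_cache *c = auxtrace_cache__new(10, sizeof(struct my_entry), 200);
	struct my_entry *e = auxtrace_cache__alloc_entry(c);

	e->insn_cnt = 42;
	auxtrace_cache__add(c, 0x1234, &e->entry);
	e = auxtrace_cache__lookup(c, 0x1234);
	auxtrace_cache__free(c);
}
#endif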
static void addr_filter__free_str(struct addr_filter *filt)
filt->action = NULL;
filt->sym_from = NULL;
filt->sym_to = NULL;
filt->filename = NULL;
static struct addr_filter *addr_filter__new(void)
struct addr_filter *filt = zalloc(sizeof(*filt));
INIT_LIST_HEAD(&filt->list);
static void addr_filter__free(struct addr_filter *filt)
addr_filter__free_str(filt);
static void addr_filters__add(struct addr_filters *filts,
struct addr_filter *filt)
list_add_tail(&filt->list, &filts->head);
static void addr_filters__del(struct addr_filters *filts,
struct addr_filter *filt)
list_del_init(&filt->list);
void addr_filters__init(struct addr_filters *filts)
INIT_LIST_HEAD(&filts->head);
void addr_filters__exit(struct addr_filters *filts)
struct addr_filter *filt, *n;
list_for_each_entry_safe(filt, n, &filts->head, list) {
addr_filters__del(filts, filt);
addr_filter__free(filt);
static int parse_num_or_str(char **inp, u64 *num, const char **str,
const char *str_delim)
*inp += strspn(*inp, " ");
if (isdigit(**inp)) {
*num = strtoull(*inp, &endptr, 0);
*inp += strspn(*inp, " ");
n = strcspn(*inp, str_delim);
static int parse_action(struct addr_filter *filt)
if (!strcmp(filt->action, "filter")) {
} else if (!strcmp(filt->action, "start")) {
} else if (!strcmp(filt->action, "stop")) {
filt->start = false;
} else if (!strcmp(filt->action, "tracestop")) {
filt->start = false;
filt->action += 5; /* Change 'tracestop' to 'stop' */
static int parse_sym_idx(char **inp, int *idx)
*inp += strspn(*inp, " ");
if (**inp == 'g' || **inp == 'G') {
num = strtoul(*inp, &endptr, 0);
if (endptr == *inp || num > INT_MAX)
static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
int err = parse_num_or_str(inp, num, str, " ");
err = parse_sym_idx(inp, idx);
static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
filt->str = fstr = strdup(*filter_inp);
err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
err = parse_action(filt);
err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
&filt->sym_from_idx);
fstr += strspn(fstr, " ");
err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
fstr += strspn(fstr, " ");
err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
fstr += strspn(fstr, " ,");
*filter_inp += fstr - filt->str;
addr_filter__free_str(filt);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
struct addr_filter *filt;
const char *fstr = filter;
filt = addr_filter__new();
err = parse_one_filter(filt, &fstr);
addr_filter__free(filt);
addr_filters__exit(filts);
addr_filters__add(filts, filt);
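/*
 * Examples of bare filters accepted above (illustrative; the symbol and
 * file names are hypothetical):
 *
 *	filter main @ /bin/ls
 *	filter 0x1000 / 0x100
 *	start foo #2 , stop bar
 *	tracestop baz / qux @ /usr/lib64/libc.so.6
 */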
static bool kern_sym_match(struct sym_args *args, const char *name, char type)
/* A function with the same name, and global or the n'th found or any */
return kallsyms__is_function(type) &&
!strcmp(name, args->name) &&
((args->global && isupper(type)) ||
(args->selected && ++(args->cnt) == args->idx) ||
(!args->global && !args->selected));
static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
struct sym_args *args = arg;
if (args->started) {
args->size = start - args->start;
if (args->selected) {
} else if (kern_sym_match(args, name, type)) {
args->duplicate = true;
} else if (kern_sym_match(args, name, type)) {
args->started = true;
args->start = start;
static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
struct sym_args *args = arg;
if (kern_sym_match(args, name, type)) {
pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
++args->cnt, start, type, name);
} else if (args->near) {
pr_err("\t\twhich is near\t\t%s\n", name);
static int sym_not_found_error(const char *sym_name, int idx)
pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
pr_err("Global symbol '%s' not found.\n", sym_name);
pr_err("Symbol '%s' not found.\n", sym_name);
pr_err("Note that symbols must be functions.\n");
static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
struct sym_args args = {
.selected = idx > 0,
err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
pr_err("Failed to parse /proc/kallsyms\n");
if (args.duplicate) {
pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
if (!args.started) {
pr_err("Kernel symbol lookup: ");
return sym_not_found_error(sym_name, idx);
*start = args.start;
static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
char type, u64 start)
struct sym_args *args = arg;
if (!kallsyms__is_function(type))
if (!args->started) {
args->started = true;
args->start = start;
/* Don't know exactly where the kernel ends, so we add a page */
args->size = round_up(start, page_size) + page_size - args->start;
static int addr_filter__entire_kernel(struct addr_filter *filt)
struct sym_args args = { .started = false };
err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
if (err < 0 || !args.started) {
pr_err("Failed to parse /proc/kallsyms\n");
filt->addr = args.start;
filt->size = args.size;
static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
if (start + size >= filt->addr)
if (filt->sym_from) {
pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
filt->sym_to, start, filt->sym_from, filt->addr);
pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
filt->sym_to, start, filt->addr);
static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
bool no_size = false;
if (symbol_conf.kptr_restrict) {
pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
if (filt->sym_from && !strcmp(filt->sym_from, "*"))
return addr_filter__entire_kernel(filt);
if (filt->sym_from) {
err = find_kern_sym(filt->sym_from, &start, &size,
filt->sym_from_idx);
if (filt->range && !filt->size && !filt->sym_to) {
err = find_kern_sym(filt->sym_to, &start, &size,
err = check_end_after_start(filt, start, size);
filt->size = start + size - filt->addr;
/* The very last symbol in kallsyms does not imply a particular size */
pr_err("Cannot determine size of symbol '%s'\n",
filt->sym_to ? filt->sym_to : filt->sym_from);
static struct dso *load_dso(const char *name)
map = dso__new_map(name);
if (map__load(map) < 0)
pr_err("File '%s' not found or has no symbols.\n", name);
dso = dso__get(map->dso);
static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
/* Same name, and global or the n'th found or any */
return !arch__compare_symbol_names(name, sym->name) &&
((!idx && sym->binding == STB_GLOBAL) ||
(idx > 0 && ++*cnt == idx) ||
static void print_duplicate_syms(struct dso *dso, const char *sym_name)
pr_err("Multiple symbols with name '%s'\n", sym_name);
sym = dso__first_symbol(dso);
if (dso_sym_match(sym, sym_name, &cnt, -1)) {
pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
sym->binding == STB_GLOBAL ? 'g' :
sym->binding == STB_LOCAL ? 'l' : 'w',
pr_err("\t\twhich is near\t\t%s\n", sym->name);
sym = dso__next_symbol(sym);
pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
sym = dso__first_symbol(dso);
*size = sym->start - *start;
} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
print_duplicate_syms(dso, sym_name);
} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
*start = sym->start;
*size = sym->end - sym->start;
sym = dso__next_symbol(sym);
return sym_not_found_error(sym_name, idx);
static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
if (dso__data_file_size(dso, NULL)) {
pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
filt->size = dso->data.file_size;
static int addr_filter__resolve_syms(struct addr_filter *filt)
if (!filt->sym_from && !filt->sym_to)
if (!filt->filename)
return addr_filter__resolve_kernel_syms(filt);
dso = load_dso(filt->filename);
pr_err("Failed to load symbols from: %s\n", filt->filename);
if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
err = addr_filter__entire_dso(filt, dso);
if (filt->sym_from) {
err = find_dso_sym(dso, filt->sym_from, &start, &size,
filt->sym_from_idx);
if (filt->range && !filt->size && !filt->sym_to)
err = find_dso_sym(dso, filt->sym_to, &start, &size,
err = check_end_after_start(filt, start, size);
filt->size = start + size - filt->addr;
static char *addr_filter__to_str(struct addr_filter *filt)
char filename_buf[PATH_MAX];
const char *at = "";
const char *fn = "";
if (filt->filename) {
fn = realpath(filt->filename, filename_buf);
err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
filt->action, filt->addr, filt->size, at, fn);
err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
filt->action, filt->addr, at, fn);
return err < 0 ? NULL : filter;
static int parse_addr_filter(struct evsel *evsel, const char *filter,
struct addr_filters filts;
struct addr_filter *filt;
addr_filters__init(&filts);
err = addr_filters__parse_bare_filter(&filts, filter);
if (filts.cnt > max_nr) {
pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
list_for_each_entry(filt, &filts.head, list) {
err = addr_filter__resolve_syms(filt);
new_filter = addr_filter__to_str(filt);
if (evsel__append_addr_filter(evsel, new_filter)) {
addr_filters__exit(&filts);
pr_err("Failed to parse address filter: '%s'\n", filter);
pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
pr_err("Where multiple filters are separated by space or comma.\n");
static int evsel__nr_addr_filter(struct evsel *evsel)
struct perf_pmu *pmu = evsel__find_pmu(evsel);
int nr_addr_filters = 0;
perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);
return nr_addr_filters;
int auxtrace_parse_filters(struct evlist *evlist)
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
filter = evsel->filter;
max_nr = evsel__nr_addr_filter(evsel);
if (!filter || !max_nr)
evsel->filter = NULL;
err = parse_addr_filter(evsel, filter, max_nr);
pr_debug("Address filter: %s\n", evsel->filter);
int auxtrace__process_event(struct perf_session *session, union perf_event *event,
struct perf_sample *sample, struct perf_tool *tool)
if (!session->auxtrace)
return session->auxtrace->process_event(session, event, sample, tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
struct perf_sample *sample)
if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
auxtrace__dont_decode(session))
session->auxtrace->dump_auxtrace_sample(session, sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
if (!session->auxtrace)
return session->auxtrace->flush_events(session, tool);
void auxtrace__free_events(struct perf_session *session)
if (!session->auxtrace)
return session->auxtrace->free_events(session);
void auxtrace__free(struct perf_session *session)
if (!session->auxtrace)
return session->auxtrace->free(session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
struct evsel *evsel)
if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
return session->auxtrace->evsel_is_auxtrace(session, evsel);