// SPDX-License-Identifier: GPL-2.0-only
/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <errno.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/time64.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <linux/list.h>
#include <linux/zalloc.h>

#include "evlist.h"
#include "dso.h"
#include "map.h"
#include "pmu.h"
#include "evsel.h"
#include "evsel_config.h"
#include "symbol.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "record.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
#include "arm-spe.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"

#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>

static struct perf_pmu *perf_evsel__find_pmu(struct evsel *evsel)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		if (pmu->type == evsel->core.attr.type)
			break;
	}

	return pmu;
}

static bool perf_evsel__is_aux_event(struct evsel *evsel)
{
	struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);

	return pmu && pmu->auxtrace;
}
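/*
 * Example (illustrative, not part of the original file): for an event opened
 * on the intel_pt PMU, evsel->core.attr.type equals the value read from
 * /sys/bus/event_source/devices/intel_pt/type, so perf_evsel__find_pmu()
 * returns that PMU and perf_evsel__is_aux_event() is true because the PMU
 * exposes an AUX area.
 */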
/*
 * Make a group from 'leader' to 'last', requiring that the events were not
 * already grouped to a different leader.
 */
static int perf_evlist__regroup(struct evlist *evlist,
				struct evsel *leader,
				struct evsel *last)
{
	struct evsel *evsel;
	bool grp;

	if (!perf_evsel__is_group_leader(leader))
		return -EINVAL;

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!(evsel->leader == leader ||
			      (evsel->leader == evsel &&
			       evsel->core.nr_members <= 1)))
				return -EINVAL;
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (evsel->leader != leader) {
				evsel->leader = leader;
				if (leader->core.nr_members < 1)
					leader->core.nr_members = 1;
				leader->core.nr_members += 1;
			}
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	return 0;
}

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pr_err("Cannot use AUX area tracing mmaps\n");
	return -1;
#endif

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}
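/*
 * Example (illustrative): with 4KiB pages and auxtrace_pages = 16,
 * mp->len = 64KiB, which is a power of 2, so mp->mask = 0xffff and ring
 * buffer offsets can later be computed with the cheap 'head & mm->mask'
 * instead of 'head % mm->len'.
 */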
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = evlist->core.cpus->map[idx];
		if (evlist->core.threads)
			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu = -1;
		mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	free(queues->queue_array);
	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}
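/*
 * Example (illustrative): starting from 32 queues, a buffer destined for
 * queue index 100 triggers auxtrace_queues__grow(queues, 101), which
 * doubles 32 -> 64 -> 128 and leaves nr_queues == 128. The two comparisons
 * above reject the request if the doubling overflowed or still fell short.
 */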
static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data__fd(session->data);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu;
	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
		return -EINVAL;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
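/*
 * Example (illustrative): on a 32-bit build, a 100MiB AUX buffer is queued
 * as three 32MiB pieces plus a 4MiB remainder. The first piece has
 * 'consecutive' false; the later pieces are marked consecutive so decoders
 * know no trace data was lost between them.
 */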
static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__queue_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

static bool filter_cpu(struct perf_session *session, int cpu)
{
	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;

	return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       struct perf_session *session,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer,
				       struct auxtrace_buffer **buffer_ptr)
{
	int err = -ENOMEM;

	if (filter_cpu(session, buffer->cpu))
		return 0;

	buffer = memdup(buffer, sizeof(*buffer));
	if (!buffer)
		return -ENOMEM;

	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data__is_pipe(session->data)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			goto out_free;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			goto out_free;
	}

	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
	if (err)
		goto out_free;

	/* FIXME: Doesn't work for split buffer */
	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_free:
	auxtrace_buffer__free(buffer);
	return err;
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.tid = event->auxtrace.tid,
		.cpu = event->auxtrace.cpu,
		.data_offset = data_offset,
		.offset = event->auxtrace.offset,
		.reference = event->auxtrace.reference,
		.size = event->auxtrace.size,
	};
	unsigned int idx = event->auxtrace.idx;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
					   buffer_ptr);
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del_init(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}

	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}
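/*
 * Usage sketch (illustrative, values hypothetical): the heap keeps the queue
 * with the smallest timestamp ("ordinal") at heap_array[0], so a decoder can
 * always service the oldest data next:
 *
 *	struct auxtrace_heap heap = { .heap_array = NULL };
 *
 *	auxtrace_heap__add(&heap, 0, 3000);
 *	auxtrace_heap__add(&heap, 1, 1000);
 *	auxtrace_heap__add(&heap, 2, 2000);
 *	// heap.heap_array[0].queue_nr == 1 (smallest ordinal, 1000)
 *	auxtrace_heap__pop(&heap);
 *	// now heap.heap_array[0].queue_nr == 2 (ordinal 2000)
 */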
int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			pos = left;
			break;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;

	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
{
	if (!on_exit && itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts)
{
	if (itr) {
		itr->evlist = evlist;
		return itr->recording_options(itr, evlist, opts);
	}
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	/* PMU-agnostic options */
	switch (*str) {
	case 'e':
		opts->auxtrace_snapshot_on_exit = true;
		str++;
		break;
	default:
		break;
	}

	if (itr)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
	struct evsel *evsel;

	if (!itr->evlist || !itr->pmu)
		return -EINVAL;

	evlist__for_each_entry(itr->evlist, evsel) {
		if (evsel->core.attr.type == itr->pmu->type) {
			if (evsel->disabled)
				return 0;
			return perf_evlist__enable_event_idx(itr->evlist, evsel,
							     idx);
		}
	}
	return -EINVAL;
}

/*
 * Event record size is 16-bit which results in a maximum size of about 64KiB.
 * Allow about 4KiB for the rest of the sample record, to give a maximum
 * AUX area sample size of 60KiB.
 */
#define MAX_AUX_SAMPLE_SIZE (60 * 1024)

/* Arbitrary default size if no other default provided */
#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)
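/*
 * Example (illustrative): a per-event sample size can be given as the
 * 'aux-sample-size' config term, e.g.
 *
 *	perf record -e intel_pt//u -e branch-misses/aux-sample-size=8192/u -- ls
 *
 * which is what perf_evsel__get_config_term(evsel, AUX_SAMPLE_SIZE) picks up
 * in auxtrace_parse_sample_options() below.
 */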
static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
					     struct record_opts *opts)
{
	struct evsel *evsel;
	bool has_aux_leader = false;
	u32 sz;

	evlist__for_each_entry(evlist, evsel) {
		sz = evsel->core.attr.aux_sample_size;
		if (perf_evsel__is_group_leader(evsel)) {
			has_aux_leader = perf_evsel__is_aux_event(evsel);
			if (sz) {
				if (has_aux_leader)
					pr_err("Cannot add AUX area sampling to an AUX area event\n");
				else
					pr_err("Cannot add AUX area sampling to a group leader\n");
				return -EINVAL;
			}
		}
		if (sz > MAX_AUX_SAMPLE_SIZE) {
			pr_err("AUX area sample size %u too big, max. %d\n",
			       sz, MAX_AUX_SAMPLE_SIZE);
			return -EINVAL;
		}
		if (sz) {
			if (!has_aux_leader) {
				pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
				return -EINVAL;
			}
			perf_evsel__set_sample_bit(evsel, AUX);
			opts->auxtrace_sample_mode = true;
		} else {
			perf_evsel__reset_sample_bit(evsel, AUX);
		}
	}

	if (!opts->auxtrace_sample_mode) {
		pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
		return -EINVAL;
	}

	if (!perf_can_aux_sample()) {
		pr_err("AUX area sampling is not supported by kernel\n");
		return -EINVAL;
	}

	return 0;
}

int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str)
{
	struct perf_evsel_config_term *term;
	struct evsel *aux_evsel;
	bool has_aux_sample_size = false;
	bool has_aux_leader = false;
	struct evsel *evsel;
	char *endptr;
	unsigned long sz;

	if (!str)
		goto no_opt;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	sz = strtoul(str, &endptr, 0);
	if (*endptr || sz > UINT_MAX) {
		pr_err("Bad AUX area sampling option: '%s'\n", str);
		return -EINVAL;
	}

	if (!sz)
		sz = itr->default_aux_sample_size;

	if (!sz)
		sz = DEFAULT_AUX_SAMPLE_SIZE;

	/* Set aux_sample_size based on --aux-sample option */
	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel)) {
			has_aux_leader = perf_evsel__is_aux_event(evsel);
		} else if (has_aux_leader) {
			evsel->core.attr.aux_sample_size = sz;
		}
	}
no_opt:
	aux_evsel = NULL;
	/* Override with aux_sample_size from config term */
	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = perf_evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
		if (term) {
			has_aux_sample_size = true;
			evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
			/* If possible, group with the AUX event */
			if (aux_evsel && evsel->core.attr.aux_sample_size)
				perf_evlist__regroup(evlist, aux_evsel, evsel);
		}
	}

	if (!str && !has_aux_sample_size)
		return 0;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	return auxtrace_validate_aux_sample_size(evlist, opts);
}
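/*
 * Usage sketch (illustrative): AUX area sampling is normally requested on
 * the perf record command line, e.g.
 *
 *	perf record --aux-sample -e '{intel_pt//u,branch-misses:u}' -- ls
 *
 * An optional size overrides the default, e.g. --aux-sample=8192.
 */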
struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del_init(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}
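/*
 * On-disk layout written above (as implied by the code, all fields u64):
 *
 *	u64 total;			// number of index entries
 *	struct auxtrace_index_entry {
 *		u64 file_offset;	// offset of a PERF_RECORD_AUXTRACE event
 *		u64 sz;			// size of that event record
 *	} entries[total];
 */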
static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	if (auxtrace__dont_decode(session))
		return 0;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
						     struct perf_sample *sample,
						     struct perf_session *session)
{
	struct perf_sample_id *sid;
	unsigned int idx;
	u64 id;

	id = sample->id;
	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(session->evlist, id);
	if (!sid)
		return NULL;

	idx = sid->idx;

	if (idx >= queues->nr_queues)
		return NULL;

	return &queues->queue_array[idx];
}

int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.data_offset = data_offset,
		.reference = reference,
		.size = sample->aux_sample.size,
	};
	struct perf_sample_id *sid;
	u64 id = sample->id;
	unsigned int idx;

	if (!id)
		return -EINVAL;

	sid = perf_evlist__id2sid(session->evlist, id);
	if (!sid)
		return -ENOENT;

	idx = sid->idx;
	buffer.tid = sid->tid;
	buffer.cpu = sid->cpu;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
}

struct queue_data {
	bool samples;
	bool events;
};

static int auxtrace_queue_data_cb(struct perf_session *session,
				  union perf_event *event, u64 offset,
				  void *data)
{
	struct queue_data *qd = data;
	struct perf_sample sample;
	int err;

	if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace))
			return -EINVAL;
		offset += event->header.size;
		return session->auxtrace->queue_data(session, NULL, event,
						     offset);
	}

	if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
		return 0;

	err = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (err)
		return err;

	if (!sample.aux_sample.size)
		return 0;

	offset += sample.aux_sample.data - (void *)event;

	return session->auxtrace->queue_data(session, &sample, NULL, offset);
}

int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
{
	struct queue_data qd = {
		.samples = samples,
		.events = events,
	};

	if (auxtrace__dont_decode(session))
		return 0;

	if (!session->auxtrace || !session->auxtrace->queue_data)
		return -EINVAL;

	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 auxtrace_queue_data_cb, &qd);
}

void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}
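/*
 * Example (illustrative): mmap() requires a page-aligned file offset. For
 * data_offset = 0x12345 with 4KiB pages: adj = 0x345, the mapping starts at
 * file offset 0x12000, and buffer->data points 0x345 bytes into it.
 */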
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->fmt = 1;
	auxtrace_error->ip = ip;
	auxtrace_error->time = timestamp;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
{
	struct evsel *new_leader = NULL;
	struct evsel *evsel;

	/* Find new leader for the group */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->leader != leader || evsel == leader)
			continue;
		if (!new_leader)
			new_leader = evsel;
		evsel->leader = new_leader;
	}

	/* Update group information */
	if (new_leader) {
		zfree(&new_leader->group_name);
		new_leader->group_name = leader->group_name;
		leader->group_name = NULL;

		new_leader->core.nr_members = leader->core.nr_members - 1;
		leader->core.nr_members = 1;
	}
}

static void unleader_auxtrace(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (auxtrace__evsel_is_auxtrace(session, evsel) &&
		    perf_evsel__is_group_leader(evsel)) {
			unleader_evsel(session->evlist, evsel);
		}
	}
}

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event)
{
	enum auxtrace_type type = event->auxtrace_info.type;
	int err;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		err = intel_pt_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_INTEL_BTS:
		err = intel_bts_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_ARM_SPE:
		err = arm_spe_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_CS_ETM:
		err = cs_etm__process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_S390_CPUMSF:
		err = s390_cpumsf_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	unleader_auxtrace(session);

	return 0;
}

s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample)
{
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->other_events = true;
	synth_opts->errors = true;
	if (no_sample) {
		synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
		synth_opts->period = 1;
		synth_opts->calls = true;
	} else {
		synth_opts->instructions = true;
		synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}

/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here, which were introduced together with
 * support for them in 'perf script'.
 */
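/*
 * Usage examples (illustrative): the option string parsed below follows the
 * documented --itrace syntax, e.g.
 *
 *	--itrace=i100us		synthesize instruction events every 100us
 *	--itrace=bcrx		synthesize branch, call, return and
 *				transaction events
 *	--itrace=ig32		instruction events plus 32-deep call chains
 *	--itrace=e		synthesize error events only
 */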
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset)
{
	struct itrace_synth_opts *synth_opts = opt->value;
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts,
					       synth_opts->default_no_sample);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'o':
			synth_opts->other_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			break;
		case 'd':
			synth_opts->log = true;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'g':
			synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'l':
			synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;
	unsigned long long nsecs = e->time;
	const char *msg = e->msg;
	size_t ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);

	if (e->fmt && nsecs) {
		unsigned long secs = nsecs / NSEC_PER_SEC;

		nsecs -= secs * NSEC_PER_SEC;
		ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
	} else {
		ret += fprintf(fp, " time 0");
	}

	if (!e->fmt)
		msg = (const char *)&e->time;

	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, msg);

	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

static int __auxtrace_mmap__read(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}
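	/*
	 * Worked example (illustrative): for mm->len = 24KiB (not a power of
	 * 2), 'head' wraps at the highest multiple of 24KiB that fits in a
	 * u64, so the top 2^64 % 24KiB counter values are never used; that
	 * remainder is what 'rem' subtracts above when 'head' has wrapped.
	 */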
	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = data;
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
	if (padding)
		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, map, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}
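/*
 * Usage sketch (illustrative; 'struct my_entry' is hypothetical): an entry
 * must begin with struct auxtrace_cache_entry, which carries the hash node
 * and key:
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry entry;
 *		u64 payload;
 *	};
 *
 *	struct auxtrace_cache *c =
 *		auxtrace_cache__new(10, sizeof(struct my_entry), 200);
 *	struct my_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	e->payload = 42;
 *	auxtrace_cache__add(c, 0x1234, &e->entry);
 *	e = auxtrace_cache__lookup(c, 0x1234);
 */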
static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	zfree(&c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
						       u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;
	struct hlist_node *n;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry_safe(entry, n, hlist, hash) {
		if (entry->key == key) {
			hlist_del(&entry->hash);
			return entry;
		}
	}

	return NULL;
}

void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);

	auxtrace_cache__free_entry(c, entry);
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}

static void addr_filter__free_str(struct addr_filter *filt)
{
	zfree(&filt->str);
	filt->action = NULL;
	filt->sym_from = NULL;
	filt->sym_to = NULL;
	filt->filename = NULL;
}

static struct addr_filter *addr_filter__new(void)
{
	struct addr_filter *filt = zalloc(sizeof(*filt));

	if (filt)
		INIT_LIST_HEAD(&filt->list);

	return filt;
}

static void addr_filter__free(struct addr_filter *filt)
{
	if (filt)
		addr_filter__free_str(filt);
	free(filt);
}

static void addr_filters__add(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_add_tail(&filt->list, &filts->head);
	filts->cnt += 1;
}

static void addr_filters__del(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_del_init(&filt->list);
	filts->cnt -= 1;
}

void addr_filters__init(struct addr_filters *filts)
{
	INIT_LIST_HEAD(&filts->head);
	filts->cnt = 0;
}

void addr_filters__exit(struct addr_filters *filts)
{
	struct addr_filter *filt, *n;

	list_for_each_entry_safe(filt, n, &filts->head, list) {
		addr_filters__del(filts, filt);
		addr_filter__free(filt);
	}
}

static int parse_num_or_str(char **inp, u64 *num, const char **str,
			    const char *str_delim)
{
	*inp += strspn(*inp, " ");

	if (isdigit(**inp)) {
		char *endptr;

		if (!num)
			return -EINVAL;
		errno = 0;
		*num = strtoull(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp)
			return -EINVAL;
		*inp = endptr;
	} else {
		size_t n;

		if (!str)
			return -EINVAL;
		*inp += strspn(*inp, " ");
		*str = *inp;
		n = strcspn(*inp, str_delim);
		if (!n)
			return -EINVAL;
		*inp += n;
		if (**inp) {
			**inp = '\0';
			*inp += 1;
		}
	}
	return 0;
}

static int parse_action(struct addr_filter *filt)
{
	if (!strcmp(filt->action, "filter")) {
		filt->start = true;
		filt->range = true;
	} else if (!strcmp(filt->action, "start")) {
		filt->start = true;
	} else if (!strcmp(filt->action, "stop")) {
		filt->start = false;
	} else if (!strcmp(filt->action, "tracestop")) {
		filt->start = false;
		filt->range = true;
		filt->action += 5; /* Change 'tracestop' to 'stop' */
	} else {
		return -EINVAL;
	}
	return 0;
}

static int parse_sym_idx(char **inp, int *idx)
{
	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
	} else {
		unsigned long num;
		char *endptr;

		errno = 0;
		num = strtoul(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp || num > INT_MAX)
			return -EINVAL;
		*inp = endptr;
		*idx = num;
	}

	return 0;
}

static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
	int err = parse_num_or_str(inp, num, str, " ");

	if (!err && *str)
		err = parse_sym_idx(inp, idx);

	return err;
}

static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}

int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter)
{
	struct addr_filter *filt;
	const char *fstr = filter;
	int err;

	while (*fstr) {
		filt = addr_filter__new();
		err = parse_one_filter(filt, &fstr);
		if (err) {
			addr_filter__free(filt);
			addr_filters__exit(filts);
			return err;
		}
		addr_filters__add(filts, filt);
	}

	return 0;
}
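/*
 * Example filter strings accepted by the parser above (illustrative;
 * /bin/ls and the symbol names are placeholders):
 *
 *	filter main @ /bin/ls		trace only main() in /bin/ls
 *	filter 0x1000/0x100		trace a 256-byte range at 0x1000
 *	start func1 @ /bin/ls, stop func2 @ /bin/ls
 *	tracestop main #2 @ /bin/ls	stop at the 2nd symbol named 'main'
 */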
struct sym_args {
	const char	*name;
	u64		start;
	u64		size;
	int		idx;
	int		cnt;
	bool		started;
	bool		global;
	bool		selected;
	bool		duplicate;
	bool		near;
};

static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return kallsyms__is_function(type) &&
	       !strcmp(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}

static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}

static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}

static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}

static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}

static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;

	if (!kallsyms__is_function(type))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
	args->size = round_up(start, page_size) + page_size - args->start;

	return 0;
}

static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}

static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
		       filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}

static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}

static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	if (map__load(map) < 0)
		pr_err("File '%s' not found or has no symbols.\n", name);

	dso = dso__get(map->dso);

	map__put(map);

	return dso;
}

static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}

static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL  ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}

static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				if (*size)
					return 0;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}

static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	if (dso__data_file_size(dso, NULL)) {
		pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = 0;
	filt->size = dso->data.file_size;

	return 0;
}

static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso;

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}

static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}

static int parse_addr_filter(struct evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		if (perf_evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}

static int perf_evsel__nr_addr_filter(struct evsel *evsel)
{
	struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}

int auxtrace_parse_filters(struct evlist *evlist)
{
	struct evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = perf_evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->process_event(session, event, sample, tool);
}

void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample)
{
	if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
	    auxtrace__dont_decode(session))
		return;

	session->auxtrace->dump_auxtrace_sample(session, sample);
}

int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->flush_events(session, tool);
}

void auxtrace__free_events(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	return session->auxtrace->free_events(session);
}

void auxtrace__free(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	return session->auxtrace->free(session);
}

bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel)
{
	if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
		return false;

	return session->auxtrace->evsel_is_auxtrace(session, evsel);
}