1 // SPDX-License-Identifier: GPL-2.0-only
3 * auxtrace.c: AUX area trace support
4 * Copyright (c) 2013-2015, Intel Corporation.
15 #include <linux/kernel.h>
16 #include <linux/perf_event.h>
17 #include <linux/types.h>
18 #include <linux/bitops.h>
19 #include <linux/log2.h>
20 #include <linux/string.h>
21 #include <linux/time64.h>
23 #include <sys/param.h>
26 #include <linux/list.h>
27 #include <linux/zalloc.h>
34 #include "evsel_config.h"
36 #include "util/perf_api_probe.h"
37 #include "util/synthetic-events.h"
38 #include "thread_map.h"
42 #include <linux/hash.h>
48 #include <subcmd/parse-options.h>
52 #include "intel-bts.h"
54 #include "s390-cpumsf.h"
55 #include "util/mmap.h"
57 #include <linux/ctype.h>
58 #include "symbol/kallsyms.h"
59 #include <internal/lib.h>
62 * Make a group from 'leader' to 'last', requiring that the events were not
63 * already grouped to a different leader.
65 static int perf_evlist__regroup(struct evlist *evlist,
72 if (!evsel__is_group_leader(leader))
76 evlist__for_each_entry(evlist, evsel) {
78 if (!(evsel->leader == leader ||
79 (evsel->leader == evsel &&
80 evsel->core.nr_members <= 1)))
82 } else if (evsel == leader) {
90 evlist__for_each_entry(evlist, evsel) {
92 if (evsel->leader != leader) {
93 evsel->leader = leader;
94 if (leader->core.nr_members < 1)
95 leader->core.nr_members = 1;
96 leader->core.nr_members += 1;
98 } else if (evsel == leader) {
108 static bool auxtrace__dont_decode(struct perf_session *session)
110 return !session->itrace_synth_opts ||
111 session->itrace_synth_opts->dont_decode;
114 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
115 struct auxtrace_mmap_params *mp,
116 void *userpg, int fd)
118 struct perf_event_mmap_page *pc = userpg;
120 WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
135 #if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
136 pr_err("Cannot use AUX area tracing mmaps\n");
140 pc->aux_offset = mp->offset;
141 pc->aux_size = mp->len;
143 mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
144 if (mm->base == MAP_FAILED) {
145 pr_debug2("failed to mmap AUX area\n");
153 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
156 munmap(mm->base, mm->len);
161 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
162 off_t auxtrace_offset,
163 unsigned int auxtrace_pages,
164 bool auxtrace_overwrite)
166 if (auxtrace_pages) {
167 mp->offset = auxtrace_offset;
168 mp->len = auxtrace_pages * (size_t)page_size;
169 mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
170 mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
171 pr_debug2("AUX area mmap length %zu\n", mp->len);
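/*
 * Illustrative sketch, not part of the original file: when the AUX buffer
 * length is a power of 2, the mask set up above lets an offset within the
 * buffer be computed with a single AND instead of a modulo, which is how
 * __auxtrace_mmap__read() later in this file uses it.
 */
static u64 __maybe_unused aux_offset_in_buffer_example(u64 head)
{
	const u64 len = 64 * 1024;	/* power-of-2 buffer length */
	const u64 mask = len - 1;	/* mp->mask as set above */

	return head & mask;		/* equivalent to head % len */
}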
177 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
178 struct evlist *evlist, int idx,
184 mp->cpu = evlist->core.cpus->map[idx];
185 if (evlist->core.threads)
186 mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
191 mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
195 #define AUXTRACE_INIT_NR_QUEUES 32
197 static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
199 struct auxtrace_queue *queue_array;
200 unsigned int max_nr_queues, i;
202 max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
203 if (nr_queues > max_nr_queues)
206 queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
210 for (i = 0; i < nr_queues; i++) {
211 INIT_LIST_HEAD(&queue_array[i].head);
212 queue_array[i].priv = NULL;
218 int auxtrace_queues__init(struct auxtrace_queues *queues)
220 queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
221 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
222 if (!queues->queue_array)
227 static int auxtrace_queues__grow(struct auxtrace_queues *queues,
228 unsigned int new_nr_queues)
230 unsigned int nr_queues = queues->nr_queues;
231 struct auxtrace_queue *queue_array;
235 nr_queues = AUXTRACE_INIT_NR_QUEUES;
237 while (nr_queues && nr_queues < new_nr_queues)
240 if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
243 queue_array = auxtrace_alloc_queue_array(nr_queues);
247 for (i = 0; i < queues->nr_queues; i++) {
248 list_splice_tail(&queues->queue_array[i].head,
249 &queue_array[i].head);
250 queue_array[i].tid = queues->queue_array[i].tid;
251 queue_array[i].cpu = queues->queue_array[i].cpu;
252 queue_array[i].set = queues->queue_array[i].set;
253 queue_array[i].priv = queues->queue_array[i].priv;
256 queues->nr_queues = nr_queues;
257 queues->queue_array = queue_array;
262 static void *auxtrace_copy_data(u64 size, struct perf_session *session)
264 int fd = perf_data__fd(session->data);
268 if (size > SSIZE_MAX)
275 ret = readn(fd, p, size);
276 if (ret != (ssize_t)size) {
284 static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
286 struct auxtrace_buffer *buffer)
288 struct auxtrace_queue *queue;
291 if (idx >= queues->nr_queues) {
292 err = auxtrace_queues__grow(queues, idx + 1);
297 queue = &queues->queue_array[idx];
301 queue->tid = buffer->tid;
302 queue->cpu = buffer->cpu;
303 } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
304 pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
305 queue->cpu, queue->tid, buffer->cpu, buffer->tid);
309 buffer->buffer_nr = queues->next_buffer_nr++;
311 list_add_tail(&buffer->list, &queue->head);
313 queues->new_data = true;
314 queues->populated = true;
319 /* Limit buffers to 32MiB on 32-bit */
320 #define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
322 static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
324 struct auxtrace_buffer *buffer)
326 u64 sz = buffer->size;
327 bool consecutive = false;
328 struct auxtrace_buffer *b;
331 while (sz > BUFFER_LIMIT_FOR_32_BIT) {
332 b = memdup(buffer, sizeof(struct auxtrace_buffer));
335 b->size = BUFFER_LIMIT_FOR_32_BIT;
336 b->consecutive = consecutive;
337 err = auxtrace_queues__queue_buffer(queues, idx, b);
339 auxtrace_buffer__free(b);
342 buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
343 sz -= BUFFER_LIMIT_FOR_32_BIT;
348 buffer->consecutive = consecutive;
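/*
 * Illustrative sketch, not part of the original file: on a 32-bit build the
 * function above queues a large buffer in BUFFER_LIMIT_FOR_32_BIT sized
 * pieces, marking every piece after the first 'consecutive', and leaves the
 * remainder for the caller to queue; e.g. a 100MiB buffer ends up as
 * 32 + 32 + 32 + 4 MiB.
 */
static u64 __maybe_unused split_buffer_piece_count_example(u64 sz)
{
	/* Total pieces queued for a buffer of 'sz' bytes (sz > 0) */
	return (sz + BUFFER_LIMIT_FOR_32_BIT - 1) / BUFFER_LIMIT_FOR_32_BIT;
}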
353 static bool filter_cpu(struct perf_session *session, int cpu)
355 unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
357 return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
360 static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
361 struct perf_session *session,
363 struct auxtrace_buffer *buffer,
364 struct auxtrace_buffer **buffer_ptr)
368 if (filter_cpu(session, buffer->cpu))
371 buffer = memdup(buffer, sizeof(*buffer));
375 if (session->one_mmap) {
376 buffer->data = buffer->data_offset - session->one_mmap_offset +
377 session->one_mmap_addr;
378 } else if (perf_data__is_pipe(session->data)) {
379 buffer->data = auxtrace_copy_data(buffer->size, session);
382 buffer->data_needs_freeing = true;
383 } else if (BITS_PER_LONG == 32 &&
384 buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
385 err = auxtrace_queues__split_buffer(queues, idx, buffer);
390 err = auxtrace_queues__queue_buffer(queues, idx, buffer);
394 /* FIXME: Doesn't work for split buffer */
396 *buffer_ptr = buffer;
401 auxtrace_buffer__free(buffer);
405 int auxtrace_queues__add_event(struct auxtrace_queues *queues,
406 struct perf_session *session,
407 union perf_event *event, off_t data_offset,
408 struct auxtrace_buffer **buffer_ptr)
410 struct auxtrace_buffer buffer = {
412 .tid = event->auxtrace.tid,
413 .cpu = event->auxtrace.cpu,
414 .data_offset = data_offset,
415 .offset = event->auxtrace.offset,
416 .reference = event->auxtrace.reference,
417 .size = event->auxtrace.size,
419 unsigned int idx = event->auxtrace.idx;
421 return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
425 static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
426 struct perf_session *session,
427 off_t file_offset, size_t sz)
429 union perf_event *event;
431 char buf[PERF_SAMPLE_MAX_SIZE];
433 err = perf_session__peek_event(session, file_offset, buf,
434 PERF_SAMPLE_MAX_SIZE, &event, NULL);
438 if (event->header.type == PERF_RECORD_AUXTRACE) {
439 if (event->header.size < sizeof(struct perf_record_auxtrace) ||
440 event->header.size != sz) {
444 file_offset += event->header.size;
445 err = auxtrace_queues__add_event(queues, session, event,
452 void auxtrace_queues__free(struct auxtrace_queues *queues)
456 for (i = 0; i < queues->nr_queues; i++) {
457 while (!list_empty(&queues->queue_array[i].head)) {
458 struct auxtrace_buffer *buffer;
460 buffer = list_entry(queues->queue_array[i].head.next,
461 struct auxtrace_buffer, list);
462 list_del_init(&buffer->list);
463 auxtrace_buffer__free(buffer);
467 zfree(&queues->queue_array);
468 queues->nr_queues = 0;
471 static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
472 unsigned int pos, unsigned int queue_nr,
478 parent = (pos - 1) >> 1;
479 if (heap_array[parent].ordinal <= ordinal)
481 heap_array[pos] = heap_array[parent];
484 heap_array[pos].queue_nr = queue_nr;
485 heap_array[pos].ordinal = ordinal;
488 int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
491 struct auxtrace_heap_item *heap_array;
493 if (queue_nr >= heap->heap_sz) {
494 unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;
496 while (heap_sz <= queue_nr)
498 heap_array = realloc(heap->heap_array,
499 heap_sz * sizeof(struct auxtrace_heap_item));
502 heap->heap_array = heap_array;
503 heap->heap_sz = heap_sz;
506 auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);
511 void auxtrace_heap__free(struct auxtrace_heap *heap)
513 zfree(&heap->heap_array);
518 void auxtrace_heap__pop(struct auxtrace_heap *heap)
520 unsigned int pos, last, heap_cnt = heap->heap_cnt;
521 struct auxtrace_heap_item *heap_array;
528 heap_array = heap->heap_array;
532 unsigned int left, right;
534 left = (pos << 1) + 1;
535 if (left >= heap_cnt)
538 if (right >= heap_cnt) {
539 heap_array[pos] = heap_array[left];
542 if (heap_array[left].ordinal < heap_array[right].ordinal) {
543 heap_array[pos] = heap_array[left];
546 heap_array[pos] = heap_array[right];
552 auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
553 heap_array[last].ordinal);
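/*
 * Illustrative sketch, not part of the original file: the heap above is a
 * binary min-heap keyed by 'ordinal' (typically a timestamp), which lets
 * decoders interleave per-cpu or per-thread queues in time order.  The
 * decode_queue callback is hypothetical and stands in for decoder-specific
 * logic; it returns true and the queue's next timestamp while the queue
 * still has data.
 */
static int __maybe_unused
auxtrace_heap_merge_example(struct auxtrace_heap *heap,
			    bool (*decode_queue)(unsigned int queue_nr,
						 u64 *next_ts))
{
	while (heap->heap_cnt) {
		/* The smallest ordinal is always at the root of the heap */
		unsigned int queue_nr = heap->heap_array[0].queue_nr;
		u64 next_ts;

		auxtrace_heap__pop(heap);
		if (decode_queue(queue_nr, &next_ts)) {
			int err = auxtrace_heap__add(heap, queue_nr, next_ts);

			if (err)
				return err;
		}
	}
	return 0;
}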
556 size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
557 struct evlist *evlist)
560 return itr->info_priv_size(itr, evlist);
564 static int auxtrace_not_supported(void)
566 pr_err("AUX area tracing is not supported on this architecture\n");
570 int auxtrace_record__info_fill(struct auxtrace_record *itr,
571 struct perf_session *session,
572 struct perf_record_auxtrace_info *auxtrace_info,
576 return itr->info_fill(itr, session, auxtrace_info, priv_size);
577 return auxtrace_not_supported();
580 void auxtrace_record__free(struct auxtrace_record *itr)
586 int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
588 if (itr && itr->snapshot_start)
589 return itr->snapshot_start(itr);
593 int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
595 if (!on_exit && itr && itr->snapshot_finish)
596 return itr->snapshot_finish(itr);
600 int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
601 struct auxtrace_mmap *mm,
602 unsigned char *data, u64 *head, u64 *old)
604 if (itr && itr->find_snapshot)
605 return itr->find_snapshot(itr, idx, mm, data, head, old);
609 int auxtrace_record__options(struct auxtrace_record *itr,
610 struct evlist *evlist,
611 struct record_opts *opts)
614 itr->evlist = evlist;
615 return itr->recording_options(itr, evlist, opts);
620 u64 auxtrace_record__reference(struct auxtrace_record *itr)
623 return itr->reference(itr);
627 int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
628 struct record_opts *opts, const char *str)
633 /* PMU-agnostic options */
636 opts->auxtrace_snapshot_on_exit = true;
644 return itr->parse_snapshot_options(itr, opts, str);
646 pr_err("No AUX area tracing to snapshot\n");
650 int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
654 if (!itr->evlist || !itr->pmu)
657 evlist__for_each_entry(itr->evlist, evsel) {
658 if (evsel->core.attr.type == itr->pmu->type) {
661 return perf_evlist__enable_event_idx(itr->evlist, evsel,
669 * The event record size field is 16 bits, which results in a maximum size of about 64KiB.
670 * Allow about 4KiB for the rest of the sample record, to give a maximum
671 * AUX area sample size of 60KiB.
673 #define MAX_AUX_SAMPLE_SIZE (60 * 1024)
675 /* Arbitrary default size if no other default provided */
676 #define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)
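/*
 * Illustrative sketch, not part of the original file: where the 60KiB figure
 * above comes from.  The perf_event_header size field is 16 bits, and the
 * 4KiB allowance for the rest of the sample record is the approximation
 * stated in the comment above.
 */
static size_t __maybe_unused max_aux_sample_size_example(void)
{
	const size_t max_record_size = (1 << 16) - 1;	/* 16-bit size field */
	const size_t non_aux_allowance = 4 * 1024;	/* rest of the sample */

	return max_record_size - non_aux_allowance;	/* just under 60KiB */
}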
678 static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
679 struct record_opts *opts)
682 bool has_aux_leader = false;
685 evlist__for_each_entry(evlist, evsel) {
686 sz = evsel->core.attr.aux_sample_size;
687 if (evsel__is_group_leader(evsel)) {
688 has_aux_leader = evsel__is_aux_event(evsel);
691 pr_err("Cannot add AUX area sampling to an AUX area event\n");
693 pr_err("Cannot add AUX area sampling to a group leader\n");
697 if (sz > MAX_AUX_SAMPLE_SIZE) {
698 pr_err("AUX area sample size %u too big, max. %d\n",
699 sz, MAX_AUX_SAMPLE_SIZE);
703 if (!has_aux_leader) {
704 pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
707 evsel__set_sample_bit(evsel, AUX);
708 opts->auxtrace_sample_mode = true;
710 evsel__reset_sample_bit(evsel, AUX);
714 if (!opts->auxtrace_sample_mode) {
715 pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
719 if (!perf_can_aux_sample()) {
720 pr_err("AUX area sampling is not supported by kernel\n");
727 int auxtrace_parse_sample_options(struct auxtrace_record *itr,
728 struct evlist *evlist,
729 struct record_opts *opts, const char *str)
731 struct evsel_config_term *term;
732 struct evsel *aux_evsel;
733 bool has_aux_sample_size = false;
734 bool has_aux_leader = false;
743 pr_err("No AUX area event to sample\n");
747 sz = strtoul(str, &endptr, 0);
748 if (*endptr || sz > UINT_MAX) {
749 pr_err("Bad AUX area sampling option: '%s'\n", str);
754 sz = itr->default_aux_sample_size;
757 sz = DEFAULT_AUX_SAMPLE_SIZE;
759 /* Set aux_sample_size based on --aux-sample option */
760 evlist__for_each_entry(evlist, evsel) {
761 if (evsel__is_group_leader(evsel)) {
762 has_aux_leader = evsel__is_aux_event(evsel);
763 } else if (has_aux_leader) {
764 evsel->core.attr.aux_sample_size = sz;
769 /* Override with aux_sample_size from config term */
770 evlist__for_each_entry(evlist, evsel) {
771 if (evsel__is_aux_event(evsel))
773 term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
775 has_aux_sample_size = true;
776 evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
777 /* If possible, group with the AUX event */
778 if (aux_evsel && evsel->core.attr.aux_sample_size)
779 perf_evlist__regroup(evlist, aux_evsel, evsel);
783 if (!str && !has_aux_sample_size)
787 pr_err("No AUX area event to sample\n");
791 return auxtrace_validate_aux_sample_size(evlist, opts);
794 struct auxtrace_record *__weak
795 auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
801 static int auxtrace_index__alloc(struct list_head *head)
803 struct auxtrace_index *auxtrace_index;
805 auxtrace_index = malloc(sizeof(struct auxtrace_index));
809 auxtrace_index->nr = 0;
810 INIT_LIST_HEAD(&auxtrace_index->list);
812 list_add_tail(&auxtrace_index->list, head);
817 void auxtrace_index__free(struct list_head *head)
819 struct auxtrace_index *auxtrace_index, *n;
821 list_for_each_entry_safe(auxtrace_index, n, head, list) {
822 list_del_init(&auxtrace_index->list);
823 free(auxtrace_index);
827 static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
829 struct auxtrace_index *auxtrace_index;
832 if (list_empty(head)) {
833 err = auxtrace_index__alloc(head);
838 auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);
840 if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
841 err = auxtrace_index__alloc(head);
844 auxtrace_index = list_entry(head->prev, struct auxtrace_index,
848 return auxtrace_index;
851 int auxtrace_index__auxtrace_event(struct list_head *head,
852 union perf_event *event, off_t file_offset)
854 struct auxtrace_index *auxtrace_index;
857 auxtrace_index = auxtrace_index__last(head);
861 nr = auxtrace_index->nr;
862 auxtrace_index->entries[nr].file_offset = file_offset;
863 auxtrace_index->entries[nr].sz = event->header.size;
864 auxtrace_index->nr += 1;
869 static int auxtrace_index__do_write(int fd,
870 struct auxtrace_index *auxtrace_index)
872 struct auxtrace_index_entry ent;
875 for (i = 0; i < auxtrace_index->nr; i++) {
876 ent.file_offset = auxtrace_index->entries[i].file_offset;
877 ent.sz = auxtrace_index->entries[i].sz;
878 if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
884 int auxtrace_index__write(int fd, struct list_head *head)
886 struct auxtrace_index *auxtrace_index;
890 list_for_each_entry(auxtrace_index, head, list)
891 total += auxtrace_index->nr;
893 if (writen(fd, &total, sizeof(total)) != sizeof(total))
896 list_for_each_entry(auxtrace_index, head, list) {
897 err = auxtrace_index__do_write(fd, auxtrace_index);
905 static int auxtrace_index__process_entry(int fd, struct list_head *head,
908 struct auxtrace_index *auxtrace_index;
909 struct auxtrace_index_entry ent;
912 if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
915 auxtrace_index = auxtrace_index__last(head);
919 nr = auxtrace_index->nr;
921 auxtrace_index->entries[nr].file_offset =
922 bswap_64(ent.file_offset);
923 auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
925 auxtrace_index->entries[nr].file_offset = ent.file_offset;
926 auxtrace_index->entries[nr].sz = ent.sz;
929 auxtrace_index->nr = nr + 1;
934 int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
937 struct list_head *head = &session->auxtrace_index;
940 if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
946 if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
952 err = auxtrace_index__process_entry(fd, head, needs_swap);
960 static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
961 struct perf_session *session,
962 struct auxtrace_index_entry *ent)
964 return auxtrace_queues__add_indexed_event(queues, session,
965 ent->file_offset, ent->sz);
968 int auxtrace_queues__process_index(struct auxtrace_queues *queues,
969 struct perf_session *session)
971 struct auxtrace_index *auxtrace_index;
972 struct auxtrace_index_entry *ent;
976 if (auxtrace__dont_decode(session))
979 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
980 for (i = 0; i < auxtrace_index->nr; i++) {
981 ent = &auxtrace_index->entries[i];
982 err = auxtrace_queues__process_index_entry(queues,
992 struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
993 struct auxtrace_buffer *buffer)
996 if (list_is_last(&buffer->list, &queue->head))
998 return list_entry(buffer->list.next, struct auxtrace_buffer,
1001 if (list_empty(&queue->head))
1003 return list_entry(queue->head.next, struct auxtrace_buffer,
1008 struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
1009 struct perf_sample *sample,
1010 struct perf_session *session)
1012 struct perf_sample_id *sid;
1020 sid = perf_evlist__id2sid(session->evlist, id);
1026 if (idx >= queues->nr_queues)
1029 return &queues->queue_array[idx];
1032 int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
1033 struct perf_session *session,
1034 struct perf_sample *sample, u64 data_offset,
1037 struct auxtrace_buffer buffer = {
1039 .data_offset = data_offset,
1040 .reference = reference,
1041 .size = sample->aux_sample.size,
1043 struct perf_sample_id *sid;
1044 u64 id = sample->id;
1050 sid = perf_evlist__id2sid(session->evlist, id);
1055 buffer.tid = sid->tid;
1056 buffer.cpu = sid->cpu;
1058 return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
1066 static int auxtrace_queue_data_cb(struct perf_session *session,
1067 union perf_event *event, u64 offset,
1070 struct queue_data *qd = data;
1071 struct perf_sample sample;
1074 if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
1075 if (event->header.size < sizeof(struct perf_record_auxtrace))
1077 offset += event->header.size;
1078 return session->auxtrace->queue_data(session, NULL, event,
1082 if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
1085 err = perf_evlist__parse_sample(session->evlist, event, &sample);
1089 if (!sample.aux_sample.size)
1092 offset += sample.aux_sample.data - (void *)event;
1094 return session->auxtrace->queue_data(session, &sample, NULL, offset);
1097 int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
1099 struct queue_data qd = {
1104 if (auxtrace__dont_decode(session))
1107 if (!session->auxtrace || !session->auxtrace->queue_data)
1110 return perf_session__peek_events(session, session->header.data_offset,
1111 session->header.data_size,
1112 auxtrace_queue_data_cb, &qd);
1115 void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
1117 size_t adj = buffer->data_offset & (page_size - 1);
1118 size_t size = buffer->size + adj;
1119 off_t file_offset = buffer->data_offset - adj;
1123 return buffer->data;
1125 addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
1126 if (addr == MAP_FAILED)
1129 buffer->mmap_addr = addr;
1130 buffer->mmap_size = size;
1132 buffer->data = addr + adj;
1134 return buffer->data;
1137 void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
1139 if (!buffer->data || !buffer->mmap_addr)
1141 munmap(buffer->mmap_addr, buffer->mmap_size);
1142 buffer->mmap_addr = NULL;
1143 buffer->mmap_size = 0;
1144 buffer->data = NULL;
1145 buffer->use_data = NULL;
1148 void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
1150 auxtrace_buffer__put_data(buffer);
1151 if (buffer->data_needs_freeing) {
1152 buffer->data_needs_freeing = false;
1153 zfree(&buffer->data);
1154 buffer->use_data = NULL;
1159 void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
1161 auxtrace_buffer__drop_data(buffer);
1165 void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
1166 int code, int cpu, pid_t pid, pid_t tid, u64 ip,
1167 const char *msg, u64 timestamp)
1171 memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));
1173 auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
1174 auxtrace_error->type = type;
1175 auxtrace_error->code = code;
1176 auxtrace_error->cpu = cpu;
1177 auxtrace_error->pid = pid;
1178 auxtrace_error->tid = tid;
1179 auxtrace_error->fmt = 1;
1180 auxtrace_error->ip = ip;
1181 auxtrace_error->time = timestamp;
1182 strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
1184 size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
1185 strlen(auxtrace_error->msg) + 1;
1186 auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
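/*
 * Illustrative sketch, not part of the original file: the header.size
 * computed above covers only the used part of 'msg' plus its NUL terminator,
 * rounded up to a multiple of u64.  msg_offset is a hypothetical stand-in
 * for the offset of 'msg' within the record.
 */
static u64 __maybe_unused auxtrace_error_size_example(u64 msg_offset, size_t msg_len)
{
	/* e.g. msg_offset 40 and an 11-character message: 40 + 12 -> 56 */
	return PERF_ALIGN(msg_offset + msg_len + 1, sizeof(u64));
}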
1189 int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
1190 struct perf_tool *tool,
1191 struct perf_session *session,
1192 perf_event__handler_t process)
1194 union perf_event *ev;
1198 pr_debug2("Synthesizing auxtrace information\n");
1199 priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
1200 ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
1204 ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
1205 ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
1207 err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
1212 err = process(tool, ev, NULL, NULL);
1218 static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
1220 struct evsel *new_leader = NULL;
1221 struct evsel *evsel;
1223 /* Find new leader for the group */
1224 evlist__for_each_entry(evlist, evsel) {
1225 if (evsel->leader != leader || evsel == leader)
1229 evsel->leader = new_leader;
1232 /* Update group information */
1234 zfree(&new_leader->group_name);
1235 new_leader->group_name = leader->group_name;
1236 leader->group_name = NULL;
1238 new_leader->core.nr_members = leader->core.nr_members - 1;
1239 leader->core.nr_members = 1;
1243 static void unleader_auxtrace(struct perf_session *session)
1245 struct evsel *evsel;
1247 evlist__for_each_entry(session->evlist, evsel) {
1248 if (auxtrace__evsel_is_auxtrace(session, evsel) &&
1249 evsel__is_group_leader(evsel)) {
1250 unleader_evsel(session->evlist, evsel);
1255 int perf_event__process_auxtrace_info(struct perf_session *session,
1256 union perf_event *event)
1258 enum auxtrace_type type = event->auxtrace_info.type;
1262 fprintf(stdout, " type: %u\n", type);
1265 case PERF_AUXTRACE_INTEL_PT:
1266 err = intel_pt_process_auxtrace_info(event, session);
1268 case PERF_AUXTRACE_INTEL_BTS:
1269 err = intel_bts_process_auxtrace_info(event, session);
1271 case PERF_AUXTRACE_ARM_SPE:
1272 err = arm_spe_process_auxtrace_info(event, session);
1274 case PERF_AUXTRACE_CS_ETM:
1275 err = cs_etm__process_auxtrace_info(event, session);
1277 case PERF_AUXTRACE_S390_CPUMSF:
1278 err = s390_cpumsf_process_auxtrace_info(event, session);
1280 case PERF_AUXTRACE_UNKNOWN:
1288 unleader_auxtrace(session);
1293 s64 perf_event__process_auxtrace(struct perf_session *session,
1294 union perf_event *event)
1299 fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
1300 event->auxtrace.size, event->auxtrace.offset,
1301 event->auxtrace.reference, event->auxtrace.idx,
1302 event->auxtrace.tid, event->auxtrace.cpu);
1304 if (auxtrace__dont_decode(session))
1305 return event->auxtrace.size;
1307 if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
1310 err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
1314 return event->auxtrace.size;
1317 #define PERF_ITRACE_DEFAULT_PERIOD_TYPE PERF_ITRACE_PERIOD_NANOSECS
1318 #define PERF_ITRACE_DEFAULT_PERIOD 100000
1319 #define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ 16
1320 #define PERF_ITRACE_MAX_CALLCHAIN_SZ 1024
1321 #define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ 64
1322 #define PERF_ITRACE_MAX_LAST_BRANCH_SZ 1024
1324 void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
1327 synth_opts->branches = true;
1328 synth_opts->transactions = true;
1329 synth_opts->ptwrites = true;
1330 synth_opts->pwr_events = true;
1331 synth_opts->other_events = true;
1332 synth_opts->errors = true;
1333 synth_opts->flc = true;
1334 synth_opts->llc = true;
1335 synth_opts->tlb = true;
1336 synth_opts->mem = true;
1337 synth_opts->remote_access = true;
1340 synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
1341 synth_opts->period = 1;
1342 synth_opts->calls = true;
1344 synth_opts->instructions = true;
1345 synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
1346 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
1348 synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
1349 synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
1350 synth_opts->initial_skip = 0;
1353 static int get_flag(const char **ptr, unsigned int *flags)
1358 if (c >= 'a' && c <= 'z') {
1359 *flags |= 1 << (c - 'a');
1362 } else if (c == ' ') {
1371 static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
1377 if (get_flag(ptr, plus_flags))
1382 if (get_flag(ptr, minus_flags))
1395 * Please check tools/perf/Documentation/perf-script.txt for information
1396 * about the options parsed here. That documentation is added in a later
1397 * cset, together with the 'perf script' support for these options.
1399 int itrace_parse_synth_opts(const struct option *opt, const char *str,
1402 struct itrace_synth_opts *synth_opts = opt->value;
1405 bool period_type_set = false;
1406 bool period_set = false;
1408 synth_opts->set = true;
1411 synth_opts->dont_decode = true;
1416 itrace_synth_opts__set_default(synth_opts,
1417 synth_opts->default_no_sample);
1421 for (p = str; *p;) {
1424 synth_opts->instructions = true;
1425 while (*p == ' ' || *p == ',')
1428 synth_opts->period = strtoull(p, &endptr, 10);
1431 while (*p == ' ' || *p == ',')
1435 synth_opts->period_type =
1436 PERF_ITRACE_PERIOD_INSTRUCTIONS;
1437 period_type_set = true;
1440 synth_opts->period_type =
1441 PERF_ITRACE_PERIOD_TICKS;
1442 period_type_set = true;
1445 synth_opts->period *= 1000;
1448 synth_opts->period *= 1000;
1453 synth_opts->period_type =
1454 PERF_ITRACE_PERIOD_NANOSECS;
1455 period_type_set = true;
1465 synth_opts->branches = true;
1468 synth_opts->transactions = true;
1471 synth_opts->ptwrites = true;
1474 synth_opts->pwr_events = true;
1477 synth_opts->other_events = true;
1480 synth_opts->errors = true;
1481 if (get_flags(&p, &synth_opts->error_plus_flags,
1482 &synth_opts->error_minus_flags))
1486 synth_opts->log = true;
1487 if (get_flags(&p, &synth_opts->log_plus_flags,
1488 &synth_opts->log_minus_flags))
1492 synth_opts->branches = true;
1493 synth_opts->calls = true;
1496 synth_opts->branches = true;
1497 synth_opts->returns = true;
1502 synth_opts->add_callchain = true;
1504 synth_opts->callchain = true;
1505 synth_opts->callchain_sz =
1506 PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
1507 while (*p == ' ' || *p == ',')
1512 val = strtoul(p, &endptr, 10);
1514 if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
1516 synth_opts->callchain_sz = val;
1522 synth_opts->add_last_branch = true;
1524 synth_opts->last_branch = true;
1525 synth_opts->last_branch_sz =
1526 PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
1527 while (*p == ' ' || *p == ',')
1532 val = strtoul(p, &endptr, 10);
1535 val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
1537 synth_opts->last_branch_sz = val;
1541 synth_opts->initial_skip = strtoul(p, &endptr, 10);
1547 synth_opts->flc = true;
1550 synth_opts->llc = true;
1553 synth_opts->tlb = true;
1556 synth_opts->remote_access = true;
1559 synth_opts->mem = true;
1562 synth_opts->quick += 1;
1572 if (synth_opts->instructions) {
1573 if (!period_type_set)
1574 synth_opts->period_type =
1575 PERF_ITRACE_DEFAULT_PERIOD_TYPE;
1577 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
1583 pr_err("Bad Instruction Tracing options '%s'\n", str);
1587 static const char * const auxtrace_error_type_name[] = {
1588 [PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
1591 static const char *auxtrace_error_name(int type)
1593 const char *error_type_name = NULL;
1595 if (type < PERF_AUXTRACE_ERROR_MAX)
1596 error_type_name = auxtrace_error_type_name[type];
1597 if (!error_type_name)
1598 error_type_name = "unknown AUX";
1599 return error_type_name;
1602 size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
1604 struct perf_record_auxtrace_error *e = &event->auxtrace_error;
1605 unsigned long long nsecs = e->time;
1606 const char *msg = e->msg;
1609 ret = fprintf(fp, " %s error type %u",
1610 auxtrace_error_name(e->type), e->type);
1612 if (e->fmt && nsecs) {
1613 unsigned long secs = nsecs / NSEC_PER_SEC;
1615 nsecs -= secs * NSEC_PER_SEC;
1616 ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
1618 ret += fprintf(fp, " time 0");
1622 msg = (const char *)&e->time;
1624 ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
1625 e->cpu, e->pid, e->tid, e->ip, e->code, msg);
1629 void perf_session__auxtrace_error_inc(struct perf_session *session,
1630 union perf_event *event)
1632 struct perf_record_auxtrace_error *e = &event->auxtrace_error;
1634 if (e->type < PERF_AUXTRACE_ERROR_MAX)
1635 session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
1638 void events_stats__auxtrace_error_warn(const struct events_stats *stats)
1642 for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
1643 if (!stats->nr_auxtrace_errors[i])
1645 ui__warning("%u %s errors\n",
1646 stats->nr_auxtrace_errors[i],
1647 auxtrace_error_name(i));
1651 int perf_event__process_auxtrace_error(struct perf_session *session,
1652 union perf_event *event)
1654 if (auxtrace__dont_decode(session))
1657 perf_event__fprintf_auxtrace_error(event, stdout);
1661 static int __auxtrace_mmap__read(struct mmap *map,
1662 struct auxtrace_record *itr,
1663 struct perf_tool *tool, process_auxtrace_t fn,
1664 bool snapshot, size_t snapshot_size)
1666 struct auxtrace_mmap *mm = &map->auxtrace_mmap;
1667 u64 head, old = mm->prev, offset, ref;
1668 unsigned char *data = mm->base;
1669 size_t size, head_off, old_off, len1, len2, padding;
1670 union perf_event ev;
1671 void *data1, *data2;
1674 head = auxtrace_mmap__read_snapshot_head(mm);
1675 if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
1679 head = auxtrace_mmap__read_head(mm);
1685 pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
1686 mm->idx, old, head, head - old);
1689 head_off = head & mm->mask;
1690 old_off = old & mm->mask;
1692 head_off = head % mm->len;
1693 old_off = old % mm->len;
1696 if (head_off > old_off)
1697 size = head_off - old_off;
1699 size = mm->len - (old_off - head_off);
1701 if (snapshot && size > snapshot_size)
1702 size = snapshot_size;
1704 ref = auxtrace_record__reference(itr);
1706 if (head > old || size <= head || mm->mask) {
1707 offset = head - size;
1710 * When the buffer size is not a power of 2, 'head' wraps at the
1711 * highest multiple of the buffer size, so we have to subtract
1712 * the remainder here.
1714 u64 rem = (0ULL - mm->len) % mm->len;
1716 offset = head - size - rem;
1719 if (size > head_off) {
1720 len1 = size - head_off;
1721 data1 = &data[mm->len - len1];
1726 data1 = &data[head_off - len1];
1731 if (itr->alignment) {
1732 unsigned int unwanted = len1 % itr->alignment;
1738 /* padding must be written by fn() e.g. record__process_auxtrace() */
1739 padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
1741 padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
1743 memset(&ev, 0, sizeof(ev));
1744 ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
1745 ev.auxtrace.header.size = sizeof(ev.auxtrace);
1746 ev.auxtrace.size = size + padding;
1747 ev.auxtrace.offset = offset;
1748 ev.auxtrace.reference = ref;
1749 ev.auxtrace.idx = mm->idx;
1750 ev.auxtrace.tid = mm->tid;
1751 ev.auxtrace.cpu = mm->cpu;
1753 if (fn(tool, map, &ev, data1, len1, data2, len2))
1759 auxtrace_mmap__write_tail(mm, head);
1760 if (itr->read_finish) {
1763 err = itr->read_finish(itr, mm->idx);
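/*
 * Worked example, not part of the original file, for the non-power-of-2 case
 * handled above: 'head' wraps at the highest multiple of the buffer length
 * that fits in 64 bits, so the remainder of 2^64 divided by the length is
 * subtracted to keep 'offset' consistent across that wrap.
 */
static u64 __maybe_unused aux_non_pow2_rem_example(void)
{
	const u64 len = 24 * 1024;	/* example non-power-of-2 length */

	/* Same expression as above: 2^64 mod len, which is 16384 here */
	return (0ULL - len) % len;
}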
1772 int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
1773 struct perf_tool *tool, process_auxtrace_t fn)
1775 return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
1778 int auxtrace_mmap__read_snapshot(struct mmap *map,
1779 struct auxtrace_record *itr,
1780 struct perf_tool *tool, process_auxtrace_t fn,
1781 size_t snapshot_size)
1783 return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
1787 * struct auxtrace_cache - hash table to implement a cache
1788 * @hashtable: the hashtable
1789 * @sz: hashtable size (number of hlists)
1790 * @entry_size: size of an entry
1791 * @limit: limit the number of entries to this maximum, when reached the cache
1792 * is dropped and caching begins again with an empty cache
1793 * @cnt: current number of entries
1794 * @bits: hashtable size (@sz = 2^@bits)
1796 struct auxtrace_cache {
1797 struct hlist_head *hashtable;
1805 struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
1806 unsigned int limit_percent)
1808 struct auxtrace_cache *c;
1809 struct hlist_head *ht;
1812 c = zalloc(sizeof(struct auxtrace_cache));
1818 ht = calloc(sz, sizeof(struct hlist_head));
1822 for (i = 0; i < sz; i++)
1823 INIT_HLIST_HEAD(&ht[i]);
1827 c->entry_size = entry_size;
1828 c->limit = (c->sz * limit_percent) / 100;
1838 static void auxtrace_cache__drop(struct auxtrace_cache *c)
1840 struct auxtrace_cache_entry *entry;
1841 struct hlist_node *tmp;
1847 for (i = 0; i < c->sz; i++) {
1848 hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
1849 hlist_del(&entry->hash);
1850 auxtrace_cache__free_entry(c, entry);
1857 void auxtrace_cache__free(struct auxtrace_cache *c)
1862 auxtrace_cache__drop(c);
1863 zfree(&c->hashtable);
1867 void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
1869 return malloc(c->entry_size);
1872 void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
1878 int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
1879 struct auxtrace_cache_entry *entry)
1881 if (c->limit && ++c->cnt > c->limit)
1882 auxtrace_cache__drop(c);
1885 hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
1890 static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
1893 struct auxtrace_cache_entry *entry;
1894 struct hlist_head *hlist;
1895 struct hlist_node *n;
1900 hlist = &c->hashtable[hash_32(key, c->bits)];
1901 hlist_for_each_entry_safe(entry, n, hlist, hash) {
1902 if (entry->key == key) {
1903 hlist_del(&entry->hash);
1911 void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
1913 struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);
1915 auxtrace_cache__free_entry(c, entry);
1918 void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
1920 struct auxtrace_cache_entry *entry;
1921 struct hlist_head *hlist;
1926 hlist = &c->hashtable[hash_32(key, c->bits)];
1927 hlist_for_each_entry(entry, hlist, hash) {
1928 if (entry->key == key)
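/*
 * Illustrative sketch, not part of the original file: typical use of the
 * auxtrace_cache above.  Users embed struct auxtrace_cache_entry as the
 * first member of their own entry type; the struct and values here are
 * examples only.
 */
struct example_cache_entry {
	struct auxtrace_cache_entry	entry;	/* must be first */
	u64				value;
};

static int __maybe_unused auxtrace_cache_usage_example(u32 key, u64 value)
{
	struct auxtrace_cache *c;
	struct example_cache_entry *e;

	/* 2^10 hash lists; drop the cache when entries exceed 50% of that */
	c = auxtrace_cache__new(10, sizeof(*e), 50);
	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e) {
		auxtrace_cache__free(c);
		return -ENOMEM;
	}
	e->value = value;
	auxtrace_cache__add(c, key, &e->entry);

	e = auxtrace_cache__lookup(c, key);	/* NULL if not cached */
	if (!e || e->value != value)
		pr_debug("unexpected cache contents\n");

	auxtrace_cache__free(c);		/* also frees the entries */
	return 0;
}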
1935 static void addr_filter__free_str(struct addr_filter *filt)
1938 filt->action = NULL;
1939 filt->sym_from = NULL;
1940 filt->sym_to = NULL;
1941 filt->filename = NULL;
1944 static struct addr_filter *addr_filter__new(void)
1946 struct addr_filter *filt = zalloc(sizeof(*filt));
1949 INIT_LIST_HEAD(&filt->list);
1954 static void addr_filter__free(struct addr_filter *filt)
1957 addr_filter__free_str(filt);
1961 static void addr_filters__add(struct addr_filters *filts,
1962 struct addr_filter *filt)
1964 list_add_tail(&filt->list, &filts->head);
1968 static void addr_filters__del(struct addr_filters *filts,
1969 struct addr_filter *filt)
1971 list_del_init(&filt->list);
1975 void addr_filters__init(struct addr_filters *filts)
1977 INIT_LIST_HEAD(&filts->head);
1981 void addr_filters__exit(struct addr_filters *filts)
1983 struct addr_filter *filt, *n;
1985 list_for_each_entry_safe(filt, n, &filts->head, list) {
1986 addr_filters__del(filts, filt);
1987 addr_filter__free(filt);
1991 static int parse_num_or_str(char **inp, u64 *num, const char **str,
1992 const char *str_delim)
1994 *inp += strspn(*inp, " ");
1996 if (isdigit(**inp)) {
2002 *num = strtoull(*inp, &endptr, 0);
2013 *inp += strspn(*inp, " ");
2015 n = strcspn(*inp, str_delim);
2027 static int parse_action(struct addr_filter *filt)
2029 if (!strcmp(filt->action, "filter")) {
2032 } else if (!strcmp(filt->action, "start")) {
2034 } else if (!strcmp(filt->action, "stop")) {
2035 filt->start = false;
2036 } else if (!strcmp(filt->action, "tracestop")) {
2037 filt->start = false;
2039 filt->action += 5; /* Change 'tracestop' to 'stop' */
2046 static int parse_sym_idx(char **inp, int *idx)
2050 *inp += strspn(*inp, " ");
2057 if (**inp == 'g' || **inp == 'G') {
2065 num = strtoul(*inp, &endptr, 0);
2068 if (endptr == *inp || num > INT_MAX)
2077 static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
2079 int err = parse_num_or_str(inp, num, str, " ");
2082 err = parse_sym_idx(inp, idx);
2087 static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
2092 filt->str = fstr = strdup(*filter_inp);
2096 err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
2100 err = parse_action(filt);
2104 err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
2105 &filt->sym_from_idx);
2109 fstr += strspn(fstr, " ");
2113 err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
2120 fstr += strspn(fstr, " ");
2124 err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
2129 fstr += strspn(fstr, " ,");
2131 *filter_inp += fstr - filt->str;
2136 addr_filter__free_str(filt);
2141 int addr_filters__parse_bare_filter(struct addr_filters *filts,
2144 struct addr_filter *filt;
2145 const char *fstr = filter;
2149 filt = addr_filter__new();
2150 err = parse_one_filter(filt, &fstr);
2152 addr_filter__free(filt);
2153 addr_filters__exit(filts);
2156 addr_filters__add(filts, filt);
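/*
 * Illustrative sketch, not part of the original file: feeding a bare filter
 * string through the parser above.  The string and file name are examples
 * only; the accepted format is the one printed by parse_addr_filter() below:
 * filter|start|stop|tracestop <start sym or addr> [/ <end sym or size>] [@<file>]
 */
static int __maybe_unused addr_filter_parse_example(void)
{
	struct addr_filters filts;
	const char *str = "filter main @ /bin/ls";
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);
	err = addr_filters__parse_bare_filter(&filts, str);
	if (err)
		return err;

	list_for_each_entry(filt, &filts.head, list) {
		/*
		 * For the string above: filt->action is "filter",
		 * filt->sym_from is "main", filt->filename is "/bin/ls",
		 * and filt->start and filt->range are true.
		 */
		pr_debug("parsed filter for symbol '%s'\n", filt->sym_from);
	}

	addr_filters__exit(&filts);
	return 0;
}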
2175 static bool kern_sym_match(struct sym_args *args, const char *name, char type)
2177 /* A function with the same name, and global or the n'th found or any */
2178 return kallsyms__is_function(type) &&
2179 !strcmp(name, args->name) &&
2180 ((args->global && isupper(type)) ||
2181 (args->selected && ++(args->cnt) == args->idx) ||
2182 (!args->global && !args->selected));
2185 static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
2187 struct sym_args *args = arg;
2189 if (args->started) {
2191 args->size = start - args->start;
2192 if (args->selected) {
2195 } else if (kern_sym_match(args, name, type)) {
2196 args->duplicate = true;
2199 } else if (kern_sym_match(args, name, type)) {
2200 args->started = true;
2201 args->start = start;
2207 static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
2209 struct sym_args *args = arg;
2211 if (kern_sym_match(args, name, type)) {
2212 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
2213 ++args->cnt, start, type, name);
2215 } else if (args->near) {
2217 pr_err("\t\twhich is near\t\t%s\n", name);
2223 static int sym_not_found_error(const char *sym_name, int idx)
2226 pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
2229 pr_err("Global symbol '%s' not found.\n", sym_name);
2231 pr_err("Symbol '%s' not found.\n", sym_name);
2233 pr_err("Note that symbols must be functions.\n");
2238 static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
2240 struct sym_args args = {
2244 .selected = idx > 0,
2251 err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
2253 pr_err("Failed to parse /proc/kallsyms\n");
2257 if (args.duplicate) {
2258 pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
2260 kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
2261 pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
2263 pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
2267 if (!args.started) {
2268 pr_err("Kernel symbol lookup: ");
2269 return sym_not_found_error(sym_name, idx);
2272 *start = args.start;
2278 static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
2279 char type, u64 start)
2281 struct sym_args *args = arg;
2283 if (!kallsyms__is_function(type))
2286 if (!args->started) {
2287 args->started = true;
2288 args->start = start;
2290 /* Don't know exactly where the kernel ends, so we add a page */
2291 args->size = round_up(start, page_size) + page_size - args->start;
2296 static int addr_filter__entire_kernel(struct addr_filter *filt)
2298 struct sym_args args = { .started = false };
2301 err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
2302 if (err < 0 || !args.started) {
2303 pr_err("Failed to parse /proc/kallsyms\n");
2307 filt->addr = args.start;
2308 filt->size = args.size;
2313 static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
2315 if (start + size >= filt->addr)
2318 if (filt->sym_from) {
2319 pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
2320 filt->sym_to, start, filt->sym_from, filt->addr);
2322 pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64")\n",
2323 filt->sym_to, start, filt->addr);
2329 static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
2331 bool no_size = false;
2335 if (symbol_conf.kptr_restrict) {
2336 pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
2340 if (filt->sym_from && !strcmp(filt->sym_from, "*"))
2341 return addr_filter__entire_kernel(filt);
2343 if (filt->sym_from) {
2344 err = find_kern_sym(filt->sym_from, &start, &size,
2345 filt->sym_from_idx);
2349 if (filt->range && !filt->size && !filt->sym_to) {
2356 err = find_kern_sym(filt->sym_to, &start, &size,
2361 err = check_end_after_start(filt, start, size);
2364 filt->size = start + size - filt->addr;
2368 /* The very last symbol in kallsyms does not imply a particular size */
2370 pr_err("Cannot determine size of symbol '%s'\n",
2371 filt->sym_to ? filt->sym_to : filt->sym_from);
2378 static struct dso *load_dso(const char *name)
2383 map = dso__new_map(name);
2387 if (map__load(map) < 0)
2388 pr_err("File '%s' not found or has no symbols.\n", name);
2390 dso = dso__get(map->dso);
2397 static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
2400 /* Same name, and global or the n'th found or any */
2401 return !arch__compare_symbol_names(name, sym->name) &&
2402 ((!idx && sym->binding == STB_GLOBAL) ||
2403 (idx > 0 && ++*cnt == idx) ||
2407 static void print_duplicate_syms(struct dso *dso, const char *sym_name)
2413 pr_err("Multiple symbols with name '%s'\n", sym_name);
2415 sym = dso__first_symbol(dso);
2417 if (dso_sym_match(sym, sym_name, &cnt, -1)) {
2418 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
2420 sym->binding == STB_GLOBAL ? 'g' :
2421 sym->binding == STB_LOCAL ? 'l' : 'w',
2426 pr_err("\t\twhich is near\t\t%s\n", sym->name);
2428 sym = dso__next_symbol(sym);
2431 pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
2433 pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
2436 static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
2445 sym = dso__first_symbol(dso);
2449 *size = sym->start - *start;
2453 } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
2454 print_duplicate_syms(dso, sym_name);
2457 } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
2458 *start = sym->start;
2459 *size = sym->end - sym->start;
2461 sym = dso__next_symbol(sym);
2465 return sym_not_found_error(sym_name, idx);
2470 static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
2472 if (dso__data_file_size(dso, NULL)) {
2473 pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
2479 filt->size = dso->data.file_size;
2484 static int addr_filter__resolve_syms(struct addr_filter *filt)
2490 if (!filt->sym_from && !filt->sym_to)
2493 if (!filt->filename)
2494 return addr_filter__resolve_kernel_syms(filt);
2496 dso = load_dso(filt->filename);
2498 pr_err("Failed to load symbols from: %s\n", filt->filename);
2502 if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
2503 err = addr_filter__entire_dso(filt, dso);
2507 if (filt->sym_from) {
2508 err = find_dso_sym(dso, filt->sym_from, &start, &size,
2509 filt->sym_from_idx);
2513 if (filt->range && !filt->size && !filt->sym_to)
2518 err = find_dso_sym(dso, filt->sym_to, &start, &size,
2523 err = check_end_after_start(filt, start, size);
2527 filt->size = start + size - filt->addr;
2536 static char *addr_filter__to_str(struct addr_filter *filt)
2538 char filename_buf[PATH_MAX];
2539 const char *at = "";
2540 const char *fn = "";
2544 if (filt->filename) {
2546 fn = realpath(filt->filename, filename_buf);
2552 err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
2553 filt->action, filt->addr, filt->size, at, fn);
2555 err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
2556 filt->action, filt->addr, at, fn);
2559 return err < 0 ? NULL : filter;
2562 static int parse_addr_filter(struct evsel *evsel, const char *filter,
2565 struct addr_filters filts;
2566 struct addr_filter *filt;
2569 addr_filters__init(&filts);
2571 err = addr_filters__parse_bare_filter(&filts, filter);
2575 if (filts.cnt > max_nr) {
2576 pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
2582 list_for_each_entry(filt, &filts.head, list) {
2585 err = addr_filter__resolve_syms(filt);
2589 new_filter = addr_filter__to_str(filt);
2595 if (evsel__append_addr_filter(evsel, new_filter)) {
2602 addr_filters__exit(&filts);
2605 pr_err("Failed to parse address filter: '%s'\n", filter);
2606 pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
2607 pr_err("Where multiple filters are separated by space or comma.\n");
2613 static int evsel__nr_addr_filter(struct evsel *evsel)
2615 struct perf_pmu *pmu = evsel__find_pmu(evsel);
2616 int nr_addr_filters = 0;
2621 perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);
2623 return nr_addr_filters;
2626 int auxtrace_parse_filters(struct evlist *evlist)
2628 struct evsel *evsel;
2632 evlist__for_each_entry(evlist, evsel) {
2633 filter = evsel->filter;
2634 max_nr = evsel__nr_addr_filter(evsel);
2635 if (!filter || !max_nr)
2637 evsel->filter = NULL;
2638 err = parse_addr_filter(evsel, filter, max_nr);
2642 pr_debug("Address filter: %s\n", evsel->filter);
2648 int auxtrace__process_event(struct perf_session *session, union perf_event *event,
2649 struct perf_sample *sample, struct perf_tool *tool)
2651 if (!session->auxtrace)
2654 return session->auxtrace->process_event(session, event, sample, tool);
2657 void auxtrace__dump_auxtrace_sample(struct perf_session *session,
2658 struct perf_sample *sample)
2660 if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
2661 auxtrace__dont_decode(session))
2664 session->auxtrace->dump_auxtrace_sample(session, sample);
2667 int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
2669 if (!session->auxtrace)
2672 return session->auxtrace->flush_events(session, tool);
2675 void auxtrace__free_events(struct perf_session *session)
2677 if (!session->auxtrace)
2680 return session->auxtrace->free_events(session);
2683 void auxtrace__free(struct perf_session *session)
2685 if (!session->auxtrace)
2688 return session->auxtrace->free(session);
2691 bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
2692 struct evsel *evsel)
2694 if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
2697 return session->auxtrace->evsel_is_auxtrace(session, evsel);