/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
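
/*
 * FD() yields the perf_event_open() file descriptor for a given
 * (evsel, cpu, thread) triple; SID() yields the matching
 * struct perf_sample_id slot used to map sample ids back to evsels.
 */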
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);
	return evlist;
}
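
/*
 * Usage sketch (illustrative, not part of this file; cpus/threads come
 * from cpu_map__new()/thread_map__new_str() and error handling is elided):
 *
 *	struct perf_evlist *evlist = perf_evlist__new(cpus, threads);
 *	if (evlist == NULL)
 *		return -ENOMEM;
 */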
void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel, *first;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts, first);

		/*
		 * With more than one event we need PERF_SAMPLE_ID to
		 * demultiplex samples back to their evsel.
		 */
		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;

error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
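
/*
 * Usage sketch (illustrative): record-style tools given no explicit event
 * fall back to the default "cycles" event added above.
 *
 *	if (evlist->nr_entries == 0 && perf_evlist__add_default(evlist) < 0) {
 *		pr_err("Not enough memory for event selector list\n");
 *		return -ENOMEM;
 *	}
 */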
int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	/* "sched:sched_switch" -> "sched/sched_switch" */
	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}
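
/*
 * Example (path is illustrative): for evname "sched:sched_switch" this
 * reads the numeric tracepoint id from something like
 * /sys/kernel/debug/tracing/events/sched/sched_switch/id.
 */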
int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);
		if (err < 0)
			goto out_free_attrs;

		attrs[i].type	       = PERF_TYPE_TRACEPOINT;
		attrs[i].config	       = err;
		attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}
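
/*
 * Usage sketch (a minimal example, error handling elided):
 *
 *	const char *tps[] = { "sched:sched_switch", "sched:sched_wakeup" };
 *
 *	if (perf_evlist__add_tracepoints(evlist, tps, ARRAY_SIZE(tps)) < 0)
 *		pr_err("failed to add tracepoints\n");
 */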
static struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}
int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
					  const struct perf_evsel_str_handler *assocs,
					  size_t nr_assocs)
{
	struct perf_evsel *evsel;
	int err;
	size_t i;

	for (i = 0; i < nr_assocs; i++) {
		err = trace_event__id(assocs[i].name);
		if (err < 0)
			goto out;

		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out;
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
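
/*
 * The collected descriptors feed a plain poll() loop; a minimal sketch
 * (infinite timeout is illustrative):
 *
 *	if (poll(evlist->pollfd, evlist->nr_fds, -1) < 0)
 *		perror("poll");
 */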
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	/* skip the enabled/running times, if present, to reach the id */
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	/* Samples carry no id at all: fall back to the first evsel */
	if (!perf_evlist__sample_id_all(evlist))
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	return NULL;
}
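
/*
 * Demultiplexing sketch (illustrative): a sample parsed from the ring
 * buffer carries the kernel-assigned id, which maps back to its evsel.
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample.id);
 *	if (evsel == NULL)
 *		pr_warning("unknown sample id %" PRIu64 "\n", sample.id);
 */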
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
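
/*
 * Consumption sketch (a minimal reader loop, processing elided):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		... parse and deliver event ...
 *	}
 */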
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					/* redirect this event into the first mmap */
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
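
/*
 * Setup sketch (illustrative; 128 pages is an arbitrary choice): the usual
 * order is create the maps, open the events, then mmap the ring buffers.
 *
 *	if (perf_evlist__open(evlist, false) < 0)
 *		goto out;
 *	if (perf_evlist__mmap(evlist, 128, false) < 0)
 *		goto out;
 */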
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}
u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}
u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}
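
/*
 * Worked example (sizes assume u32 tid/pid and u64 time): with
 * sample_id_all set and sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME,
 * the trailer appended to each non-sample event is 2 * 4 + 8 = 16 bytes.
 */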
bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}
bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
	struct perf_evsel *evsel, *first;
	int err, ncpus, nthreads;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		struct xyarray *group_fd = NULL;

		if (group && evsel != first)
			group_fd = first->fd;

		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
				       group, group_fd);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	errno = -err;
	return err;
}
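
/*
 * When group is true, every evsel after the first is opened with the
 * leader's fd array as group_fd, so the kernel schedules the whole set as
 * one unit. Illustrative calls:
 *
 *	perf_evlist__open(evlist, false);	independent counters
 *	perf_evlist__open(evlist, true);	one scheduling group
 */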
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * exec.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}
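
/*
 * Workload lifecycle sketch (illustrative): fork the corked child, finish
 * setting up counters, then uncork it so the exec and the measurement
 * start together.
 *
 *	if (perf_evlist__prepare_workload(evlist, opts, argv) < 0)
 *		return -1;
 *	... open events, mmap ring buffers ...
 *	perf_evlist__start_workload(evlist);
 */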