/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
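/*
 * Editorial example (not part of the original file): the typical lifecycle
 * of an evlist built from the constructors above. Error handling is elided
 * and the exact event set is whatever perf_evlist__add_default() chooses:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__open(evlist) < 0)
 *		goto out_delete;
 *	perf_evlist__enable(evlist);
 *	...
 *	perf_evlist__disable(evlist);
 * out_delete:
 *	perf_evlist__delete(evlist);
 */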
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}
static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs), so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}
void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
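/*
 * Editorial example: after __perf_evlist__set_leader() runs on a list with
 * three entries, the first entry leads the whole group:
 *
 *	entries: [cycles] -> [instructions] -> [branches]
 *	cycles->leader       == cycles, cycles->nr_members == 3
 *	instructions->leader == cycles
 *	branches->leader     == cycles
 */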
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
		if (fd != -1) {
			close(fd);
			break;
		}

		--attr->precise_ip;
	}
}
int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
{
	struct perf_evsel *evsel = perf_evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}
int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}
void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}
void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}
static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}
static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}
int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}
static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}
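/*
 * Editorial sketch of how the pollfd plumbing above is meant to be driven:
 * a caller waits for data, drains the mmaps, and prunes fds whose threads
 * exited (POLLHUP), stopping once no pollable fd remains:
 *
 *	while (!done) {
 *		if (perf_evlist__poll(evlist, timeout) <= 0)
 *			continue;
 *		... drain the mmaps ...
 *		if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
 *			break;	// every fd hung up, nothing left to read
 *	}
 */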
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
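/*
 * Worked example for the legacy path above: with read_format ==
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID, the read() fills:
 *
 *	read_data[0] = counter value
 *	read_data[1] = time_enabled	(id_idx bumped to 2)
 *	read_data[2] = time_running	(id_idx bumped to 3)
 *	read_data[3] = id		<- what we are after
 */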
static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * Checking for messup is required for a forward overwritable ring
	 * buffer: the memory pointed to by md->prev can be overwritten in
	 * that case. There is no need for a read-write ring buffer: the
	 * kernel stops outputting when it hits md->prev (perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}
union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check for messup with a backward ring buffer:
	 * we can always read arbitrarily long data from it, unless
	 * we forget to pause it before reading.
	 */
	return perf_mmap__read_backward(md);
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}

void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}
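/*
 * Editorial sketch of the canonical consumer loop for a forward ring
 * buffer mmap'ed via perf_evlist__mmap() (error handling elided):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		struct perf_sample sample;
 *
 *		if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
 *			... use sample ...
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */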
static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->backward_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->backward_mmap[i]);
}
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
}
static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		map[i].fd = -1;
		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_evlist__mmap_consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		refcount_set(&map[i].refcnt, 0);
	}
	return map;
}
static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu_idx,
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;
	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;
		int cpu;

		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
		}

		if (evsel->system_wide && thread)
			continue;

		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_backward))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}
unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once-upon-a-time good value, i.e. things look
		 * strange since we can't read the sysctl value, but let's
		 * not die yet: fall back to a reasonable default.
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}
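/*
 * Worked example, assuming the common perf_event_mlock_kb default of
 * 516 kB and 4 kB pages:
 *
 *	max   = 516 - (4096 / 1024) = 512 kB
 *	pages = (512 * 1024) / 4096 = 128, already a power of 2
 *
 * so an unprivileged user gets a 128-page (512 kB) ring buffer.
 */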
size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;

		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
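/*
 * Editorial examples of what parse_pages_arg() accepts (4 kB pages
 * assumed):
 *
 *	"16"	-> 16 pages (already a power of 2)
 *	"100"	-> rounded up to 128 pages, with a pr_info() notice
 *	"512K"	-> 512 kB, i.e. 128 pages
 *	"3B"	-> aligned up to one page
 */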
int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}
/**
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 bool overwrite, unsigned int auxtrace_pages,
			 bool auxtrace_overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (!evlist->mmap)
		evlist->mmap = perf_evlist__alloc_mmap(evlist);
	if (!evlist->mmap)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
}
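/*
 * Editorial sketch tying the pieces together the way a record-style tool
 * would (target parsed elsewhere, error handling and mmap_pages value are
 * assumptions of this sketch):
 *
 *	if (perf_evlist__create_maps(evlist, &target) < 0)
 *		goto out;
 *	if (perf_evlist__open(evlist) < 0)
 *		goto out;
 *	if (perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *		goto out;
 *	perf_evlist__enable(evlist);
 */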
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	struct cpu_map *cpus;
	struct thread_map *threads;

	threads = thread_map__new_str(target->pid, target->tid, target->uid);
	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = cpu_map__dummy_new();
	else
		cpus = cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_delete_threads:
	thread_map__put(threads);
	return -1;
}
void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't
	 * being changed, i.e. don't put it. Note we are assuming the maps
	 * that are being applied are brand new and evlist is taking
	 * ownership of the original reference count of 1. If that is not
	 * the case it is up to the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		cpu_map__put(evlist->cpus);
		evlist->cpus = cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		thread_map__put(evlist->threads);
		evlist->threads = thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}
void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__set_sample_bit(evsel, bit);
}

void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__reset_sample_bit(evsel, bit);
}
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * CPU limit, so the evlist and evsel maps should always be
		 * the same.
		 */
		err = perf_evsel__apply_filter(evsel, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = perf_evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}

int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
}
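/*
 * Editorial example: perf_evlist__set_filter_pids(evlist, 2, pids) with
 * pids = { 1, 2 } builds and applies the tracepoint filter string:
 *
 *	"common_pid != 1 && common_pid != 2"
 *
 * which is how a tool can avoid sampling its own threads.
 */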
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}
u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->attr.branch_sample_type;
	return branch_type;
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
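/*
 * Worked example: with sample_id_all set and sample_type containing
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER, the id
 * trailer on non-sample events is:
 *
 *	2 * sizeof(u32)	(pid, tid)	=  8
 *	sizeof(u64)	(time)		=  8
 *	sizeof(u64)	(identifier)	=  8
 *					  --
 *					  24 bytes
 */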
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}
bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}
static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	struct cpu_map	  *cpus;
	struct thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all-CPUs map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(evlist, cpus, threads);
	err = 0;
out:
	return err;
out_put:
	cpu_map__put(cpus);
	goto out;
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide,
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
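/*
 * Editorial sketch of the fork()+cork pattern above as a caller sees it:
 * the child is forked early but blocks on the pipe, counters are set up
 * in between, and the single-byte write releases the exec():
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	// ... perf_evlist__open(), perf_evlist__mmap(), enable ...
 *	perf_evlist__start_workload(evlist);	// writes the one byte
 *	// ... consume events until the child exits ...
 */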
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}
int perf_evlist__strerror_open(struct perf_evlist *evlist,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct perf_evsel *first = perf_evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}
struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
			       const char *str)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}
void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->backward_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get here\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}
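/*
 * Editorial sketch of how a reader drives the state machine above: pause
 * the backward buffers before reading so the kernel cannot overwrite data
 * mid-read, then mark them empty and resume:
 *
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING); // pauses
 *	// ... perf_evlist__mmap_read_backward() on each map ...
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);      // resumes
 */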
bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->attr.exclude_kernel)
			return false;
	}

	return true;
}