// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include "util/mmap.h"
#include "thread_map.h"
#include <internal/lib.h> // page_size
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "parse-events.h"
#include <subcmd/parse-options.h>
#include <sys/ioctl.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>
#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

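/*
 * Usage sketch (illustrative, not from the original file): FD()/SID() index
 * the per-evsel xyarray by *map index*, not by system CPU/TID numbers:
 *
 *	int fd = FD(evsel, cpu_map_idx, thread_map_idx);
 *
 *	if (fd >= 0)
 *		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 */
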
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
	evlist->ctl_fd.fd = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.pos = -1;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

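/*
 * Typical lifecycle (a sketch assuming the default cycles event added by
 * evlist__add_default(); error handling elided):
 *
 *	struct evlist *evlist = evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	evlist__open(evlist);
 *	evlist__enable(evlist);
 *	...
 *	evlist__disable(evlist);
 *	evlist__delete(evlist);		// closes, purges and frees
 */
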
/**
 * evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	entry->evlist = evlist;
	entry->idx = evlist->core.nr_entries;
	entry->tracking = !entry->idx;

	perf_evlist__add(&evlist->core, &entry->core);

	if (evlist->core.nr_entries == 1)
		evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
{
	while (!list_empty(list)) {
		struct evsel *evsel, *temp, *leader = NULL;

		__evlist__for_each_entry_safe(list, temp, evsel) {
			list_del_init(&evsel->core.node);
			evlist__add(evlist, evsel);
			leader = evsel;
			break;
		}

		__evlist__for_each_entry_safe(list, temp, evsel) {
			if (evsel->leader == leader) {
				list_del_init(&evsel->core.node);
				evlist__add(evlist, evsel);
			}
		}
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);

		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

void __evlist__set_leader(struct list_head *list)
{
	struct evsel *evsel, *leader;

	leader = list_entry(list->next, struct evsel, core.node);
	evsel = list_entry(list->prev, struct evsel, core.node);

	leader->core.nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void evlist__set_leader(struct evlist *evlist)
{
	if (evlist->core.nr_entries) {
		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
		__evlist__set_leader(&evlist->core.entries);
	}
}

int __evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel = evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

int evlist__add_dummy(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

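/*
 * Caller sketch (hypothetical attrs array, following the pattern used by the
 * builtin tools):
 *
 *	struct perf_event_attr default_attrs[] = {
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK   },
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
 *	};
 *
 *	__evlist__add_default_attrs(evlist, default_attrs, ARRAY_SIZE(default_attrs));
 */
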
struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}

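/*
 * Sketch: add a tracepoint with a handler that is later retrieved via
 * evsel->handler; the sched:sched_switch name and handle_switch callback are
 * just illustrative:
 *
 *	if (evlist__add_newtp(evlist, "sched", "sched_switch", handle_switch))
 *		return -1;
 */
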
static int evlist__nr_threads(struct evlist *evlist, struct evsel *evsel)
{
	if (evsel->core.system_wide)
		return 1;
	else
		return perf_thread_map__nr(evlist->core.threads);
}

void evlist__cpu_iter_start(struct evlist *evlist)
{
	struct evsel *pos;

	/*
	 * Reset the per evsel cpu_iter. This is needed because
	 * each evsel's cpumap may have a different index space,
	 * and some operations need the index to modify
	 * the FD xyarray (e.g. open, close)
	 */
	evlist__for_each_entry(evlist, pos)
		pos->cpu_iter = 0;
}

bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
{
	if (ev->cpu_iter >= ev->core.cpus->nr)
		return true;
	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
		return true;
	return false;
}

bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
{
	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
		ev->cpu_iter++;
		return false;
	}
	return true;
}

void evlist__disable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i, imm = 0;
	bool has_imm = false;

	if (affinity__setup(&affinity) < 0)
		return;

	/* Disable 'immediate' events last */
	for (imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist, i, cpu) {
			affinity__set(&affinity, cpu);

			evlist__for_each_entry(evlist, pos) {
				if (evsel__cpu_iter_skip(pos, cpu))
					continue;
				if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
					continue;
				if (pos->immediate)
					has_imm = true;
				if (pos->immediate != imm)
					continue;
				evsel__disable_cpu(pos, pos->cpu_iter - 1);
			}
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = true;
	}

	evlist->enabled = false;
}

void evlist__enable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i;

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evlist, pos) {
			if (evsel__cpu_iter_skip(pos, cpu))
				continue;
			if (!evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			evsel__enable_cpu(pos, pos->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = false;
	}

	evlist->enabled = true;
}

void evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = evlist__nr_threads(evlist, evsel);

	if (!evsel->core.fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
{
	int cpu;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

	if (!evsel->core.fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

	if (per_cpu_mmaps)
		return evlist__enable_event_cpu(evlist, evsel, idx);

	return evlist__enable_event_thread(evlist, evsel, idx);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

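/*
 * These wrappers are usually combined into an event loop; a minimal sketch
 * (ring-buffer consumption via perf_mmap__read_event() elided):
 *
 *	while (!done) {
 *		if (evlist__poll(evlist, 100) < 0)	// timeout in ms
 *			break;
 *		// drain the evlist->mmap[] ring buffers here
 *		evlist__filter_pollfd(evlist, POLLERR | POLLHUP);
 *	}
 */
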
struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__pause(struct evlist *evlist)
{
	return evlist__set_paused(evlist, true);
}

static int evlist__resume(struct evlist *evlist)
{
	return evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_mmap_param *_mp,
			 int idx, bool per_cpu)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, int cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet, as the mmap sizes in this won't be wrong.
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

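/*
 * Worked example (a sketch assuming 4 KiB pages): with the kernel's default
 * kernel/perf_event_mlock_kb of 516 KiB, max becomes 512 KiB after the
 * page subtraction above, i.e. 128 pages. evlist__mmap_size(128) is then
 * (128 + 1) * 4096 = 528384 bytes: 128 data pages plus one control page for
 * the ring-buffer header.
 */
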
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;

		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
{
	return __evlist__parse_mmap_pages(opt->value, str);
}

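/*
 * Examples of accepted -m/--mmap-pages arguments (sketch): "-m 16" asks for
 * 16 pages; "-m 512K" is a size that gets converted to pages; values that
 * are not a power of two are rounded up, so "-m 3" becomes 4 pages with a
 * pr_info() note from parse_pages_arg() above.
 */
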
/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
			 int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If '-a' and '--per-thread' are both passed to perf record,
	 * '-a' overrides '--per-thread': target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If only '--per-thread' is passed,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So for perf record, target->per_thread && target->system_wide
	 * is false and thread_map__new_str doesn't call
	 * thread_map__new_all_cpus, which keeps perf record's
	 * current behavior.
	 *
	 * perf stat, however, allows target->per_thread and
	 * target->system_wide to both be true, meaning collect
	 * system-wide per-thread data; thread_map__new_str will then call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* as evlist now has references, put count here */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * CPU limit, so the evlist and evsel maps should always be the same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__append_tp_filter_pids(evlist, 1, &pid);
}

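/*
 * The generated filter is a plain tracepoint filter string, e.g. for
 * pids = {1, 2} the helpers above build:
 *
 *	"common_pid != 1 && common_pid != 2"
 *
 * and set or append it on every tracepoint evsel in the list.
 */
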
bool evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}

u64 evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
			return false;
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u16 evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->core.attr.sample_id_all)
		goto out;

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

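/*
 * Worked example (sketch): with sample_id_all set and
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER,
 * the id header is 2 * 4 bytes (pid/tid) + 8 (time) + 8 (identifier)
 * = 24 bytes appended to every non-sample event.
 */
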
bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}

void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct affinity affinity;
	int cpu, i;

	/*
	 * With perf record core.cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.cpus) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;
	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry_reverse(evlist, evsel) {
			if (evsel__cpu_iter_skip(evsel, cpu))
				continue;
			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
}

static int evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = 0;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus) {
		err = -ENOMEM;
		goto out;
	}

	threads = perf_thread_map__new_dummy();
	if (!threads) {
		err = -ENOMEM;
		goto out_put;
	}

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	perf_thread_map__put(threads);
out_put:
	perf_cpu_map__put(cpus);
out:
	return err;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}

int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

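/*
 * Sketch of the cork protocol from the parent's side (error handling and
 * option plumbing such as opts->mmap_pages are illustrative only):
 *
 *	evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	evlist__open(evlist);
 *	evlist__mmap(evlist, opts->mmap_pages);
 *	evlist__enable(evlist);
 *	evlist__start_workload(evlist);	// writes 1 byte to cork_fd, execvp() runs
 */
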
int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample(evsel, event, sample);
}

int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		evlist__pause(evlist);
		break;
	case RESUME:
		evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in the data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel->leader;

	pr_debug("Weak group for %s/%d failed\n",
		 leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (c2->leader == leader) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			c2->leader = c2;
			c2->core.nr_members = 0;
			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	return leader;
}

static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *s, *p;
	int ret = 0, fd;

	if (strncmp(str, "fifo:", 5))
		return -EINVAL;

	str += 5;
	if (!*str || *str == ',')
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	/*
	 * O_RDWR avoids POLLHUPs, which is necessary to allow the other
	 * end of a FIFO to be repeatedly opened and closed.
	 */
	fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
	if (fd < 0) {
		pr_err("Failed to open '%s'\n", s);
		ret = -errno;
		goto out_free;
	}
	*ctl_fd = fd;
	*ctl_fd_close = true;

	if (p && *++p) {
		/* O_RDWR | O_NONBLOCK means the other end need not be open */
		fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
		if (fd < 0) {
			pr_err("Failed to open '%s'\n", p);
			ret = -errno;
			goto out_free;
		}
		*ctl_fd_ack = fd;
	}

out_free:
	free(s);
	return ret;
}

int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *comma = NULL, *endptr = NULL;

	*ctl_fd_close = false;

	if (strncmp(str, "fd:", 3))
		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);

	*ctl_fd = strtoul(&str[3], &endptr, 0);
	if (endptr == &str[3])
		return -EINVAL;

	comma = strchr(str, ',');
	if (comma) {
		if (endptr != comma)
			return -EINVAL;

		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
		if (endptr == comma + 1 || *endptr != '\0')
			return -EINVAL;
	}

	return 0;
}

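/*
 * Accepted --control specs (sketch): "fd:10" or "fd:10,11" for pre-opened
 * descriptors, "fifo:ctl.fifo" or "fifo:ctl.fifo,ack.fifo" for paths; the
 * second value, when present, names the ack channel.
 */
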
void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
{
	if (*ctl_fd_close) {
		*ctl_fd_close = false;
		close(ctl_fd);
		if (ctl_fd_ack >= 0)
			close(ctl_fd_ack);
	}
}

int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
{
	if (fd == -1) {
		pr_debug("Control descriptor is not initialized\n");
		return 0;
	}

	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
						     fdarray_flag__nonfilterable);
	if (evlist->ctl_fd.pos < 0) {
		evlist->ctl_fd.pos = -1;
		pr_err("Failed to add ctl fd entry: %m\n");
		return -1;
	}

	evlist->ctl_fd.fd = fd;
	evlist->ctl_fd.ack = ack;

	return 0;
}

bool evlist__ctlfd_initialized(struct evlist *evlist)
{
	return evlist->ctl_fd.pos >= 0;
}

int evlist__finalize_ctlfd(struct evlist *evlist)
{
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	entries[evlist->ctl_fd.pos].fd = -1;
	entries[evlist->ctl_fd.pos].events = 0;
	entries[evlist->ctl_fd.pos].revents = 0;

	evlist->ctl_fd.pos = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.fd = -1;

	return 0;
}

static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
			      char *cmd_data, size_t data_size)
{
	int err;
	char c;
	size_t bytes_read = 0;

	*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
	memset(cmd_data, 0, data_size);
	data_size--;

	do {
		err = read(evlist->ctl_fd.fd, &c, 1);
		if (err > 0) {
			if (c == '\n' || c == '\0')
				break;
			cmd_data[bytes_read++] = c;
			if (bytes_read == data_size)
				break;
			continue;
		} else if (err == -1) {
			if (errno == EINTR)
				continue;
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				err = 0;
			else
				pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
		}
		break;
	} while (1);

	pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
		 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");

	if (bytes_read > 0) {
		if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
			     (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_ENABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
				    (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_DISABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
				    (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_SNAPSHOT;
			pr_debug("is snapshot\n");
		}
	}

	return bytes_read ? (int)bytes_read : err;
}

int evlist__ctlfd_ack(struct evlist *evlist)
{
	int err;

	if (evlist->ctl_fd.ack == -1)
		return 0;

	err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
		    sizeof(EVLIST_CTL_CMD_ACK_TAG));
	if (err == -1)
		pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);

	return err;
}

int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
{
	int err = 0;
	char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
		return 0;

	if (entries[ctlfd_pos].revents & POLLIN) {
		err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
					 EVLIST_CTL_CMD_MAX_LEN);
		if (err > 0) {
			switch (*cmd) {
			case EVLIST_CTL_CMD_ENABLE:
				evlist__enable(evlist);
				break;
			case EVLIST_CTL_CMD_DISABLE:
				evlist__disable(evlist);
				break;
			case EVLIST_CTL_CMD_SNAPSHOT:
				break;
			case EVLIST_CTL_CMD_ACK:
			case EVLIST_CTL_CMD_UNSUPPORTED:
			default:
				pr_debug("ctlfd: unsupported %d\n", *cmd);
				break;
			}
			if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
			      *cmd == EVLIST_CTL_CMD_SNAPSHOT))
				evlist__ctlfd_ack(evlist);
		}
	}

	if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
		evlist__finalize_ctlfd(evlist);
	else
		entries[ctlfd_pos].revents = 0;

	return err;
}

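/*
 * Sketch of the driving loop used by the tools (names abbreviated, error
 * handling elided):
 *
 *	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
 *
 *	evlist__initialize_ctlfd(evlist, ctl_fd, ctl_fd_ack);
 *	while (!done) {
 *		evlist__poll(evlist, -1);
 *		evlist__ctlfd_process(evlist, &cmd);	// enable/disable on command
 *	}
 *	evlist__finalize_ctlfd(evlist);
 */
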
struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}
	return NULL;
}