// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
#include "util/evlist-hybrid.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>
#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>
#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif
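/*
 * FD() yields the perf_event file descriptor for event 'e' at CPU index 'x'
 * and thread index 'y' in its fd xyarray; SID() yields the corresponding
 * sample_id entry.
 */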
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
	evlist->ctl_fd.fd = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.pos = -1;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
/**
 * evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	evlist__set_id_pos(evlist);
}
static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}
void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	perf_evlist__add(&evlist->core, &entry->core);
	entry->evlist = evlist;
	entry->tracking = !entry->core.idx;

	if (evlist->core.nr_entries == 1)
		evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}
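/*
 * Move events from 'list' onto the evlist in two passes per iteration: the
 * first pass takes the first remaining event (which becomes 'leader'), the
 * second pass collects the other events that share that leader, so grouped
 * events stay contiguous on the evlist.
 */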
void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
{
	while (!list_empty(list)) {
		struct evsel *evsel, *temp, *leader = NULL;

		__evlist__for_each_entry_safe(list, temp, evsel) {
			list_del_init(&evsel->core.node);
			evlist__add(evlist, evsel);
			leader = evsel;
			break;
		}

		__evlist__for_each_entry_safe(list, temp, evsel) {
			if (evsel__has_leader(evsel, leader)) {
				list_del_init(&evsel->core.node);
				evlist__add(evlist, evsel);
			}
		}
	}
}
int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);

		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

void evlist__set_leader(struct evlist *evlist)
{
	perf_evlist__set_leader(&evlist->core);
}
int __evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel;

	evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
				  PERF_COUNT_HW_CPU_CYCLES);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

int evlist__add_dummy(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}
static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

__weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused)
{
	return 0;
}
struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}
static int evlist__nr_threads(struct evlist *evlist, struct evsel *evsel)
{
	if (evsel->core.system_wide)
		return 1;
	else
		return perf_thread_map__nr(evlist->core.threads);
}
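/*
 * The cpu_iter helpers below keep a per-evsel cursor into each evsel's own
 * cpu map, so that walks over all CPUs can translate a global CPU number
 * into the per-evsel index used by the FD xyarray.
 */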
void evlist__cpu_iter_start(struct evlist *evlist)
{
	struct evsel *pos;

	/*
	 * Reset the per evsel cpu_iter. This is needed because
	 * each evsel's cpumap may have a different index space,
	 * and some operations need the index to modify
	 * the FD xyarray (e.g. open, close)
	 */
	evlist__for_each_entry(evlist, pos)
		pos->cpu_iter = 0;
}

bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
{
	if (ev->cpu_iter >= ev->core.cpus->nr)
		return true;
	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
		return true;
	return false;
}

bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
{
	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
		ev->cpu_iter++;
		return false;
	}
	return true;
}
static int evsel__strcmp(struct evsel *pos, char *evsel_name)
{
	if (!evsel_name)
		return 0;
	if (evsel__is_dummy_event(pos))
		return 1;
	return strcmp(pos->name, evsel_name);
}

static int evlist__is_enabled(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		/* If at least one event is enabled, evlist is enabled. */
		if (!pos->disabled)
			return true;
	}
	return false;
}
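/*
 * Disable (and, further below, enable) events grouped by CPU, pinning this
 * thread to each CPU in turn via struct affinity so the ioctls are issued
 * on the CPU that owns the events, avoiding cross-CPU IPIs.
 */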
static void __evlist__disable(struct evlist *evlist, char *evsel_name)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i, imm = 0;
	bool has_imm = false;

	if (affinity__setup(&affinity) < 0)
		return;

	/* Disable 'immediate' events last */
	for (imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist, i, cpu) {
			affinity__set(&affinity, cpu);

			evlist__for_each_entry(evlist, pos) {
				if (evsel__strcmp(pos, evsel_name))
					continue;
				if (evsel__cpu_iter_skip(pos, cpu))
					continue;
				if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
					continue;
				if (pos->immediate)
					has_imm = true;
				if (pos->immediate != imm)
					continue;
				evsel__disable_cpu(pos, pos->cpu_iter - 1);
			}
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = true;
	}

	/*
	 * If we disabled only single event, we need to check
	 * the enabled state of the evlist manually.
	 */
	if (evsel_name)
		evlist->enabled = evlist__is_enabled(evlist);
	else
		evlist->enabled = false;
}
void evlist__disable(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL);
}

void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__disable(evlist, evsel_name);
}

static void __evlist__enable(struct evlist *evlist, char *evsel_name)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i;

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evlist, pos) {
			if (evsel__strcmp(pos, evsel_name))
				continue;
			if (evsel__cpu_iter_skip(pos, cpu))
				continue;
			if (!evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			evsel__enable_cpu(pos, pos->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = false;
	}

	/*
	 * Even single event sets the 'enabled' for evlist,
	 * so the toggle can work properly and toggle to
	 * 'disabled' state.
	 */
	evlist->enabled = true;
}

void evlist__enable(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL);
}

void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__enable(evlist, evsel_name);
}

void evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}
static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = evlist__nr_threads(evlist, evsel);

	if (!evsel->core.fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
{
	int cpu;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

	if (!evsel->core.fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

	if (per_cpu_mmaps)
		return evlist__enable_event_cpu(evlist, evsel, idx);

	return evlist__enable_event_thread(evlist, evsel, idx);
}
int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
				       fdarray_flag__nonfilterable);
}
#endif

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}
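/*
 * Look up the perf_sample_id for 'id' in the evlist's fixed-size hash
 * table (PERF_EVLIST__HLIST_BITS buckets of hlist heads).
 */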
struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}
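/*
 * Extract the sample id from a raw event. For PERF_RECORD_SAMPLE the id
 * sits 'id_pos' u64 words from the start of the sample array; for other
 * record types carrying sample_id_all data, 'is_pos' counts words back
 * from the end of the event.
 */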
static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}
static int evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__pause(struct evlist *evlist)
{
	return evlist__set_paused(evlist, true);
}

static int evlist__resume(struct evlist *evlist)
{
	return evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}
static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_mmap_param *_mp,
			 int idx, bool per_cpu)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, int cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}
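/*
 * Convert the kernel.perf_event_mlock_kb sysctl (minus one page for the
 * perf_event control page) into a page count, rounded down to a power of
 * two.
 */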
unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but lets not
		 * die yet, fall back to a reasonable default.
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
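/*
 * Parse a --mmap-pages style argument: either a size with an optional
 * B/K/M/G suffix, which is converted into pages, or a plain page count.
 * Non-power-of-two values are rounded up to the next power of two.
 */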
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;

		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
{
	return __evlist__parse_mmap_pages(opt->value, str);
}
/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages - auxtrace map length in pages
 * @auxtrace_overwrite - overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
		    int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}
int evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If specify '-a' and '--per-thread' to perf record, perf record
	 * will override '--per-thread'. target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If specify '--per-thread' only to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false.
	 * For perf record, thread_map__new_str doesn't call
	 * thread_map__new_all_cpus. That will keep perf record's
	 * current behavior.
	 *
	 * For perf stat, it allows the case that target->per_thread and
	 * target->system_wide are all true. It means to collect system-wide
	 * per-thread data. thread_map__new_str will call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* as evlist now has references, put count here */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}
int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * filters only work for tracepoint event, which doesn't have cpu limit.
		 * So evlist and evsel should always be same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}
char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter = NULL;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__append_tp_filter_pids(evlist, 1, &pid);
}
bool evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}

u64 evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}
bool evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
			return false;
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
u16 evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->core.attr.sample_id_all)
		goto out;

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	return first->core.attr.sample_id_all;
}

void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
{
	evlist->selected = evsel;
}
void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct affinity affinity;
	int cpu, i;

	/*
	 * With perf record core.cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.cpus) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;
	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry_reverse(evlist, evsel) {
			if (evsel__cpu_iter_skip(evsel, cpu))
				continue;
			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
	perf_evlist__reset_id_hash(&evlist->core);
}
static int evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);
	err = 0;

	perf_thread_map__put(threads);
out_put:
	perf_cpu_map__put(cpus);
out:
	return err;
}
int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}
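/*
 * Fork the workload and freeze it just before execvp(). Two pipes are used:
 * the child signals readiness by closing its end of 'child_ready_pipe', and
 * then blocks on 'go_pipe' (the "cork") until the parent writes a byte from
 * evlist__start_workload(), or cancels by closing the pipe.
 */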
int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Change the name of this process not to confuse --exclude-perf users
		 * that sees 'perf' in the window up to the execvp() and thinks that
		 * perf samples are not being excluded.
		 */
		prctl(PR_SET_NAME, "perf-exec");

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample(evsel, event, sample);
}

int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}
int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel__leader(evsel) == evsel__leader(move_evsel))
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}
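/*
 * State machine for the backward (overwrite) ring buffers:
 * NOTREADY -> RUNNING -> DATA_PENDING -> EMPTY -> RUNNING -> ...
 * Moving to DATA_PENDING pauses the ring buffers, moving from EMPTY back
 * to RUNNING resumes them; any other transition is rejected.
 */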
void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		evlist__pause(evlist);
		break;
	case RESUME:
		evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}
bool evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in data file are not collect in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->core.nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}
struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel__leader(evsel);

	pr_debug("Weak group for %s/%d failed\n",
			leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (evsel__has_leader(c2, leader)) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			evsel__set_leader(c2, c2);
			c2->core.nr_members = 0;
			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	return leader;
}
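/*
 * The --control option accepts "fd:ctl-fd[,ack-fd]" or
 * "fifo:ctl-path[,ack-path]". The fifo form is handled here and opens the
 * named pipes; the fd form is parsed in evlist__parse_control() below.
 */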
static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *s, *p;
	int ret = 0, fd;

	if (strncmp(str, "fifo:", 5))
		return -EINVAL;

	str += 5;
	if (!*str || *str == ',')
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	/*
	 * O_RDWR avoids POLLHUPs which is necessary to allow the other
	 * end of a FIFO to be repeatedly opened and closed.
	 */
	fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
	if (fd < 0) {
		pr_err("Failed to open '%s'\n", s);
		ret = -errno;
		goto out_free;
	}
	*ctl_fd = fd;
	*ctl_fd_close = true;

	if (p && *++p) {
		/* O_RDWR | O_NONBLOCK means the other end need not be open */
		fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
		if (fd < 0) {
			pr_err("Failed to open '%s'\n", p);
			ret = -errno;
			goto out_free;
		}
		*ctl_fd_ack = fd;
	}

out_free:
	free(s);
	return ret;
}

int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *comma = NULL, *endptr = NULL;

	*ctl_fd_close = false;

	if (strncmp(str, "fd:", 3))
		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);

	*ctl_fd = strtoul(&str[3], &endptr, 0);
	if (endptr == &str[3])
		return -EINVAL;

	comma = strchr(str, ',');
	if (comma) {
		if (endptr != comma)
			return -EINVAL;

		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
		if (endptr == comma + 1 || *endptr != '\0')
			return -EINVAL;
	}

	return 0;
}
void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
{
	if (*ctl_fd_close) {
		*ctl_fd_close = false;
		close(ctl_fd);
		if (ctl_fd_ack >= 0)
			close(ctl_fd_ack);
	}
}

int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
{
	if (fd == -1) {
		pr_debug("Control descriptor is not initialized\n");
		return 0;
	}

	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
						     fdarray_flag__nonfilterable);
	if (evlist->ctl_fd.pos < 0) {
		evlist->ctl_fd.pos = -1;
		pr_err("Failed to add ctl fd entry: %m\n");
		return -1;
	}

	evlist->ctl_fd.fd = fd;
	evlist->ctl_fd.ack = ack;

	return 0;
}

bool evlist__ctlfd_initialized(struct evlist *evlist)
{
	return evlist->ctl_fd.pos >= 0;
}

int evlist__finalize_ctlfd(struct evlist *evlist)
{
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	entries[evlist->ctl_fd.pos].fd = -1;
	entries[evlist->ctl_fd.pos].events = 0;
	entries[evlist->ctl_fd.pos].revents = 0;

	evlist->ctl_fd.pos = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.fd = -1;

	return 0;
}
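/*
 * Read one command from the control fd, a byte at a time, up to a '\n',
 * a '\0' or the end of the buffer, then match it against the known
 * command tags.
 */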
static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
			      char *cmd_data, size_t data_size)
{
	int err;
	char c;
	size_t bytes_read = 0;

	*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
	memset(cmd_data, 0, data_size);
	data_size--;

	do {
		err = read(evlist->ctl_fd.fd, &c, 1);
		if (err > 0) {
			if (c == '\n' || c == '\0')
				break;
			cmd_data[bytes_read++] = c;
			if (bytes_read == data_size)
				break;
			continue;
		} else if (err == -1) {
			if (errno == EINTR)
				continue;
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				err = 0;
			else
				pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
		}
		break;
	} while (1);

	pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
		 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");

	if (bytes_read > 0) {
		if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
			     (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_ENABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
				    (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_DISABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
				    (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_SNAPSHOT;
			pr_debug("is snapshot\n");
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG,
				    (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_EVLIST;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG,
				    (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_STOP;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG,
				    (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_PING;
		}
	}

	return bytes_read ? (int)bytes_read : err;
}
int evlist__ctlfd_ack(struct evlist *evlist)
{
	int err;

	if (evlist->ctl_fd.ack == -1)
		return 0;

	err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
		    sizeof(EVLIST_CTL_CMD_ACK_TAG));
	if (err == -1)
		pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);

	return err;
}

static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg)
{
	char *data = cmd_data + cmd_size;

	/* no argument */
	if (!*data)
		return 0;

	/* there's argument */
	if (*data == ' ') {
		*arg = data + 1;
		return 1;
	}

	/* malformed */
	return -1;
}
static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable)
{
	struct evsel *evsel;
	char *name;
	int err;

	err = get_cmd_arg(cmd_data,
			  enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 :
				   sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1,
			  &name);
	if (err < 0) {
		pr_info("failed: wrong command\n");
		return -1;
	}

	if (err) {
		evsel = evlist__find_evsel_by_str(evlist, name);
		if (evsel) {
			if (enable)
				evlist__enable_evsel(evlist, name);
			else
				evlist__disable_evsel(evlist, name);
			pr_info("Event %s %s\n", evsel->name,
				enable ? "enabled" : "disabled");
		} else {
			pr_info("failed: can't find '%s' event\n", name);
		}
	} else {
		if (enable) {
			evlist__enable(evlist);
			pr_info(EVLIST_ENABLED_MSG);
		} else {
			evlist__disable(evlist);
			pr_info(EVLIST_DISABLED_MSG);
		}
	}

	return err;
}

static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data)
{
	struct perf_attr_details details = { .verbose = false, };
	struct evsel *evsel;
	char *arg;
	int err;

	err = get_cmd_arg(cmd_data,
			  sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1,
			  &arg);
	if (err < 0) {
		pr_info("failed: wrong command\n");
		return -1;
	}

	if (err) {
		if (!strcmp(arg, "-v")) {
			details.verbose = true;
		} else if (!strcmp(arg, "-g")) {
			details.event_group = true;
		} else if (!strcmp(arg, "-F")) {
			details.freq = true;
		} else {
			pr_info("failed: wrong command\n");
			return -1;
		}
	}

	evlist__for_each_entry(evlist, evsel)
		evsel__fprintf(evsel, &details, stderr);

	return 0;
}
int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
{
	int err = 0;
	char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
		return 0;

	if (entries[ctlfd_pos].revents & POLLIN) {
		err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
					 EVLIST_CTL_CMD_MAX_LEN);
		if (err > 0) {
			switch (*cmd) {
			case EVLIST_CTL_CMD_ENABLE:
			case EVLIST_CTL_CMD_DISABLE:
				err = evlist__ctlfd_enable(evlist, cmd_data,
							   *cmd == EVLIST_CTL_CMD_ENABLE);
				break;
			case EVLIST_CTL_CMD_EVLIST:
				err = evlist__ctlfd_list(evlist, cmd_data);
				break;
			case EVLIST_CTL_CMD_SNAPSHOT:
			case EVLIST_CTL_CMD_STOP:
			case EVLIST_CTL_CMD_PING:
				break;
			case EVLIST_CTL_CMD_ACK:
			case EVLIST_CTL_CMD_UNSUPPORTED:
			default:
				pr_debug("ctlfd: unsupported %d\n", *cmd);
				break;
			}
			if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
			      *cmd == EVLIST_CTL_CMD_SNAPSHOT))
				evlist__ctlfd_ack(evlist);
		}
	}

	if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
		evlist__finalize_ctlfd(evlist);
	else
		entries[ctlfd_pos].revents = 0;

	return err;
}
struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.idx == idx)
			return evsel;
	}
	return NULL;
}

int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
{
	struct evsel *evsel;
	int printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_dummy_event(evsel))
			continue;
		if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
			printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
		} else {
			printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
			break;
		}
	}

	return printed;
}
void evlist__check_mem_load_aux(struct evlist *evlist)
{
	struct evsel *leader, *evsel, *pos;

	/*
	 * For some platforms, the 'mem-loads' event is required to use
	 * together with 'mem-loads-aux' within a group and 'mem-loads-aux'
	 * must be the group leader. Now we disable this group before reporting
	 * because 'mem-loads-aux' is just an auxiliary event. It doesn't carry
	 * any valid memory load information.
	 */
	evlist__for_each_entry(evlist, evsel) {
		leader = evsel__leader(evsel);
		if (leader == evsel)
			continue;

		if (leader->name && strstr(leader->name, "mem-loads-aux")) {
			for_each_group_evsel(pos, leader) {
				evsel__set_leader(pos, pos);
				pos->core.nr_members = 0;
			}
		}
	}
}