1 // SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
19 void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr)
21 INIT_LIST_HEAD(&evsel->node);
25 struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
27 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
30 perf_evsel__init(evsel, attr);
/*
 * Free an evsel created by perf_evsel__new().  Does not close any open
 * counter fds — callers close via perf_evsel__close() first.
 */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}
/* lvalue access to the int fd stored at (cpu x, thread y) in e's fd xyarray */
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
42 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
44 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
48 for (cpu = 0; cpu < ncpus; cpu++) {
49 for (thread = 0; thread < nthreads; thread++) {
50 FD(evsel, cpu, thread) = -1;
55 return evsel->fd != NULL ? 0 : -ENOMEM;
59 sys_perf_event_open(struct perf_event_attr *attr,
60 pid_t pid, int cpu, int group_fd,
63 return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
66 int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
67 struct perf_thread_map *threads)
69 int cpu, thread, err = 0;
72 static struct perf_cpu_map *empty_cpu_map;
74 if (empty_cpu_map == NULL) {
75 empty_cpu_map = perf_cpu_map__dummy_new();
76 if (empty_cpu_map == NULL)
83 if (threads == NULL) {
84 static struct perf_thread_map *empty_thread_map;
86 if (empty_thread_map == NULL) {
87 empty_thread_map = perf_thread_map__new_dummy();
88 if (empty_thread_map == NULL)
92 threads = empty_thread_map;
95 if (evsel->fd == NULL &&
96 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
99 for (cpu = 0; cpu < cpus->nr; cpu++) {
100 for (thread = 0; thread < threads->nr; thread++) {
103 fd = sys_perf_event_open(&evsel->attr,
104 threads->map[thread].pid,
105 cpus->map[cpu], -1, 0);
110 FD(evsel, cpu, thread) = fd;
117 static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
121 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
122 if (FD(evsel, cpu, thread) >= 0)
123 close(FD(evsel, cpu, thread));
124 FD(evsel, cpu, thread) = -1;
128 void perf_evsel__close_fd(struct perf_evsel *evsel)
132 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
133 perf_evsel__close_fd_cpu(evsel, cpu);
136 void perf_evsel__free_fd(struct perf_evsel *evsel)
138 xyarray__delete(evsel->fd);
142 void perf_evsel__close(struct perf_evsel *evsel)
144 if (evsel->fd == NULL)
147 perf_evsel__close_fd(evsel);
148 perf_evsel__free_fd(evsel);
151 void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
153 if (evsel->fd == NULL)
156 perf_evsel__close_fd_cpu(evsel, cpu);
159 int perf_evsel__read_size(struct perf_evsel *evsel)
161 u64 read_format = evsel->attr.read_format;
162 int entry = sizeof(u64); /* value */
166 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
169 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
172 if (read_format & PERF_FORMAT_ID)
173 entry += sizeof(u64);
175 if (read_format & PERF_FORMAT_GROUP) {
176 nr = evsel->nr_members;
184 int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
185 struct perf_counts_values *count)
187 size_t size = perf_evsel__read_size(evsel);
189 memset(count, 0, sizeof(*count));
191 if (FD(evsel, cpu, thread) < 0)
194 if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
200 static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
206 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
207 int fd = FD(evsel, cpu, thread),
208 err = ioctl(fd, ioc, arg);
217 int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
219 return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
222 int perf_evsel__enable(struct perf_evsel *evsel)
227 for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
228 err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
232 int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
234 return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
237 int perf_evsel__disable(struct perf_evsel *evsel)
242 for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
243 err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
247 int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
251 for (i = 0; i < evsel->cpus->nr && !err; i++)
252 err = perf_evsel__run_ioctl(evsel,
253 PERF_EVENT_IOC_SET_FILTER,
258 struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
263 struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
265 return evsel->threads;
268 struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
273 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
275 if (ncpus == 0 || nthreads == 0)
278 if (evsel->system_wide)
281 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
282 if (evsel->sample_id == NULL)
285 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
286 if (evsel->id == NULL) {
287 xyarray__delete(evsel->sample_id);
288 evsel->sample_id = NULL;
/*
 * Free the per-event sample-id bookkeeping allocated by
 * perf_evsel__alloc_id(): drop the sample_id xyarray and reset the
 * pointer so a later alloc starts clean.
 * NOTE(review): the function continues past this view; presumably the
 * flat id array is freed there too — confirm against the full file.
 */
void perf_evsel__free_id(struct perf_evsel *evsel)
xyarray__delete(evsel->sample_id);
evsel->sample_id = NULL;