// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include "util/strlist.h"
#include <bpf/bpf.h>
#include <bpf/btf.h>

#include "bpf_skel/off_cpu.skel.h"

/* we don't need an actual timestamp, just want the samples to sort last */
#define OFF_CPU_TIMESTAMP	(~0ull << 32)
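
/*
 * Illustrative sketch (not part of the original file, guarded out of the
 * build): OFF_CPU_TIMESTAMP is 0xffffffff00000000, far beyond any realistic
 * sample timestamp, so records stamped with it land after every regular
 * sample; off_cpu_write() bumps it by one per record to keep a stable order.
 */
#ifdef OFF_CPU_EXAMPLE
#include <assert.h>
static void off_cpu_timestamp_example(void)
{
	u64 t = OFF_CPU_TIMESTAMP;

	assert(t == 0xffffffff00000000ULL);	/* upper 32 bits all set */
	assert(t + 1 > t);			/* per-sample bump cannot wrap */
}
#endif
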
static struct off_cpu_bpf *skel;

struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};
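
/*
 * Sketch of the layout assumption behind "int n = 1" in off_cpu_write()
 * below (illustrative, guarded out of the build): the header occupies
 * exactly array[0], so the sample payload starts at array[1].
 */
#ifdef OFF_CPU_EXAMPLE
_Static_assert(sizeof(struct perf_event_header) == sizeof(u64),
	       "perf_event_header must alias exactly one u64 slot");
#endif
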
static int off_cpu_config(struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_BPF_OUTPUT,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	char *evname = strdup(OFFCPU_EVENT);

	if (evname == NULL)
		return -ENOMEM;

	evsel = evsel__new(&attr);
	if (!evsel) {
		free(evname);
		return -ENOMEM;
	}

	evsel->core.attr.freq = 1;
	evsel->core.attr.sample_period = 1;
	/* off-cpu analysis depends on stack traces */
	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;

	evlist__add(evlist, evsel);

	free(evsel->name);
	evsel->name = evname;

	return 0;
}
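
/*
 * Usage sketch (not in the original file, guarded out of the build): the
 * synthetic software event created above is identified purely by its name,
 * OFFCPU_EVENT ("offcpu-time" from util/off_cpu.h), which is how
 * off_cpu_write() finds it again on the session's evlist at dump time.
 */
#ifdef OFF_CPU_EXAMPLE
static struct evsel *off_cpu_example_find_event(struct evlist *evlist)
{
	return evlist__find_evsel_by_str(evlist, OFFCPU_EVENT);
}
#endif
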
static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;

	/* update the task filter for the given workload */
	if (!skel->bss->has_cpu && !skel->bss->has_task &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->enabled = 1;
}
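
/*
 * Hedged sketch of how these callbacks fire (assuming the hook names from
 * util/perf-hooks-list.h): builtin-record invokes the "record_start" hook
 * once the workload is set up and "record_end" on teardown, which runs the
 * two functions above with the evlist registered in off_cpu_prepare().
 */
#ifdef OFF_CPU_EXAMPLE
static void off_cpu_example_fire_hooks(void)
{
	perf_hooks__invoke_record_start();	/* runs off_cpu_start() */
	perf_hooks__invoke_record_end();	/* runs off_cpu_finish() */
}
#endif
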
static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/* the v5.18 kernel added a prev_state arg, so it needs to check the signature */
static void check_sched_switch_args(void)
{
	const struct btf *btf = bpf_object__btf(skel->obj);
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
	if ((s32)type_id < 0)
		return;

	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		return;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		return;

	t3 = btf__type_by_id(btf, t2->type);
	/* the func proto carries an extra 'void *ctx' before the tracepoint args */
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
		/* new format: pass prev_state as 4th arg */
		skel->rodata->has_prev_state = true;
	}
}
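
/*
 * Illustrative shape of the chain walked above, assuming the v5.18+
 * tracepoint prototype (typedef -> pointer -> func_proto with 5 params,
 * 'void *ctx' first); guarded out of the build:
 */
#ifdef OFF_CPU_EXAMPLE
typedef void (*btf_trace_sched_switch_example)(void *ctx, bool preempt,
					       struct task_struct *prev,
					       struct task_struct *next,
					       unsigned int prev_state);
#endif
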
int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;
	struct strlist *pid_slist = NULL;
	struct str_node *pos;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to configure off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* don't need to set a cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

	if (target->pid) {
		pid_slist = strlist__new(target->pid, NULL);
		if (!pid_slist) {
			pr_err("Failed to create a strlist for pid\n");
			return -1;
		}

		ntasks = 0;
		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			ntasks++;
		}
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target->pid) {
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			u32 tgid;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			tgid = pid;
			bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
		}
	} else if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to set off-cpu record hooks\n");
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return -1;
}
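
/*
 * Call-sequence sketch (simplified, not part of the original file, guarded
 * out of the build): with 'perf record --off-cpu', the record command runs
 * roughly this order around the workload.
 */
#ifdef OFF_CPU_EXAMPLE
static int off_cpu_example_session(struct evlist *evlist, struct target *target,
				   struct record_opts *opts,
				   struct perf_session *session)
{
	if (off_cpu_prepare(evlist, target, opts) < 0)
		return -1;

	/* ... workload runs; record_start/record_end hooks toggle the BPF ... */

	return off_cpu_write(session);	/* flush samples at the end */
}
#endif
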
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("unsupported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));

	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1;	/* start from perf_event_header */
		int ip_pos = -1;

		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0;	/* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update length of the callchain */
			data.array[n] = len + 1;

			/* update sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* calculate sample callchain data array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase the dummy timestamp to sort later samples */
		tstamp++;
	}

	return bytes;
}
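
/*
 * The loop above uses the standard libbpf key-iteration idiom
 * (bpf_map_get_next_key() starting from a zeroed key); a minimal standalone
 * sketch of the same pattern, guarded out of the build:
 */
#ifdef OFF_CPU_EXAMPLE
static void off_cpu_example_iterate(int map_fd)
{
	struct off_cpu_key prev, key;
	u64 value;

	memset(&prev, 0, sizeof(prev));
	while (!bpf_map_get_next_key(map_fd, &prev, &key)) {
		if (!bpf_map_lookup_elem(map_fd, &key, &value)) {
			/* consume key/value here */
		}
		prev = key;
	}
}
#endif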