// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2019 Facebook */
#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "bpf_counter.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bpf_prog_profiler.skel.h"
#include "bpf_skel/bperf_u.h"
#include "bpf_skel/bperf_leader.skel.h"
#include "bpf_skel/bperf_follower.skel.h"

#define ATTR_MAP_SIZE 16

static inline void *u64_to_ptr(__u64 ptr)
{
	return (void *)(unsigned long)ptr;
}

static void set_max_rlimit(void)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

	setrlimit(RLIMIT_MEMLOCK, &rinf);
}

static struct bpf_counter *bpf_counter_alloc(void)
{
	struct bpf_counter *counter;

	counter = zalloc(sizeof(*counter));
	if (counter)
		INIT_LIST_HEAD(&counter->list);
	return counter;
}

static int bpf_program_profiler__destroy(struct evsel *evsel)
{
	struct bpf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp,
				 &evsel->bpf_counter_list, list) {
		list_del_init(&counter->list);
		bpf_prog_profiler_bpf__destroy(counter->skel);
		free(counter);
	}
	assert(list_empty(&evsel->bpf_counter_list));

	return 0;
}

static char *bpf_target_prog_name(int tgt_fd)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_func_info *func_info;
	const struct btf_type *t;
	char *name = NULL;
	struct btf *btf = NULL;

	info_linear = bpf_program__get_prog_info_linear(
		tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
		return NULL;
	}

	if (info_linear->info.btf_id == 0 ||
	    btf__get_from_id(info_linear->info.btf_id, &btf)) {
		pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
		goto out;
	}

	func_info = u64_to_ptr(info_linear->info.func_info);
	t = btf__type_by_id(btf, func_info[0].type_id);
	if (!t) {
		pr_debug("btf %d doesn't have type %d\n",
			 info_linear->info.btf_id, func_info[0].type_id);
		goto out;
	}
	name = strdup(btf__name_by_offset(btf, t->name_off));
out:
	free(info_linear);
	return name;
}

static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	struct bpf_program *prog;
	char *prog_name;
	int prog_fd;
	int err;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		pr_err("Failed to open fd for bpf prog %u\n", prog_id);
		return -1;
	}
	counter = bpf_counter_alloc();
	if (!counter) {
		close(prog_fd);
		return -1;
	}

	skel = bpf_prog_profiler_bpf__open();
	if (!skel) {
		pr_err("Failed to open bpf skeleton\n");
		goto err_out;
	}

	skel->rodata->num_cpu = evsel__nr_cpus(evsel);

	bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
	bpf_map__resize(skel->maps.fentry_readings, 1);
	bpf_map__resize(skel->maps.accum_readings, 1);

	prog_name = bpf_target_prog_name(prog_fd);
	if (!prog_name) {
		pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
		goto err_out;
	}

	bpf_object__for_each_program(prog, skel->obj) {
		err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
		if (err) {
			pr_err("bpf_program__set_attach_target failed.\n"
			       "Does bpf prog %u have BTF?\n", prog_id);
			goto err_out;
		}
	}
	set_max_rlimit();
	err = bpf_prog_profiler_bpf__load(skel);
	if (err) {
		pr_err("bpf_prog_profiler_bpf__load failed\n");
		goto err_out;
	}

	assert(skel != NULL);
	counter->skel = skel;
	list_add(&counter->list, &evsel->bpf_counter_list);
	close(prog_fd);
	return 0;
err_out:
	bpf_prog_profiler_bpf__destroy(skel);
	free(counter);
	close(prog_fd);
	return -1;
}

static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
{
	char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
	u32 prog_id;
	int ret;

	bpf_str_ = bpf_str = strdup(target->bpf_str);
	if (!bpf_str)
		return -1;

	while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
		prog_id = strtoul(tok, &p, 10);
		if (prog_id == 0 || prog_id == UINT_MAX ||
		    (*p != '\0' && *p != ',')) {
			pr_err("Failed to parse bpf prog ids %s\n",
			       target->bpf_str);
			return -1;
		}

		ret = bpf_program_profiler_load_one(evsel, prog_id);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			free(bpf_str_);
			return -1;
		}
		bpf_str = NULL;
	}
	free(bpf_str_);
	return 0;
}

static int bpf_program_profiler__enable(struct evsel *evsel)
{
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		ret = bpf_prog_profiler_bpf__attach(counter->skel);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			return ret;
		}
	}
	return 0;
}

static int bpf_program_profiler__disable(struct evsel *evsel)
{
	struct bpf_counter *counter;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		bpf_prog_profiler_bpf__detach(counter->skel);
	}
	return 0;
}

static int bpf_program_profiler__read(struct evsel *evsel)
{
	// perf_cpu_map uses /sys/devices/system/cpu/online
	int num_cpu = evsel__nr_cpus(evsel);
	// BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
	// Sometimes possible > online, like on a Ryzen 3900X that has 24
	// threads but its possible showed 0-31 -acme
	int num_cpu_bpf = libbpf_num_possible_cpus();
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct bpf_counter *counter;
	int reading_map_fd;
	__u32 key = 0;
	int err, cpu;

	if (list_empty(&evsel->bpf_counter_list))
		return -EAGAIN;

	for (cpu = 0; cpu < num_cpu; cpu++) {
		perf_counts(evsel->counts, cpu, 0)->val = 0;
		perf_counts(evsel->counts, cpu, 0)->ena = 0;
		perf_counts(evsel->counts, cpu, 0)->run = 0;
	}
	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		struct bpf_prog_profiler_bpf *skel = counter->skel;

		assert(skel != NULL);
		reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
		if (err) {
			pr_err("failed to read value\n");
			return err;
		}

		for (cpu = 0; cpu < num_cpu; cpu++) {
			perf_counts(evsel->counts, cpu, 0)->val += values[cpu].counter;
			perf_counts(evsel->counts, cpu, 0)->ena += values[cpu].enabled;
			perf_counts(evsel->counts, cpu, 0)->run += values[cpu].running;
		}
	}
	return 0;
}

static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
					    int fd)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		skel = counter->skel;
		assert(skel != NULL);

		ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
					  &cpu, &fd, BPF_ANY);
		if (ret)
			return ret;
	}
	return 0;
}

struct bpf_counter_ops bpf_program_profiler_ops = {
	.load       = bpf_program_profiler__load,
	.enable     = bpf_program_profiler__enable,
	.disable    = bpf_program_profiler__disable,
	.read       = bpf_program_profiler__read,
	.destroy    = bpf_program_profiler__destroy,
	.install_pe = bpf_program_profiler__install_pe,
};

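/*
 * Illustration (not part of the original source): the ops table above backs
 * the "count events inside an existing BPF program" mode of perf-stat.
 * Assuming the -b/--bpf-prog option stores a comma-separated prog-id list in
 * target->bpf_str, a session would look roughly like:
 *
 *	# count cycles spent in BPF program id 254, print every second
 *	perf stat -b 254 -e cycles -I 1000
 *
 * Each prog id gets its own bpf_counter, with a bpf_prog_profiler skeleton
 * attached to the target program via fentry/fexit.
 */
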
static __u32 bpf_link_get_id(int fd)
{
	struct bpf_link_info link_info = {0};
	__u32 link_info_len = sizeof(link_info);

	bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
	return link_info.id;
}

static __u32 bpf_link_get_prog_id(int fd)
{
	struct bpf_link_info link_info = {0};
	__u32 link_info_len = sizeof(link_info);

	bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
	return link_info.prog_id;
}

static __u32 bpf_map_get_id(int fd)
{
	struct bpf_map_info map_info = {0};
	__u32 map_info_len = sizeof(map_info);

	bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
	return map_info.id;
}

static bool bperf_attr_map_compatible(int attr_map_fd)
{
	struct bpf_map_info map_info = {0};
	__u32 map_info_len = sizeof(map_info);
	int err;

	err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);
	if (err)
		return false;
	return (map_info.key_size == sizeof(struct perf_event_attr)) &&
	       (map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}

static int bperf_lock_attr_map(struct target *target)
{
	char path[PATH_MAX];
	int map_fd, err;

	if (target->attr_map) {
		scnprintf(path, PATH_MAX, "%s", target->attr_map);
	} else {
		scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
			  BPF_PERF_DEFAULT_ATTR_MAP_PATH);
	}

	if (access(path, F_OK)) {
		map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
					sizeof(struct perf_event_attr),
					sizeof(struct perf_event_attr_map_entry),
					ATTR_MAP_SIZE, 0);
		if (map_fd < 0)
			return -1;

		err = bpf_obj_pin(map_fd, path);
		if (err) {
			/* someone pinned the map in parallel? */
			close(map_fd);
			map_fd = bpf_obj_get(path);
			if (map_fd < 0)
				return -1;
		}
	} else {
		map_fd = bpf_obj_get(path);
		if (map_fd < 0)
			return -1;
	}

	if (!bperf_attr_map_compatible(map_fd)) {
		close(map_fd);
		return -1;
	}

	err = flock(map_fd, LOCK_EX);
	if (err) {
		close(map_fd);
		return -1;
	}
	return map_fd;
}

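/*
 * Illustration (an assumption-laden sketch, not part of the original file):
 * the pinned attr map acts as a small registry that lets independent
 * perf-stat processes find an existing leader program for the same event:
 *
 *	key:   struct perf_event_attr              (the event being counted)
 *	value: struct perf_event_attr_map_entry {
 *		__u32 link_id;       // bpf_link pinning the leader prog alive
 *		__u32 diff_map_id;   // the leader's diff_readings map
 *	}
 *
 * flock(map_fd, LOCK_EX) serializes sessions while they look up or (re)create
 * the leader for a given attr; bperf__load() drops the lock when it is done.
 */
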
/* trigger the leader program on a cpu */
static int bperf_trigger_reading(int prog_fd, int cpu)
{
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			    .ctx_in = NULL,
			    .ctx_size_in = 0,
			    .flags = BPF_F_TEST_RUN_ON_CPU,
			    .cpu = cpu,
			    .retval = 0,
		);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}

static int bperf_check_target(struct evsel *evsel,
			      struct target *target,
			      enum bperf_filter_type *filter_type,
			      __u32 *filter_entry_cnt)
{
	if (evsel->leader->core.nr_members > 1) {
		pr_err("bpf managed perf events do not yet support groups.\n");
		return -1;
	}

	/* determine filter type based on target */
	if (target->system_wide) {
		*filter_type = BPERF_FILTER_GLOBAL;
		*filter_entry_cnt = 1;
	} else if (target->cpu_list) {
		*filter_type = BPERF_FILTER_CPU;
		*filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
	} else if (target->tid) {
		*filter_type = BPERF_FILTER_PID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else if (target->pid || evsel->evlist->workload.pid != -1) {
		*filter_type = BPERF_FILTER_TGID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else {
		pr_err("bpf managed perf events do not yet support these targets.\n");
		return -1;
	}

	return 0;
}

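/*
 * Illustration (not part of the original file): how the usual perf-stat
 * targets map to the follower's filter, assuming the standard command-line
 * options populate the corresponding target fields:
 *
 *	-a   (system_wide)       -> BPERF_FILTER_GLOBAL, 1 filter entry
 *	-C 1,2  (cpu_list)       -> BPERF_FILTER_CPU,    one entry per cpu
 *	-t 123  (tid)            -> BPERF_FILTER_PID,    one entry per thread
 *	-p 123 / a workload      -> BPERF_FILTER_TGID,   one entry per thread
 *
 * The entry count later sizes both the filter map and the accum_readings map
 * in bperf__load().
 */
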
static struct perf_cpu_map *all_cpu_map;

static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
				       struct perf_event_attr_map_entry *entry)
{
	struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
	int link_fd, diff_map_fd, err;
	struct bpf_link *link = NULL;

	if (!skel) {
		pr_err("Failed to open leader skeleton\n");
		return -1;
	}

	bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
	err = bperf_leader_bpf__load(skel);
	if (err) {
		pr_err("Failed to load leader skeleton\n");
		goto out;
	}

	link = bpf_program__attach(skel->progs.on_switch);
	if (IS_ERR(link)) {
		pr_err("Failed to attach leader program\n");
		err = PTR_ERR(link);
		goto out;
	}

	link_fd = bpf_link__fd(link);
	diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
	entry->link_id = bpf_link_get_id(link_fd);
	entry->diff_map_id = bpf_map_get_id(diff_map_fd);
	err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
	assert(err == 0);

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
	assert(evsel->bperf_leader_link_fd >= 0);

	/*
	 * save leader_skel for install_pe, which is called within
	 * following evsel__open_per_cpu call
	 */
	evsel->leader_skel = skel;
	evsel__open_per_cpu(evsel, all_cpu_map, -1);

out:
	bperf_leader_bpf__destroy(skel);
	bpf_link__destroy(link);
	return err;
}

static int bperf__load(struct evsel *evsel, struct target *target)
{
	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
	int attr_map_fd, diff_map_fd = -1, err;
	enum bperf_filter_type filter_type;
	__u32 filter_entry_cnt, i;

	if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
		return -1;

	if (!all_cpu_map) {
		all_cpu_map = perf_cpu_map__new(NULL);
		if (!all_cpu_map)
			return -1;
	}

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	/*
	 * Step 1: hold a fd on the leader program and the bpf_link, if
	 * the program is not already gone, reload the program.
	 * Use flock() to ensure exclusive access to the perf_event_attr
	 * map.
	 */
	attr_map_fd = bperf_lock_attr_map(target);
	if (attr_map_fd < 0) {
		pr_err("Failed to lock perf_event_attr map\n");
		return -1;
	}

	err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
	if (err) {
		err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
		if (err)
			goto out;
	}

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
	if (evsel->bperf_leader_link_fd < 0 &&
	    bperf_reload_leader_program(evsel, attr_map_fd, &entry))
		goto out;

	/*
	 * The bpf_link holds reference to the leader program, and the
	 * leader program holds reference to the maps. Therefore, if
	 * link_id is valid, diff_map_id should also be valid.
	 */
	evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
		bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
	assert(evsel->bperf_leader_prog_fd >= 0);

	diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
	assert(diff_map_fd >= 0);

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate reading. Check
	 * whether the kernel supports it.
	 */
	err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
	if (err) {
		pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
		       "Therefore, --use-bpf might show inaccurate readings\n");
		goto out;
	}

	/* Step 2: load the follower skeleton */
	evsel->follower_skel = bperf_follower_bpf__open();
	if (!evsel->follower_skel) {
		pr_err("Failed to open follower skeleton\n");
		err = -1;
		goto out;
	}

	/* attach fexit program to the leader program */
	bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
				       evsel->bperf_leader_prog_fd, "on_switch");

	/* connect to leader diff_reading map */
	bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);

	/* set up reading map */
	bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
				 filter_entry_cnt);
	/* set up follower filter based on target */
	bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
				 filter_entry_cnt);
	err = bperf_follower_bpf__load(evsel->follower_skel);
	if (err) {
		pr_err("Failed to load follower skeleton\n");
		bperf_follower_bpf__destroy(evsel->follower_skel);
		evsel->follower_skel = NULL;
		goto out;
	}

	for (i = 0; i < filter_entry_cnt; i++) {
		int filter_map_fd;
		__u32 key;

		if (filter_type == BPERF_FILTER_PID ||
		    filter_type == BPERF_FILTER_TGID)
			key = evsel->core.threads->map[i].pid;
		else if (filter_type == BPERF_FILTER_CPU)
			key = evsel->core.cpus->map[i];
		else
			break;

		filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
		bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
	}

	evsel->follower_skel->bss->type = filter_type;

	err = bperf_follower_bpf__attach(evsel->follower_skel);

out:
	if (err && evsel->bperf_leader_link_fd >= 0)
		close(evsel->bperf_leader_link_fd);
	if (err && evsel->bperf_leader_prog_fd >= 0)
		close(evsel->bperf_leader_prog_fd);
	if (diff_map_fd >= 0)
		close(diff_map_fd);

	flock(attr_map_fd, LOCK_UN);
	close(attr_map_fd);

	return err;
}

static int bperf__install_pe(struct evsel *evsel, int cpu, int fd)
{
	struct bperf_leader_bpf *skel = evsel->leader_skel;

	return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
				   &cpu, &fd, BPF_ANY);
}

/*
 * trigger the leader prog on each cpu, so the accum_readings map could get
 * the latest readings.
 */
static int bperf_sync_counters(struct evsel *evsel)
{
	int num_cpu, i, cpu;

	num_cpu = all_cpu_map->nr;
	for (i = 0; i < num_cpu; i++) {
		cpu = all_cpu_map->map[i];
		bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
	}
	return 0;
}

static int bperf__enable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 1;
	return 0;
}

static int bperf__disable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 0;
	return 0;
}

static int bperf__read(struct evsel *evsel)
{
	struct bperf_follower_bpf *skel = evsel->follower_skel;
	__u32 num_cpu_bpf = cpu__max_cpu();
	struct bpf_perf_event_value values[num_cpu_bpf];
	int reading_map_fd, err = 0;
	__u32 i, j, num_cpu;

	bperf_sync_counters(evsel);
	reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

	for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
		__u32 cpu;

		err = bpf_map_lookup_elem(reading_map_fd, &i, values);
		if (err)
			goto out;
		switch (evsel->follower_skel->bss->type) {
		case BPERF_FILTER_GLOBAL:
			assert(i == 0);

			num_cpu = all_cpu_map->nr;
			for (j = 0; j < num_cpu; j++) {
				cpu = all_cpu_map->map[j];
				perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter;
				perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled;
				perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running;
			}
			break;
		case BPERF_FILTER_CPU:
			cpu = evsel->core.cpus->map[i];
			perf_counts(evsel->counts, i, 0)->val = values[cpu].counter;
			perf_counts(evsel->counts, i, 0)->ena = values[cpu].enabled;
			perf_counts(evsel->counts, i, 0)->run = values[cpu].running;
			break;
		case BPERF_FILTER_PID:
		case BPERF_FILTER_TGID:
			perf_counts(evsel->counts, 0, i)->val = 0;
			perf_counts(evsel->counts, 0, i)->ena = 0;
			perf_counts(evsel->counts, 0, i)->run = 0;

			for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
				perf_counts(evsel->counts, 0, i)->val += values[cpu].counter;
				perf_counts(evsel->counts, 0, i)->ena += values[cpu].enabled;
				perf_counts(evsel->counts, 0, i)->run += values[cpu].running;
			}
			break;
		default:
			break;
		}
	}
out:
	return err;
}

static int bperf__destroy(struct evsel *evsel)
{
	bperf_follower_bpf__destroy(evsel->follower_skel);
	close(evsel->bperf_leader_prog_fd);
	close(evsel->bperf_leader_link_fd);
	return 0;
}

/*
 * bperf: share hardware PMCs with BPF
 *
 * perf uses performance monitoring counters (PMC) to monitor system
 * performance. The PMCs are limited hardware resources. For example,
 * Intel CPUs have 3x fixed PMCs and 4x programmable PMCs per cpu.
 *
 * Modern data center systems use these PMCs in many different ways:
 * system level monitoring, (maybe nested) container level monitoring, per
 * process monitoring, profiling (in sample mode), etc. In some cases,
 * there are more active perf_events than available hardware PMCs. To allow
 * all perf_events to have a chance to run, it is necessary to do expensive
 * time multiplexing of events.
 *
 * On the other hand, many monitoring tools count the common metrics
 * (cycles, instructions). It is a waste to have multiple tools create
 * multiple perf_events of "cycles" and occupy multiple PMCs.
 *
 * bperf tries to reduce such waste by allowing multiple perf_events of
 * "cycles" or "instructions" (at different scopes) to share PMUs. Instead
 * of having each perf-stat session read its own perf_events, bperf uses
 * BPF programs to read the perf_events and aggregate readings to BPF maps.
 * Then, the perf-stat session(s) reads the values from these BPF maps.
 *
 *          shared progs and maps <- || -> per session progs and maps
 *
 * [ASCII diagram elided: perf_events feed the leader prog, which runs on
 *  each context switch (cs) and maintains the prev map and diff map
 *  (shared, top left); each follower prog attaches to the leader via fexit
 *  and updates its own filter map and accum_readings map (per session, top
 *  right); user space perf-stat (bottom) triggers the leader with
 *  BPF_PROG_TEST_RUN and reads accum_readings with BPF_MAP_LOOKUP_ELEM.]
 *
 * The figure above shows the architecture of bperf. Note that the figure
 * is divided into 3 regions: shared progs and maps (top left), per session
 * progs and maps (top right), and user space (bottom).
 *
 * The leader prog is triggered on each context switch (cs). The leader
 * prog reads perf_events and stores the difference (current_reading -
 * previous_reading) to the diff map. For the same metric, e.g. "cycles",
 * multiple perf-stat sessions share the same leader prog.
 *
 * Each perf-stat session creates a follower prog as an fexit program to the
 * leader prog. It is possible to attach up to BPF_MAX_TRAMP_PROGS (38)
 * follower progs to the same leader prog. The follower prog checks current
 * task and processor ID to decide whether to add the value from the diff
 * map to its accumulated reading map (accum_readings).
 *
 * Finally, perf-stat user space reads the value from the accum_readings map.
 *
 * Besides context switch, it is also necessary to trigger the leader prog
 * before perf-stat reads the value. Otherwise, the accum_readings map may
 * not have the latest reading from the perf_events. This is achieved by
 * triggering the leader prog via sys_bpf(BPF_PROG_TEST_RUN) on each CPU.
 *
 * The comment before the definition of struct perf_event_attr_map_entry
 * describes how different sessions of perf-stat share information about
 * the shared leader program and its maps.
 */

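/*
 * A minimal sketch of the BPF side (an assumption based on the description
 * above; the real programs live in the bperf_leader/bperf_follower skeletons
 * and differ in detail):
 *
 *	// leader: raw tracepoint fired on each context switch ("cs")
 *	SEC("raw_tp/sched_switch")
 *	int BPF_PROG(on_switch)
 *	{
 *		// read the perf_event for this cpu, store
 *		// current - prev into diff_readings, update prev
 *	}
 *
 *	// follower: fexit on the leader's on_switch
 *	SEC("fexit/XXX")	// retargeted to "on_switch" at load time
 *	int BPF_PROG(fexit_XXX)
 *	{
 *		// if enabled and the current task/cpu passes the filter map,
 *		// add diff_readings[cpu] into accum_readings[key]
 *	}
 *
 * User space then only needs BPF_PROG_TEST_RUN (bperf_trigger_reading) to
 * flush the latest deltas and BPF_MAP_LOOKUP_ELEM (bperf__read) to collect
 * them.
 */
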
struct bpf_counter_ops bperf_ops = {
	.load       = bperf__load,
	.enable     = bperf__enable,
	.disable    = bperf__disable,
	.read       = bperf__read,
	.install_pe = bperf__install_pe,
	.destroy    = bperf__destroy,
};

static inline bool bpf_counter_skip(struct evsel *evsel)
{
	return list_empty(&evsel->bpf_counter_list) &&
	       evsel->follower_skel == NULL;
}

int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->install_pe(evsel, cpu, fd);
}

int bpf_counter__load(struct evsel *evsel, struct target *target)
{
	if (target->bpf_str)
		evsel->bpf_counter_ops = &bpf_program_profiler_ops;
	else if (target->use_bpf || evsel->bpf_counter ||
		 evsel__match_bpf_counter_events(evsel->name))
		evsel->bpf_counter_ops = &bperf_ops;

	if (evsel->bpf_counter_ops)
		return evsel->bpf_counter_ops->load(evsel, target);
	return 0;
}

int bpf_counter__enable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->enable(evsel);
}

int bpf_counter__disable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->disable(evsel);
}

int bpf_counter__read(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return -EAGAIN;
	return evsel->bpf_counter_ops->read(evsel);
}

void bpf_counter__destroy(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return;
	evsel->bpf_counter_ops->destroy(evsel);
	evsel->bpf_counter_ops = NULL;
}
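
/*
 * Illustration (not part of the original file): the expected call sequence
 * from a perf-stat session, assuming builtin-stat drives these entry points
 * in the usual order:
 *
 *	bpf_counter__load(evsel, target);   // pick & load ops (profiler or bperf)
 *	evsel__open(...);                   //  -> bpf_counter__install_pe() per cpu fd
 *	bpf_counter__enable(evsel);         // start counting
 *	...
 *	bpf_counter__read(evsel);           // fill evsel->counts from BPF maps
 *	bpf_counter__disable(evsel);
 *	bpf_counter__destroy(evsel);
 */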