tools/perf/util/bpf_counter.c
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2019 Facebook */

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "bpf_counter.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bpf_prog_profiler.skel.h"
#include "bpf_skel/bperf_u.h"
#include "bpf_skel/bperf_leader.skel.h"
#include "bpf_skel/bperf_follower.skel.h"

#define ATTR_MAP_SIZE 16

static inline void *u64_to_ptr(__u64 ptr)
{
        return (void *)(unsigned long)ptr;
}

static void set_max_rlimit(void)
{
        struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

        setrlimit(RLIMIT_MEMLOCK, &rinf);
}
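
/*
 * Note (editor's sketch): raising RLIMIT_MEMLOCK matters on kernels that
 * charge BPF map and program memory against memlock (roughly pre-5.11,
 * before memcg-based accounting). That is why the profiler path calls
 * set_max_rlimit() right before loading its skeleton, see
 * bpf_program_profiler_load_one().
 */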

static struct bpf_counter *bpf_counter_alloc(void)
{
        struct bpf_counter *counter;

        counter = zalloc(sizeof(*counter));
        if (counter)
                INIT_LIST_HEAD(&counter->list);
        return counter;
}

static int bpf_program_profiler__destroy(struct evsel *evsel)
{
        struct bpf_counter *counter, *tmp;

        list_for_each_entry_safe(counter, tmp,
                                 &evsel->bpf_counter_list, list) {
                list_del_init(&counter->list);
                bpf_prog_profiler_bpf__destroy(counter->skel);
                free(counter);
        }
        assert(list_empty(&evsel->bpf_counter_list));

        return 0;
}

static char *bpf_target_prog_name(int tgt_fd)
{
        struct bpf_prog_info_linear *info_linear;
        struct bpf_func_info *func_info;
        const struct btf_type *t;
        char *name = NULL;
        struct btf *btf;

        info_linear = bpf_program__get_prog_info_linear(
                tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
        if (IS_ERR_OR_NULL(info_linear)) {
                pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
                return NULL;
        }

        if (info_linear->info.btf_id == 0 ||
            btf__get_from_id(info_linear->info.btf_id, &btf)) {
                pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
                goto out;
        }

        func_info = u64_to_ptr(info_linear->info.func_info);
        t = btf__type_by_id(btf, func_info[0].type_id);
        if (!t) {
                pr_debug("btf %d doesn't have type %d\n",
                         info_linear->info.btf_id, func_info[0].type_id);
                goto out;
        }
        name = strdup(btf__name_by_offset(btf, t->name_off));
out:
        free(info_linear);
        return name;
}

static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
{
        struct bpf_prog_profiler_bpf *skel;
        struct bpf_counter *counter;
        struct bpf_program *prog;
        char *prog_name;
        int prog_fd;
        int err;

        prog_fd = bpf_prog_get_fd_by_id(prog_id);
        if (prog_fd < 0) {
                pr_err("Failed to open fd for bpf prog %u\n", prog_id);
                return -1;
        }
        counter = bpf_counter_alloc();
        if (!counter) {
                close(prog_fd);
                return -1;
        }

        skel = bpf_prog_profiler_bpf__open();
        if (!skel) {
                pr_err("Failed to open bpf skeleton\n");
                goto err_out;
        }

        skel->rodata->num_cpu = evsel__nr_cpus(evsel);

        bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
        bpf_map__resize(skel->maps.fentry_readings, 1);
        bpf_map__resize(skel->maps.accum_readings, 1);

        prog_name = bpf_target_prog_name(prog_fd);
        if (!prog_name) {
                pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
                goto err_out;
        }

        bpf_object__for_each_program(prog, skel->obj) {
                err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
                if (err) {
                        pr_err("bpf_program__set_attach_target failed.\n"
                               "Does bpf prog %u have BTF?\n", prog_id);
                        goto err_out;
                }
        }
        set_max_rlimit();
        err = bpf_prog_profiler_bpf__load(skel);
        if (err) {
                pr_err("bpf_prog_profiler_bpf__load failed\n");
                goto err_out;
        }

        assert(skel != NULL);
        counter->skel = skel;
        list_add(&counter->list, &evsel->bpf_counter_list);
        close(prog_fd);
        return 0;
err_out:
        bpf_prog_profiler_bpf__destroy(skel);
        free(counter);
        close(prog_fd);
        return -1;
}

static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
{
        char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
        u32 prog_id;
        int ret;

        bpf_str_ = bpf_str = strdup(target->bpf_str);
        if (!bpf_str)
                return -1;

        while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
                prog_id = strtoul(tok, &p, 10);
                if (prog_id == 0 || prog_id == UINT_MAX ||
                    (*p != '\0' && *p != ',')) {
                        pr_err("Failed to parse bpf prog ids %s\n",
                               target->bpf_str);
                        /* don't leak the duplicated string on the error path */
                        free(bpf_str_);
                        return -1;
                }

                ret = bpf_program_profiler_load_one(evsel, prog_id);
                if (ret) {
                        bpf_program_profiler__destroy(evsel);
                        free(bpf_str_);
                        return -1;
                }
                bpf_str = NULL;
        }
        free(bpf_str_);
        return 0;
}

static int bpf_program_profiler__enable(struct evsel *evsel)
{
        struct bpf_counter *counter;
        int ret;

        list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
                assert(counter->skel != NULL);
                ret = bpf_prog_profiler_bpf__attach(counter->skel);
                if (ret) {
                        bpf_program_profiler__destroy(evsel);
                        return ret;
                }
        }
        return 0;
}

static int bpf_program_profiler__disable(struct evsel *evsel)
{
        struct bpf_counter *counter;

        list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
                assert(counter->skel != NULL);
                bpf_prog_profiler_bpf__detach(counter->skel);
        }
        return 0;
}

static int bpf_program_profiler__read(struct evsel *evsel)
{
        // perf_cpu_map uses /sys/devices/system/cpu/online
        int num_cpu = evsel__nr_cpus(evsel);
        // BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
        // Sometimes possible > online, like on a Ryzen 3900X that has 24
        // threads but whose possible mask showed 0-31 -acme
        int num_cpu_bpf = libbpf_num_possible_cpus();
        struct bpf_perf_event_value values[num_cpu_bpf];
        struct bpf_counter *counter;
        int reading_map_fd;
        __u32 key = 0;
        int err, cpu;

        if (list_empty(&evsel->bpf_counter_list))
                return -EAGAIN;

        for (cpu = 0; cpu < num_cpu; cpu++) {
                perf_counts(evsel->counts, cpu, 0)->val = 0;
                perf_counts(evsel->counts, cpu, 0)->ena = 0;
                perf_counts(evsel->counts, cpu, 0)->run = 0;
        }
        list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
                struct bpf_prog_profiler_bpf *skel = counter->skel;

                assert(skel != NULL);
                reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

                err = bpf_map_lookup_elem(reading_map_fd, &key, values);
                if (err) {
                        pr_err("failed to read value\n");
                        return err;
                }

                for (cpu = 0; cpu < num_cpu; cpu++) {
                        perf_counts(evsel->counts, cpu, 0)->val += values[cpu].counter;
                        perf_counts(evsel->counts, cpu, 0)->ena += values[cpu].enabled;
                        perf_counts(evsel->counts, cpu, 0)->run += values[cpu].running;
                }
        }
        return 0;
}

static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
                                            int fd)
{
        struct bpf_prog_profiler_bpf *skel;
        struct bpf_counter *counter;
        int ret;

        list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
                skel = counter->skel;
                assert(skel != NULL);

                ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
                                          &cpu, &fd, BPF_ANY);
                if (ret)
                        return ret;
        }
        return 0;
}

struct bpf_counter_ops bpf_program_profiler_ops = {
        .load       = bpf_program_profiler__load,
        .enable     = bpf_program_profiler__enable,
        .disable    = bpf_program_profiler__disable,
        .read       = bpf_program_profiler__read,
        .destroy    = bpf_program_profiler__destroy,
        .install_pe = bpf_program_profiler__install_pe,
};
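
/*
 * Usage sketch (editor's note): the profiler above backs perf-stat's
 * --bpf-prog option; bpf_program_profiler__load() parses a comma-separated
 * list of prog ids from target->bpf_str. The id below (972) is a
 * hypothetical value, as listed by 'bpftool prog show':
 *
 *   # perf stat --bpf-prog 972 -e cycles,instructions -- sleep 1
 *   # perf stat --bpf-prog 972,973 -e cycles -- sleep 1
 */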

static __u32 bpf_link_get_id(int fd)
{
        struct bpf_link_info link_info = {0};
        __u32 link_info_len = sizeof(link_info);

        bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
        return link_info.id;
}

static __u32 bpf_link_get_prog_id(int fd)
{
        struct bpf_link_info link_info = {0};
        __u32 link_info_len = sizeof(link_info);

        bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
        return link_info.prog_id;
}

static __u32 bpf_map_get_id(int fd)
{
        struct bpf_map_info map_info = {0};
        __u32 map_info_len = sizeof(map_info);

        bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
        return map_info.id;
}

static bool bperf_attr_map_compatible(int attr_map_fd)
{
        struct bpf_map_info map_info = {0};
        __u32 map_info_len = sizeof(map_info);
        int err;

        err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);
        if (err)
                return false;
        return (map_info.key_size == sizeof(struct perf_event_attr)) &&
                (map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}

static int bperf_lock_attr_map(struct target *target)
{
        char path[PATH_MAX];
        int map_fd, err;

        if (target->attr_map) {
                scnprintf(path, PATH_MAX, "%s", target->attr_map);
        } else {
                scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
                          BPF_PERF_DEFAULT_ATTR_MAP_PATH);
        }

        if (access(path, F_OK)) {
                map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
                                        sizeof(struct perf_event_attr),
                                        sizeof(struct perf_event_attr_map_entry),
                                        ATTR_MAP_SIZE, 0);
                if (map_fd < 0)
                        return -1;

                err = bpf_obj_pin(map_fd, path);
                if (err) {
                        /* someone pinned the map in parallel? */
                        close(map_fd);
                        map_fd = bpf_obj_get(path);
                        if (map_fd < 0)
                                return -1;
                }
        } else {
                map_fd = bpf_obj_get(path);
                if (map_fd < 0)
                        return -1;
        }

        if (!bperf_attr_map_compatible(map_fd)) {
                close(map_fd);
                return -1;
        }
        err = flock(map_fd, LOCK_EX);
        if (err) {
                close(map_fd);
                return -1;
        }
        return map_fd;
}
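
/*
 * Sketch of the sharing handshake guarded above (names from this file; the
 * /sys/fs/bpf path assumes sysfs is mounted at /sys):
 *   1. open or create the pinned attr map, by default
 *      /sys/fs/bpf/<BPF_PERF_DEFAULT_ATTR_MAP_PATH>, overridable via
 *      target->attr_map;
 *   2. flock(LOCK_EX) serializes concurrent perf-stat sessions on the map;
 *   3. the caller (bperf__load) then looks up its perf_event_attr to find,
 *      or install, the leader prog's link_id/diff_map_id.
 */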

/* trigger the leader program on a cpu */
static int bperf_trigger_reading(int prog_fd, int cpu)
{
        DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
                            .ctx_in = NULL,
                            .ctx_size_in = 0,
                            .flags = BPF_F_TEST_RUN_ON_CPU,
                            .cpu = cpu,
                            .retval = 0,
                );

        return bpf_prog_test_run_opts(prog_fd, &opts);
}

static int bperf_check_target(struct evsel *evsel,
                              struct target *target,
                              enum bperf_filter_type *filter_type,
                              __u32 *filter_entry_cnt)
{
        if (evsel->leader->core.nr_members > 1) {
                pr_err("bpf managed perf events do not yet support groups.\n");
                return -1;
        }

        /* determine filter type based on target */
        if (target->system_wide) {
                *filter_type = BPERF_FILTER_GLOBAL;
                *filter_entry_cnt = 1;
        } else if (target->cpu_list) {
                *filter_type = BPERF_FILTER_CPU;
                *filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
        } else if (target->tid) {
                *filter_type = BPERF_FILTER_PID;
                *filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
        } else if (target->pid || evsel->evlist->workload.pid != -1) {
                *filter_type = BPERF_FILTER_TGID;
                *filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
        } else {
                pr_err("bpf managed perf events do not yet support these targets.\n");
                return -1;
        }

        return 0;
}
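
/*
 * For reference, bperf_check_target() maps perf-stat targets to filters
 * roughly as follows:
 *
 *   perf stat -a ...              -> BPERF_FILTER_GLOBAL, 1 entry
 *   perf stat -C <cpus> ...       -> BPERF_FILTER_CPU,    one entry per cpu
 *   perf stat -t <tid> ...        -> BPERF_FILTER_PID,    one entry per thread
 *   perf stat -p <pid> ...        -> BPERF_FILTER_TGID,   one entry per thread
 *   perf stat ... -- <workload>   -> BPERF_FILTER_TGID,   one entry per thread
 */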

static struct perf_cpu_map *all_cpu_map;

static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
                                       struct perf_event_attr_map_entry *entry)
{
        struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
        int link_fd, diff_map_fd, err;
        struct bpf_link *link = NULL;

        if (!skel) {
                pr_err("Failed to open leader skeleton\n");
                return -1;
        }

        bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
        err = bperf_leader_bpf__load(skel);
        if (err) {
                pr_err("Failed to load leader skeleton\n");
                goto out;
        }

        link = bpf_program__attach(skel->progs.on_switch);
        if (IS_ERR(link)) {
                pr_err("Failed to attach leader program\n");
                err = PTR_ERR(link);
                goto out;
        }

        link_fd = bpf_link__fd(link);
        diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
        entry->link_id = bpf_link_get_id(link_fd);
        entry->diff_map_id = bpf_map_get_id(diff_map_fd);
        err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
        assert(err == 0);

        evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
        assert(evsel->bperf_leader_link_fd >= 0);

        /*
         * save leader_skel for install_pe, which is called within the
         * following evsel__open_per_cpu call
         */
        evsel->leader_skel = skel;
        evsel__open_per_cpu(evsel, all_cpu_map, -1);

out:
        bperf_leader_bpf__destroy(skel);
        bpf_link__destroy(link);
        return err;
}

static int bperf__load(struct evsel *evsel, struct target *target)
{
        struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
        int attr_map_fd, diff_map_fd = -1, err;
        enum bperf_filter_type filter_type;
        __u32 filter_entry_cnt, i;

        if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
                return -1;

        if (!all_cpu_map) {
                all_cpu_map = perf_cpu_map__new(NULL);
                if (!all_cpu_map)
                        return -1;
        }

        evsel->bperf_leader_prog_fd = -1;
        evsel->bperf_leader_link_fd = -1;

        /*
         * Step 1: hold an fd on the leader program and the bpf_link; if
         * the program is already gone, reload it.
         * Use flock() to ensure exclusive access to the perf_event_attr
         * map.
         */
        attr_map_fd = bperf_lock_attr_map(target);
        if (attr_map_fd < 0) {
                pr_err("Failed to lock perf_event_attr map\n");
                return -1;
        }

        err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
        if (err) {
                err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
                if (err)
                        goto out;
        }

        evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
        if (evsel->bperf_leader_link_fd < 0 &&
            bperf_reload_leader_program(evsel, attr_map_fd, &entry))
                goto out;

        /*
         * The bpf_link holds a reference to the leader program, and the
         * leader program holds references to the maps. Therefore, if
         * link_id is valid, diff_map_id should also be valid.
         */
        evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
                bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
        assert(evsel->bperf_leader_prog_fd >= 0);

        diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
        assert(diff_map_fd >= 0);

        /*
         * bperf uses BPF_PROG_TEST_RUN to get accurate readings. Check
         * whether the kernel supports it.
         */
        err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
        if (err) {
                pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
                       "Therefore, --bpf-counters might show inaccurate readings\n");
                goto out;
        }

        /* Step 2: load the follower skeleton */
        evsel->follower_skel = bperf_follower_bpf__open();
        if (!evsel->follower_skel) {
                pr_err("Failed to open follower skeleton\n");
                goto out;
        }

        /* attach fexit program to the leader program */
        bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
                                       evsel->bperf_leader_prog_fd, "on_switch");

        /* connect to the leader's diff_readings map */
        bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);

        /* set up reading map */
        bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
                                 filter_entry_cnt);
        /* set up follower filter based on target */
        bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
                                 filter_entry_cnt);
        err = bperf_follower_bpf__load(evsel->follower_skel);
        if (err) {
                pr_err("Failed to load follower skeleton\n");
                bperf_follower_bpf__destroy(evsel->follower_skel);
                evsel->follower_skel = NULL;
                goto out;
        }

        for (i = 0; i < filter_entry_cnt; i++) {
                int filter_map_fd;
                __u32 key;

                if (filter_type == BPERF_FILTER_PID ||
                    filter_type == BPERF_FILTER_TGID)
                        key = evsel->core.threads->map[i].pid;
                else if (filter_type == BPERF_FILTER_CPU)
                        key = evsel->core.cpus->map[i];
                else
                        break;

                filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
                bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
        }

        evsel->follower_skel->bss->type = filter_type;

        err = bperf_follower_bpf__attach(evsel->follower_skel);

out:
        if (err && evsel->bperf_leader_link_fd >= 0)
                close(evsel->bperf_leader_link_fd);
        if (err && evsel->bperf_leader_prog_fd >= 0)
                close(evsel->bperf_leader_prog_fd);
        if (diff_map_fd >= 0)
                close(diff_map_fd);

        flock(attr_map_fd, LOCK_UN);
        close(attr_map_fd);

        return err;
}

static int bperf__install_pe(struct evsel *evsel, int cpu, int fd)
{
        struct bperf_leader_bpf *skel = evsel->leader_skel;

        return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
                                   &cpu, &fd, BPF_ANY);
}

/*
 * Trigger the leader prog on each cpu, so that the accum_readings map can
 * get the latest readings.
 */
static int bperf_sync_counters(struct evsel *evsel)
{
        int num_cpu, i, cpu;

        num_cpu = all_cpu_map->nr;
        for (i = 0; i < num_cpu; i++) {
                cpu = all_cpu_map->map[i];
                bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
        }
        return 0;
}

static int bperf__enable(struct evsel *evsel)
{
        evsel->follower_skel->bss->enabled = 1;
        return 0;
}

static int bperf__disable(struct evsel *evsel)
{
        evsel->follower_skel->bss->enabled = 0;
        return 0;
}

static int bperf__read(struct evsel *evsel)
{
        struct bperf_follower_bpf *skel = evsel->follower_skel;
        __u32 num_cpu_bpf = cpu__max_cpu();
        struct bpf_perf_event_value values[num_cpu_bpf];
        int reading_map_fd, err = 0;
        __u32 i, j, num_cpu;

        bperf_sync_counters(evsel);
        reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

        for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
                __u32 cpu;

                err = bpf_map_lookup_elem(reading_map_fd, &i, values);
                if (err)
                        goto out;
                switch (evsel->follower_skel->bss->type) {
                case BPERF_FILTER_GLOBAL:
                        assert(i == 0);

                        num_cpu = all_cpu_map->nr;
                        for (j = 0; j < num_cpu; j++) {
                                cpu = all_cpu_map->map[j];
                                perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter;
                                perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled;
                                perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running;
                        }
                        break;
                case BPERF_FILTER_CPU:
                        cpu = evsel->core.cpus->map[i];
                        perf_counts(evsel->counts, i, 0)->val = values[cpu].counter;
                        perf_counts(evsel->counts, i, 0)->ena = values[cpu].enabled;
                        perf_counts(evsel->counts, i, 0)->run = values[cpu].running;
                        break;
                case BPERF_FILTER_PID:
                case BPERF_FILTER_TGID:
                        perf_counts(evsel->counts, 0, i)->val = 0;
                        perf_counts(evsel->counts, 0, i)->ena = 0;
                        perf_counts(evsel->counts, 0, i)->run = 0;

                        for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
                                perf_counts(evsel->counts, 0, i)->val += values[cpu].counter;
                                perf_counts(evsel->counts, 0, i)->ena += values[cpu].enabled;
                                perf_counts(evsel->counts, 0, i)->run += values[cpu].running;
                        }
                        break;
                default:
                        break;
                }
        }
out:
        return err;
}

static int bperf__destroy(struct evsel *evsel)
{
        bperf_follower_bpf__destroy(evsel->follower_skel);
        close(evsel->bperf_leader_prog_fd);
        close(evsel->bperf_leader_link_fd);
        return 0;
}

/*
 * bperf: share hardware PMCs with BPF
 *
 * perf uses performance monitoring counters (PMC) to monitor system
 * performance. The PMCs are limited hardware resources. For example,
 * Intel CPUs have 3x fixed PMCs and 4x programmable PMCs per cpu.
 *
 * Modern data center systems use these PMCs in many different ways:
 * system level monitoring, (maybe nested) container level monitoring, per
 * process monitoring, profiling (in sample mode), etc. In some cases,
 * there are more active perf_events than available hardware PMCs. To allow
 * all perf_events to have a chance to run, it is necessary to do expensive
 * time multiplexing of events.
 *
 * On the other hand, many monitoring tools count the common metrics
 * (cycles, instructions). It is a waste to have multiple tools create
 * multiple perf_events of "cycles" and occupy multiple PMCs.
 *
 * bperf tries to reduce such waste by allowing multiple perf_events of
 * "cycles" or "instructions" (at different scopes) to share PMUs. Instead
 * of having each perf-stat session read its own perf_events, bperf uses
 * BPF programs to read the perf_events and aggregate readings to BPF maps.
 * Then, the perf-stat session(s) read the values from these BPF maps.
 *
 *                                ||
 *       shared progs and maps <- || -> per session progs and maps
 *                                ||
 *   ---------------              ||
 *   | perf_events |              ||
 *   ---------------       fexit  ||      -----------------
 *          |             --------||----> | follower prog |
 *       --------------- /        || ---  -----------------
 * cs -> | leader prog |/         ||/        |         |
 *   --> ---------------         /||  --------------  ------------------
 *  /       |         |         / ||  | filter map |  | accum_readings |
 * /  ------------  ------------  ||  --------------  ------------------
 * |  | prev map |  | diff map |  ||                        |
 * |  ------------  ------------  ||                        |
 *  \                             ||                        |
 * = \ ==================================================== | ============
 *    \                                                    /   user space
 *     \                                                  /
 *      \                                                /
 *    BPF_PROG_TEST_RUN                    BPF_MAP_LOOKUP_ELEM
 *        \                                            /
 *         \                                          /
 *          \------  perf-stat ----------------------/
 *
 * The figure above shows the architecture of bperf. Note that the figure
 * is divided into 3 regions: shared progs and maps (top left), per session
 * progs and maps (top right), and user space (bottom).
 *
 * The leader prog is triggered on each context switch (cs). The leader
 * prog reads perf_events and stores the difference (current_reading -
 * previous_reading) to the diff map. For the same metric, e.g. "cycles",
 * multiple perf-stat sessions share the same leader prog.
 *
 * Each perf-stat session creates a follower prog as a fexit program
 * attached to the leader prog. It is possible to attach up to
 * BPF_MAX_TRAMP_PROGS (38) follower progs to the same leader prog. The
 * follower prog checks the current task and processor ID to decide whether
 * to add the value from the diff map to its accumulated reading map
 * (accum_readings).
 *
 * Finally, perf-stat user space reads the value from the accum_readings map.
 *
 * Besides context switches, it is also necessary to trigger the leader prog
 * before perf-stat reads the value. Otherwise, the accum_readings map may
 * not have the latest reading from the perf_events. This is achieved by
 * triggering the event via sys_bpf(BPF_PROG_TEST_RUN) on each CPU.
 *
 * The comment before the definition of struct perf_event_attr_map_entry
 * describes how different sessions of perf-stat share information about
 * the leader prog.
 */
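
/*
 * Usage sketch (editor's note): with bperf, two concurrent sessions
 * counting the same event share one perf_event per cpu instead of
 * multiplexing two. Assuming a perf build that exposes --bpf-counters
 * (which sets target->use_bpf and selects bperf_ops below); the workloads
 * are placeholders:
 *
 *   # session A, system wide
 *   perf stat --bpf-counters -e cycles -a -- sleep 10 &
 *   # session B, one workload; reuses session A's leader prog via the
 *   # pinned attr map (see bperf_lock_attr_map())
 *   perf stat --bpf-counters -e cycles -- ./my_workload
 */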

struct bpf_counter_ops bperf_ops = {
        .load       = bperf__load,
        .enable     = bperf__enable,
        .disable    = bperf__disable,
        .read       = bperf__read,
        .install_pe = bperf__install_pe,
        .destroy    = bperf__destroy,
};

static inline bool bpf_counter_skip(struct evsel *evsel)
{
        return list_empty(&evsel->bpf_counter_list) &&
                evsel->follower_skel == NULL;
}

int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd)
{
        if (bpf_counter_skip(evsel))
                return 0;
        return evsel->bpf_counter_ops->install_pe(evsel, cpu, fd);
}

int bpf_counter__load(struct evsel *evsel, struct target *target)
{
        if (target->bpf_str)
                evsel->bpf_counter_ops = &bpf_program_profiler_ops;
        else if (target->use_bpf || evsel->bpf_counter ||
                 evsel__match_bpf_counter_events(evsel->name))
                evsel->bpf_counter_ops = &bperf_ops;

        if (evsel->bpf_counter_ops)
                return evsel->bpf_counter_ops->load(evsel, target);
        return 0;
}

int bpf_counter__enable(struct evsel *evsel)
{
        if (bpf_counter_skip(evsel))
                return 0;
        return evsel->bpf_counter_ops->enable(evsel);
}

int bpf_counter__disable(struct evsel *evsel)
{
        if (bpf_counter_skip(evsel))
                return 0;
        return evsel->bpf_counter_ops->disable(evsel);
}

int bpf_counter__read(struct evsel *evsel)
{
        if (bpf_counter_skip(evsel))
                return -EAGAIN;
        return evsel->bpf_counter_ops->read(evsel);
}

void bpf_counter__destroy(struct evsel *evsel)
{
        if (bpf_counter_skip(evsel))
                return;
        evsel->bpf_counter_ops->destroy(evsel);
        evsel->bpf_counter_ops = NULL;
}