/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_BPF_COUNTER_H
#define __PERF_BPF_COUNTER_H 1

#include <linux/list.h>
#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

struct evsel;
struct target;
struct bpf_counter;

typedef int (*bpf_counter_evsel_op)(struct evsel *evsel);
typedef int (*bpf_counter_evsel_target_op)(struct evsel *evsel,
                                           struct target *target);
typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
                                                int cpu,
                                                int fd);

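/*
 * Callbacks that implement the bpf_counter__*() entry points declared
 * below for one particular BPF counter backend.
 */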
struct bpf_counter_ops {
        bpf_counter_evsel_target_op load;
        bpf_counter_evsel_op enable;
        bpf_counter_evsel_op disable;
        bpf_counter_evsel_op read;
        bpf_counter_evsel_op destroy;
        bpf_counter_evsel_install_pe_op install_pe;
};

struct bpf_counter {
        void *skel;
        struct list_head list;
};

#ifdef HAVE_BPF_SKEL

int bpf_counter__load(struct evsel *evsel, struct target *target);
int bpf_counter__enable(struct evsel *evsel);
int bpf_counter__disable(struct evsel *evsel);
int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);

#else /* HAVE_BPF_SKEL */

#include <linux/err.h>

static inline int bpf_counter__load(struct evsel *evsel __maybe_unused,
                                    struct target *target __maybe_unused)
{
        return 0;
}

static inline int bpf_counter__enable(struct evsel *evsel __maybe_unused)
{
        return 0;
}

static inline int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
        return 0;
}

static inline int bpf_counter__read(struct evsel *evsel __maybe_unused)
{
        return -EAGAIN;
}

static inline void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

static inline int bpf_counter__install_pe(struct evsel *evsel __maybe_unused,
                                          int cpu __maybe_unused,
                                          int fd __maybe_unused)
{
        return 0;
}

#endif /* HAVE_BPF_SKEL */

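/*
 * BPF programs and maps are charged against RLIMIT_MEMLOCK on kernels
 * that still use memlock accounting for BPF objects, so the limit is
 * raised to infinity before loading skeletons.
 */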
static inline void set_max_rlimit(void)
{
        struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

        setrlimit(RLIMIT_MEMLOCK, &rinf);
}

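/*
 * Thin wrappers around bpf_obj_get_info_by_fd() that return only the id
 * (or prog_id) of a BPF link or map given its fd; on failure the
 * zero-initialized id is returned.
 */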
static inline __u32 bpf_link_get_id(int fd)
{
        struct bpf_link_info link_info = { .id = 0, };
        __u32 link_info_len = sizeof(link_info);

        bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
        return link_info.id;
}

static inline __u32 bpf_link_get_prog_id(int fd)
{
        struct bpf_link_info link_info = { .id = 0, };
        __u32 link_info_len = sizeof(link_info);

        bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
        return link_info.prog_id;
}

static inline __u32 bpf_map_get_id(int fd)
{
        struct bpf_map_info map_info = { .id = 0, };
        __u32 map_info_len = sizeof(map_info);

        bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
        return map_info.id;
}

/* trigger the leader program on a cpu */
static inline int bperf_trigger_reading(int prog_fd, int cpu)
{
        DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
                            .ctx_in = NULL,
                            .ctx_size_in = 0,
                            .flags = BPF_F_TEST_RUN_ON_CPU,
                            .cpu = cpu,
                            .retval = 0,
                );

        return bpf_prog_test_run_opts(prog_fd, &opts);
}
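
/*
 * Usage sketch (hypothetical caller, not part of this header's API): a
 * reader could force the leader BPF program to snapshot its counters on
 * every CPU before dumping the shared output map, e.g.:
 *
 *      for (cpu = 0; cpu < nr_cpus; cpu++)
 *              bperf_trigger_reading(leader_prog_fd, cpu);
 *
 * where leader_prog_fd is the fd of the leader program and nr_cpus the
 * number of CPUs to trigger.
 */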

#endif /* __PERF_BPF_COUNTER_H */