// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <ctype.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <libelf.h>
#include <gelf.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"
#define DEBUGFS "/sys/kernel/debug/tracing/"

static char license[128];
static int kern_version;
static bool processed_sec[128];
char bpf_log_buf[BPF_LOG_BUF_SIZE];
int map_fd[MAX_MAPS];
int prog_fd[MAX_PROGS];
int event_fd[MAX_PROGS];
int prog_cnt;
int prog_array_fd = -1;

struct bpf_map_data map_data[MAX_MAPS];
int map_data_count;
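
/*
 * Typical usage from a sample's user-space half (a sketch; the object
 * file name is illustrative):
 *
 *	if (load_bpf_file("sockex1_kern.o"))
 *		return 1;
 *	... prog_fd[] and map_fd[] are now populated in section order.
 */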

static int populate_prog_array(const char *event, int prog_fd)
{
	int ind = atoi(event), err;

	err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
	if (err < 0) {
		printf("failed to store prog_fd in prog_array\n");
		return -1;
	}
	return 0;
}

static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
{
	bool is_socket = strncmp(event, "socket", 6) == 0;
	bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
	bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
	bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
	bool is_raw_tracepoint = strncmp(event, "raw_tracepoint/", 15) == 0;
	bool is_xdp = strncmp(event, "xdp", 3) == 0;
	bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
	bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0;
	bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0;
	bool is_sockops = strncmp(event, "sockops", 7) == 0;
	bool is_sk_skb = strncmp(event, "sk_skb", 6) == 0;
	bool is_sk_msg = strncmp(event, "sk_msg", 6) == 0;
	size_t insns_cnt = size / sizeof(struct bpf_insn);
	enum bpf_prog_type prog_type;
	char buf[256];
	int fd, efd, err, id;
	struct perf_event_attr attr = {};

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
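
	/*
	 * The attr above is only consumed on the kprobe/tracepoint paths
	 * below: one PERF_TYPE_TRACEPOINT perf event is opened per program,
	 * sampling every hit (sample_period = 1), and the BPF program is
	 * then attached to it with PERF_EVENT_IOC_SET_BPF.
	 */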

	if (is_socket) {
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	} else if (is_kprobe || is_kretprobe) {
		prog_type = BPF_PROG_TYPE_KPROBE;
	} else if (is_tracepoint) {
		prog_type = BPF_PROG_TYPE_TRACEPOINT;
	} else if (is_raw_tracepoint) {
		prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT;
	} else if (is_xdp) {
		prog_type = BPF_PROG_TYPE_XDP;
	} else if (is_perf_event) {
		prog_type = BPF_PROG_TYPE_PERF_EVENT;
	} else if (is_cgroup_skb) {
		prog_type = BPF_PROG_TYPE_CGROUP_SKB;
	} else if (is_cgroup_sk) {
		prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
	} else if (is_sockops) {
		prog_type = BPF_PROG_TYPE_SOCK_OPS;
	} else if (is_sk_skb) {
		prog_type = BPF_PROG_TYPE_SK_SKB;
	} else if (is_sk_msg) {
		prog_type = BPF_PROG_TYPE_SK_MSG;
	} else {
		printf("Unknown event '%s'\n", event);
		return -1;
	}

	fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version,
			      bpf_log_buf, BPF_LOG_BUF_SIZE);
	if (fd < 0) {
		printf("bpf_load_program() err=%d\n%s", errno, bpf_log_buf);
		return -1;
	}

	prog_fd[prog_cnt++] = fd;

	if (is_xdp || is_perf_event || is_cgroup_skb || is_cgroup_sk)
		return 0;

	if (is_socket || is_sockops || is_sk_skb || is_sk_msg) {
		if (is_socket)
			event += 6;
		else
			event += 7;
		if (*event != '/')
			return 0;
		event++;
		if (!isdigit(*event)) {
			printf("invalid prog number\n");
			return -1;
		}
		return populate_prog_array(event, fd);
	}

	if (is_raw_tracepoint) {
		efd = bpf_raw_tracepoint_open(event + 15, fd);
		if (efd < 0) {
			printf("tracepoint %s %s\n", event + 15, strerror(errno));
			return -1;
		}
		event_fd[prog_cnt - 1] = efd;
		return 0;
	}

	if (is_kprobe || is_kretprobe) {
		if (is_kprobe)
			event += 7;
		else
			event += 10;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}

		if (isdigit(*event))
			return populate_prog_array(event, fd);

		snprintf(buf, sizeof(buf),
			 "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
			 is_kprobe ? 'p' : 'r', event, event);
		err = system(buf);
		if (err < 0) {
			printf("failed to create kprobe '%s' error '%s'\n",
			       event, strerror(errno));
			return -1;
		}

		strcpy(buf, DEBUGFS);
		strcat(buf, "events/kprobes/");
		strcat(buf, event);
		strcat(buf, "/id");
	} else if (is_tracepoint) {
		event += 11;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}
		strcpy(buf, DEBUGFS);
		strcat(buf, "events/");
		strcat(buf, event);
		strcat(buf, "/id");
	}

	/* attach via the perf layer: read the event id, open a perf event
	 * for it, enable it, and set the BPF program on it */
	efd = open(buf, O_RDONLY, 0);
	if (efd < 0) {
		printf("failed to open event %s\n", event);
		return -1;
	}

	err = read(efd, buf, sizeof(buf));
	if (err < 0 || err >= sizeof(buf)) {
		printf("read from '%s' failed '%s'\n", event, strerror(errno));
		return -1;
	}

	close(efd);
	buf[err] = 0;
	id = atoi(buf);
	attr.config = id;

	efd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
	if (efd < 0) {
		printf("event %d fd %d err %s\n", id, efd, strerror(errno));
		return -1;
	}
	event_fd[prog_cnt - 1] = efd;
	err = ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
	if (err < 0) {
		printf("ioctl PERF_EVENT_IOC_ENABLE failed err %s\n",
		       strerror(errno));
		return -1;
	}
	err = ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
	if (err < 0) {
		printf("ioctl PERF_EVENT_IOC_SET_BPF failed err %s\n",
		       strerror(errno));
		return -1;
	}

	return 0;
}

static int load_maps(struct bpf_map_data *maps, int nr_maps,
		     fixup_map_cb fixup_map)
{
	int i, numa_node;

	for (i = 0; i < nr_maps; i++) {
		if (fixup_map) {
			fixup_map(&maps[i], i);
			/* Allow userspace to assign map FD prior to creation */
			if (maps[i].fd != -1) {
				map_fd[i] = maps[i].fd;
				continue;
			}
		}

		numa_node = maps[i].def.map_flags & BPF_F_NUMA_NODE ?
			maps[i].def.numa_node : -1;

		if (maps[i].def.type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		    maps[i].def.type == BPF_MAP_TYPE_HASH_OF_MAPS) {
			int inner_map_fd = map_fd[maps[i].def.inner_map_idx];

			map_fd[i] = bpf_create_map_in_map_node(maps[i].def.type,
							maps[i].name,
							maps[i].def.key_size,
							inner_map_fd,
							maps[i].def.max_entries,
							maps[i].def.map_flags,
							numa_node);
		} else {
			map_fd[i] = bpf_create_map_node(maps[i].def.type,
							maps[i].name,
							maps[i].def.key_size,
							maps[i].def.value_size,
							maps[i].def.max_entries,
							maps[i].def.map_flags,
							numa_node);
		}
		if (map_fd[i] < 0) {
			printf("failed to create a map: %d %s\n",
			       errno, strerror(errno));
			return 1;
		}
		maps[i].fd = map_fd[i];

		if (maps[i].def.type == BPF_MAP_TYPE_PROG_ARRAY)
			prog_array_fd = map_fd[i];
	}
	return 0;
}
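
/*
 * A minimal fixup_map_cb sketch for load_bpf_file_fixup_map(): the
 * callback may rewrite a map definition before creation, or hand in a
 * pre-created fd so the map is shared with another process (the names
 * below are hypothetical):
 *
 *	static void my_fixup(struct bpf_map_data *map, int idx)
 *	{
 *		if (strcmp(map->name, "my_shared_map") == 0)
 *			map->fd = pinned_map_fd;   // skip creation above
 *	}
 */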

static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname,
		   GElf_Shdr *shdr, Elf_Data **data)
{
	Elf_Scn *scn;

	scn = elf_getscn(elf, i);
	if (!scn)
		return 1;

	if (gelf_getshdr(scn, shdr) != shdr)
		return 2;

	*shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name);
	if (!*shname || !shdr->sh_size)
		return 3;

	*data = elf_getdata(scn, 0);
	if (!*data || elf_getdata(scn, *data) != NULL)
		return 4;

	return 0;
}

static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
				GElf_Shdr *shdr, struct bpf_insn *insn,
				struct bpf_map_data *maps, int nr_maps)
{
	int i, nrels;

	nrels = shdr->sh_size / shdr->sh_entsize;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		bool match = false;
		int map_idx;

		gelf_getrel(data, i, &rel);

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);

		gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym);

		if (insn[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			printf("invalid relo for insn[%d].code 0x%x\n",
			       insn_idx, insn[insn_idx].code);
			return 1;
		}
		insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;

		/* Match FD relocation against recorded map_data[] offset */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].elf_offset == sym.st_value) {
				match = true;
				break;
			}
		}
		if (match) {
			insn[insn_idx].imm = maps[map_idx].fd;
		} else {
			printf("invalid relo for insn[%d] no map_data match\n",
			       insn_idx);
			return 1;
		}
	}

	return 0;
}

static int cmp_symbols(const void *l, const void *r)
{
	const GElf_Sym *lsym = (const GElf_Sym *)l;
	const GElf_Sym *rsym = (const GElf_Sym *)r;

	if (lsym->st_value < rsym->st_value)
		return -1;
	else if (lsym->st_value > rsym->st_value)
		return 1;
	else
		return 0;
}

static int load_elf_maps_section(struct bpf_map_data *maps, int maps_shndx,
				 Elf *elf, Elf_Data *symbols, int strtabidx)
{
	int map_sz_elf, map_sz_copy;
	bool validate_zero = false;
	Elf_Data *data_maps = NULL;
	int i, nr_maps;
	GElf_Sym *sym;
	Elf_Scn *scn;

	if (maps_shndx < 0 || !symbols)
		return -EINVAL;

	/* Get data for maps section via elf index */
	scn = elf_getscn(elf, maps_shndx);
	if (scn)
		data_maps = elf_getdata(scn, NULL);
	if (!scn || !data_maps) {
		printf("Failed to get Elf_Data from maps section %d\n",
		       maps_shndx);
		return -EINVAL;
	}

	/* For each map get corresponding symbol table entry */
	sym = calloc(MAX_MAPS+1, sizeof(GElf_Sym));
	for (i = 0, nr_maps = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		assert(nr_maps < MAX_MAPS+1);
		if (!gelf_getsym(symbols, i, &sym[nr_maps]))
			continue;
		if (sym[nr_maps].st_shndx != maps_shndx)
			continue;
		/* Only count symbols that live in the maps section */
		nr_maps++;
	}
	if (!nr_maps) {
		free(sym);
		return -EINVAL;
	}

	/* Align to map_fd[] order, via sort on offset in sym.st_value */
	qsort(sym, nr_maps, sizeof(GElf_Sym), cmp_symbols);

	/* Keeping compatible with ELF maps section changes
	 * ------------------------------------------------
	 * The loader knows the size of struct bpf_map_def it was compiled
	 * against, but the struct stored in the ELF file can differ.
	 *
	 * Unfortunately sym[i].st_size is zero. To calculate the
	 * struct size stored in the ELF file, assume all structs have
	 * the same size, and simply divide by the number of map
	 * symbols.
	 */
	map_sz_elf = data_maps->d_size / nr_maps;
	map_sz_copy = sizeof(struct bpf_map_def);
	if (map_sz_elf < map_sz_copy) {
		/*
		 * Backward compat: loading older ELF file with
		 * smaller struct, keeping remaining bytes zero.
		 */
		map_sz_copy = map_sz_elf;
	} else if (map_sz_elf > map_sz_copy) {
		/*
		 * Forward compat: loading newer ELF file with larger
		 * struct with unknown features. Assume zero means
		 * feature not used. Thus, validate rest of struct
		 * data is zero.
		 */
		validate_zero = true;
	}

	/* Memcpy relevant part of ELF maps data to loader maps */
	for (i = 0; i < nr_maps; i++) {
		struct bpf_map_def *def;
		unsigned char *addr, *end;
		const char *map_name;
		size_t offset;

		map_name = elf_strptr(elf, strtabidx, sym[i].st_name);
		maps[i].name = strdup(map_name);
		if (!maps[i].name) {
			printf("strdup(%s): %s(%d)\n", map_name,
			       strerror(errno), errno);
			free(sym);
			return -errno;
		}

		/* Symbol value is offset into ELF maps section data area */
		offset = sym[i].st_value;
		def = (struct bpf_map_def *)(data_maps->d_buf + offset);
		maps[i].elf_offset = offset;
		memset(&maps[i].def, 0, sizeof(struct bpf_map_def));
		memcpy(&maps[i].def, def, map_sz_copy);

		/* Verify no newer features were requested */
		if (validate_zero) {
			addr = (unsigned char *) def + map_sz_copy;
			end  = (unsigned char *) def + map_sz_elf;
			for (; addr < end; addr++) {
				if (*addr != 0) {
					free(sym);
					return -EFBIG;
				}
			}
		}
	}

	free(sym);
	return nr_maps;
}
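
/*
 * The maps section parsed above is emitted by BPF-side code such as the
 * following sketch ("my_map" is an illustrative name; struct bpf_map_def
 * comes from the samples' bpf_helpers.h):
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type        = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(u32),
 *		.value_size  = sizeof(long),
 *		.max_entries = 1024,
 *	};
 */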

static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
{
	int fd, i, ret, maps_shndx = -1, strtabidx = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr, shdr_prog;
	Elf_Data *data, *data_prog, *data_maps = NULL, *symbols = NULL;
	char *shname, *shname_prog;
	int nr_maps = 0;

	/* reset global variables */
	kern_version = 0;
	memset(license, 0, sizeof(license));
	memset(processed_sec, 0, sizeof(processed_sec));

	if (elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	fd = open(path, O_RDONLY, 0);
	if (fd < 0)
		return 1;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf)
		return 1;

	if (gelf_getehdr(elf, &ehdr) != &ehdr)
		return 1;

	/* clear all kprobes */
	i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");

	/* scan over all elf sections to get license and map info */
	for (i = 1; i < ehdr.e_shnum; i++) {
		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (0) /* helpful for llvm debugging */
			printf("section %d:%s data %p size %zd link %d flags %d\n",
			       i, shname, data->d_buf, data->d_size,
			       shdr.sh_link, (int) shdr.sh_flags);

		if (strcmp(shname, "license") == 0) {
			processed_sec[i] = true;
			memcpy(license, data->d_buf, data->d_size);
		} else if (strcmp(shname, "version") == 0) {
			processed_sec[i] = true;
			if (data->d_size != sizeof(int)) {
				printf("invalid size of version section %zd\n",
				       data->d_size);
				return 1;
			}
			memcpy(&kern_version, data->d_buf, sizeof(int));
		} else if (strcmp(shname, "maps") == 0) {
			int j;

			maps_shndx = i;
			data_maps = data;
			for (j = 0; j < MAX_MAPS; j++)
				map_data[j].fd = -1;
		} else if (shdr.sh_type == SHT_SYMTAB) {
			strtabidx = shdr.sh_link;
			symbols = data;
		}
	}

	ret = 1;

	if (!symbols) {
		printf("missing SHT_SYMTAB section\n");
		goto done;
	}

	if (data_maps) {
		nr_maps = load_elf_maps_section(map_data, maps_shndx,
						elf, symbols, strtabidx);
		if (nr_maps < 0) {
			printf("Error: Failed loading ELF maps (errno:%d):%s\n",
			       nr_maps, strerror(-nr_maps));
			goto done;
		}
		if (load_maps(map_data, nr_maps, fixup_map))
			goto done;
		map_data_count = nr_maps;

		processed_sec[maps_shndx] = true;
	}

	/* process all relo sections, and rewrite bpf insns for maps */
	for (i = 1; i < ehdr.e_shnum; i++) {
		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (shdr.sh_type == SHT_REL) {
			struct bpf_insn *insns;

			/* locate prog sec that need map fixup (relocations) */
			if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog,
				    &shdr_prog, &data_prog))
				continue;

			if (shdr_prog.sh_type != SHT_PROGBITS ||
			    !(shdr_prog.sh_flags & SHF_EXECINSTR))
				continue;

			insns = (struct bpf_insn *) data_prog->d_buf;
			processed_sec[i] = true; /* relo section */

			if (parse_relo_and_apply(data, symbols, &shdr, insns,
						 map_data, nr_maps))
				continue;
		}
	}

	/* load and attach all remaining program sections */
	for (i = 1; i < ehdr.e_shnum; i++) {
		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (memcmp(shname, "kprobe/", 7) == 0 ||
		    memcmp(shname, "kretprobe/", 10) == 0 ||
		    memcmp(shname, "tracepoint/", 11) == 0 ||
		    memcmp(shname, "raw_tracepoint/", 15) == 0 ||
		    memcmp(shname, "xdp", 3) == 0 ||
		    memcmp(shname, "perf_event", 10) == 0 ||
		    memcmp(shname, "socket", 6) == 0 ||
		    memcmp(shname, "cgroup/", 7) == 0 ||
		    memcmp(shname, "sockops", 7) == 0 ||
		    memcmp(shname, "sk_skb", 6) == 0 ||
		    memcmp(shname, "sk_msg", 6) == 0) {
			ret = load_and_attach(shname, data->d_buf,
					      data->d_size);
			if (ret != 0)
				goto done;
		}
	}

done:
	close(fd);
	return ret;
}

int load_bpf_file(char *path)
{
	return do_load_bpf_file(path, NULL);
}

int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map)
{
	return do_load_bpf_file(path, fixup_map);
}
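
/*
 * read_trace_pipe() is how the samples surface bpf_trace_printk()
 * output. A BPF-side sketch that would produce such output:
 *
 *	char fmt[] = "got packet len %d\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), len);
 *
 * The formatted text then appears in DEBUGFS "trace_pipe".
 */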
void read_trace_pipe(void)
{
	int trace_fd;

	trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
	if (trace_fd < 0)
		return;

	while (1) {
		static char buf[4096];
		ssize_t sz;

		/* leave room for the terminating NUL appended below */
		sz = read(trace_fd, buf, sizeof(buf) - 1);
		if (sz > 0) {
			buf[sz] = 0;
			puts(buf);
		}
	}
}

#define MAX_SYMS 300000
static struct ksym syms[MAX_SYMS];
static int sym_cnt;

static int ksym_cmp(const void *p1, const void *p2)
{
	const struct ksym *s1 = p1, *s2 = p2;

	/* compare explicitly: truncating the long difference to int can
	 * return the wrong sign for 64-bit addresses */
	if (s1->addr == s2->addr)
		return 0;
	return s1->addr < s2->addr ? -1 : 1;
}

int load_kallsyms(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char func[256], buf[256];
	char symbol;
	void *addr;
	int i = 0;

	if (!f)
		return -ENOENT;

	while (fgets(buf, sizeof(buf), f)) {
		if (sscanf(buf, "%p %c %255s", &addr, &symbol, func) != 3)
			break;
		if (!addr)
			continue;
		if (i >= MAX_SYMS)
			break; /* don't overflow syms[] */
		syms[i].addr = (long) addr;
		syms[i].name = strdup(func);
		i++;
	}
	fclose(f);
	sym_cnt = i;
	qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
	return 0;
}
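
/*
 * /proc/kallsyms lines parsed above look like "ffffffff81000000 T _stext".
 * ksym_search() below binary-searches the sorted table for the nearest
 * symbol at or below @key, which is what is needed to map a raw
 * instruction pointer back to a function name.
 */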
struct ksym *ksym_search(long key)
{
	int start = 0, end = sym_cnt;

	while (start < end) {
		int mid = start + (end - start) / 2;

		/* compare longs directly instead of truncating the
		 * difference to int, which can flip the sign */
		if (key < syms[mid].addr)
			end = mid;
		else if (key > syms[mid].addr)
			start = mid + 1;
		else
			return &syms[mid];
	}

	if (start >= 1 && start < sym_cnt &&
	    syms[start - 1].addr < key &&
	    key < syms[start].addr)
		/* valid ksym */
		return &syms[start - 1];

	/* out of range. return _stext */
	return &syms[0];
}
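
/*
 * Typical use, symbolizing raw instruction pointers pulled from a
 * BPF_MAP_TYPE_STACK_TRACE map (a sketch; "ip" is one such address):
 *
 *	struct ksym *sym = ksym_search(ip);
 *	printf("%s\n", sym->name);
 */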