1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  * Copyright (C) 2019 Isovalent, Inc.
11  */
12
13 #ifndef _GNU_SOURCE
14 #define _GNU_SOURCE
15 #endif
16 #include <stdlib.h>
17 #include <stdio.h>
18 #include <stdarg.h>
19 #include <libgen.h>
20 #include <inttypes.h>
21 #include <limits.h>
22 #include <string.h>
23 #include <unistd.h>
24 #include <endian.h>
25 #include <fcntl.h>
26 #include <errno.h>
27 #include <ctype.h>
28 #include <asm/unistd.h>
29 #include <linux/err.h>
30 #include <linux/kernel.h>
31 #include <linux/bpf.h>
32 #include <linux/btf.h>
33 #include <linux/filter.h>
34 #include <linux/list.h>
35 #include <linux/limits.h>
36 #include <linux/perf_event.h>
37 #include <linux/ring_buffer.h>
38 #include <linux/version.h>
39 #include <sys/epoll.h>
40 #include <sys/ioctl.h>
41 #include <sys/mman.h>
42 #include <sys/stat.h>
43 #include <sys/types.h>
44 #include <sys/vfs.h>
45 #include <sys/utsname.h>
46 #include <sys/resource.h>
47 #include <libelf.h>
48 #include <gelf.h>
49 #include <zlib.h>
50
51 #include "libbpf.h"
52 #include "bpf.h"
53 #include "btf.h"
54 #include "str_error.h"
55 #include "libbpf_internal.h"
56 #include "hashmap.h"
57
58 #ifndef BPF_FS_MAGIC
59 #define BPF_FS_MAGIC            0xcafe4a11
60 #endif
61
62 #define BPF_INSN_SZ (sizeof(struct bpf_insn))
63
 64 /* vfprintf() in __base_pr() uses a nonliteral format string. It may break
 65  * compilation if the user enables the corresponding warning. Disable it explicitly.
66  */
67 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
68
69 #define __printf(a, b)  __attribute__((format(printf, a, b)))
70
71 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
72 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
73
74 static int __base_pr(enum libbpf_print_level level, const char *format,
75                      va_list args)
76 {
77         if (level == LIBBPF_DEBUG)
78                 return 0;
79
80         return vfprintf(stderr, format, args);
81 }
82
83 static libbpf_print_fn_t __libbpf_pr = __base_pr;
84
85 libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
86 {
87         libbpf_print_fn_t old_print_fn = __libbpf_pr;
88
89         __libbpf_pr = fn;
90         return old_print_fn;
91 }
92
93 __printf(2, 3)
94 void libbpf_print(enum libbpf_print_level level, const char *format, ...)
95 {
96         va_list args;
97
98         if (!__libbpf_pr)
99                 return;
100
101         va_start(args, format);
102         __libbpf_pr(level, format, args);
103         va_end(args);
104 }
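
/*
 * Example (illustrative sketch, not part of this file): a library user can
 * route all libbpf output through a custom callback via libbpf_set_print().
 * The function names my_print and install_logger below are hypothetical.
 */
#if 0
static int my_print(enum libbpf_print_level level, const char *fmt, va_list args)
{
        /* forward everything, including LIBBPF_DEBUG messages, to stderr */
        return vfprintf(stderr, fmt, args);
}

static void install_logger(void)
{
        libbpf_print_fn_t old_fn = libbpf_set_print(my_print);

        (void)old_fn; /* previous callback; can be restored later */
}
#endif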
105
106 static void pr_perm_msg(int err)
107 {
108         struct rlimit limit;
109         char buf[100];
110
111         if (err != -EPERM || geteuid() != 0)
112                 return;
113
114         err = getrlimit(RLIMIT_MEMLOCK, &limit);
115         if (err)
116                 return;
117
118         if (limit.rlim_cur == RLIM_INFINITY)
119                 return;
120
121         if (limit.rlim_cur < 1024)
122                 snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
123         else if (limit.rlim_cur < 1024*1024)
124                 snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
125         else
126                 snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
127
128         pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
129                 buf);
130 }
131
132 #define STRERR_BUFSIZE  128
133
134 /* Copied from tools/perf/util/util.h */
135 #ifndef zfree
136 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
137 #endif
138
139 #ifndef zclose
140 # define zclose(fd) ({                  \
141         int ___err = 0;                 \
142         if ((fd) >= 0)                  \
143                 ___err = close((fd));   \
144         fd = -1;                        \
145         ___err; })
146 #endif
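
/*
 * Example (illustrative): zfree()/zclose() reset their argument after
 * releasing it, so cleanup code can safely run more than once on the same
 * struct. The example_res type below is hypothetical.
 */
#if 0
struct example_res {
        char *buf;
        int fd;
};

static void example_res__destroy(struct example_res *res)
{
        zfree(&res->buf);       /* frees and NULLs buf; free(NULL) later is a no-op */
        zclose(res->fd);        /* closes and sets fd to -1; negative fds are skipped */
}
#endif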
147
148 static inline __u64 ptr_to_u64(const void *ptr)
149 {
150         return (__u64) (unsigned long) ptr;
151 }
152
153 enum kern_feature_id {
154         /* v4.14: kernel support for program & map names. */
155         FEAT_PROG_NAME,
156         /* v5.2: kernel support for global data sections. */
157         FEAT_GLOBAL_DATA,
158         /* BTF support */
159         FEAT_BTF,
160         /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
161         FEAT_BTF_FUNC,
162         /* BTF_KIND_VAR and BTF_KIND_DATASEC support */
163         FEAT_BTF_DATASEC,
164         /* BTF_FUNC_GLOBAL is supported */
165         FEAT_BTF_GLOBAL_FUNC,
166         /* BPF_F_MMAPABLE is supported for arrays */
167         FEAT_ARRAY_MMAP,
168         /* kernel support for expected_attach_type in BPF_PROG_LOAD */
169         FEAT_EXP_ATTACH_TYPE,
170         /* bpf_probe_read_{kernel,user}[_str] helpers */
171         FEAT_PROBE_READ_KERN,
172         /* BPF_PROG_BIND_MAP is supported */
173         FEAT_PROG_BIND_MAP,
174         /* Kernel support for module BTFs */
175         FEAT_MODULE_BTF,
176         /* BTF_KIND_FLOAT support */
177         FEAT_BTF_FLOAT,
178         __FEAT_CNT,
179 };
180
181 static bool kernel_supports(enum kern_feature_id feat_id);
182
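
/*
 * Example (illustrative): feature probes are consulted before relying on a
 * newer kernel capability. The helper below is hypothetical and only sketches
 * the kind of check made elsewhere in this file before newer map flags or
 * program features are used.
 */
#if 0
static void example_request_mmapable(struct bpf_create_map_attr *attr)
{
        /* only ask for a BPF_F_MMAPABLE array if the running kernel supports it */
        if (kernel_supports(FEAT_ARRAY_MMAP))
                attr->map_flags |= BPF_F_MMAPABLE;
}
#endif
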
183 enum reloc_type {
184         RELO_LD64,
185         RELO_CALL,
186         RELO_DATA,
187         RELO_EXTERN_VAR,
188         RELO_EXTERN_FUNC,
189         RELO_SUBPROG_ADDR,
190 };
191
192 struct reloc_desc {
193         enum reloc_type type;
194         int insn_idx;
195         int map_idx;
196         int sym_off;
197 };
198
199 struct bpf_sec_def;
200
201 typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
202                                         struct bpf_program *prog);
203
204 struct bpf_sec_def {
205         const char *sec;
206         size_t len;
207         enum bpf_prog_type prog_type;
208         enum bpf_attach_type expected_attach_type;
209         bool is_exp_attach_type_optional;
210         bool is_attachable;
211         bool is_attach_btf;
212         bool is_sleepable;
213         attach_fn_t attach_fn;
214 };
215
216 /*
 217  * bpf_prog would be a better name, but it is already used in
218  * linux/filter.h.
219  */
220 struct bpf_program {
221         const struct bpf_sec_def *sec_def;
222         char *sec_name;
223         size_t sec_idx;
224         /* this program's instruction offset (in number of instructions)
225          * within its containing ELF section
226          */
227         size_t sec_insn_off;
228         /* number of original instructions in ELF section belonging to this
229          * program, not taking into account subprogram instructions possibly
230          * appended later during relocation
231          */
232         size_t sec_insn_cnt;
233         /* Offset (in number of instructions) of the start of the instructions
234          * belonging to this BPF program within its containing main BPF
235          * program. For the entry-point (main) BPF program, this is always
236          * zero. For a sub-program, this gets reset before each main BPF
237          * program is processed and relocated, and is used to determine
238          * whether the sub-program was already appended to the main program,
239          * and if so, at which instruction offset.
240          */
241         size_t sub_insn_off;
242
243         char *name;
244         /* sec_name with / replaced by _; makes recursive pinning
245          * in bpf_object__pin_programs easier
246          */
247         char *pin_name;
248
249         /* instructions that belong to this BPF program; insns[0] is located at
250          * the sec_insn_off instruction offset within its ELF section, so when
251          * mapping an ELF file instruction index to a local instruction index,
252          * one needs to subtract sec_insn_off; and vice versa.
253          */
254         struct bpf_insn *insns;
255         /* actual number of instructions in this BPF program's image; for
256          * entry-point BPF programs this includes the size of the main program
257          * itself plus all the used sub-programs, appended at the end
258          */
259         size_t insns_cnt;
260
261         struct reloc_desc *reloc_desc;
262         int nr_reloc;
263         int log_level;
264
265         struct {
266                 int nr;
267                 int *fds;
268         } instances;
269         bpf_program_prep_t preprocessor;
270
271         struct bpf_object *obj;
272         void *priv;
273         bpf_program_clear_priv_t clear_priv;
274
275         bool load;
276         bool mark_btf_static;
277         enum bpf_prog_type type;
278         enum bpf_attach_type expected_attach_type;
279         int prog_ifindex;
280         __u32 attach_btf_obj_fd;
281         __u32 attach_btf_id;
282         __u32 attach_prog_fd;
283         void *func_info;
284         __u32 func_info_rec_size;
285         __u32 func_info_cnt;
286
287         void *line_info;
288         __u32 line_info_rec_size;
289         __u32 line_info_cnt;
290         __u32 prog_flags;
291 };
292
293 struct bpf_struct_ops {
294         const char *tname;
295         const struct btf_type *type;
296         struct bpf_program **progs;
297         __u32 *kern_func_off;
298         /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
299         void *data;
300         /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
301          *      btf_vmlinux's format.
302          * struct bpf_struct_ops_tcp_congestion_ops {
303          *      [... some other kernel fields ...]
304          *      struct tcp_congestion_ops data;
305          * }
306          * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
307          * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
308          * from "data".
309          */
310         void *kern_vdata;
311         __u32 type_id;
312 };
313
314 #define DATA_SEC ".data"
315 #define BSS_SEC ".bss"
316 #define RODATA_SEC ".rodata"
317 #define KCONFIG_SEC ".kconfig"
318 #define KSYMS_SEC ".ksyms"
319 #define STRUCT_OPS_SEC ".struct_ops"
320
321 enum libbpf_map_type {
322         LIBBPF_MAP_UNSPEC,
323         LIBBPF_MAP_DATA,
324         LIBBPF_MAP_BSS,
325         LIBBPF_MAP_RODATA,
326         LIBBPF_MAP_KCONFIG,
327 };
328
329 static const char * const libbpf_type_to_btf_name[] = {
330         [LIBBPF_MAP_DATA]       = DATA_SEC,
331         [LIBBPF_MAP_BSS]        = BSS_SEC,
332         [LIBBPF_MAP_RODATA]     = RODATA_SEC,
333         [LIBBPF_MAP_KCONFIG]    = KCONFIG_SEC,
334 };
335
336 struct bpf_map {
337         char *name;
338         int fd;
339         int sec_idx;
340         size_t sec_offset;
341         int map_ifindex;
342         int inner_map_fd;
343         struct bpf_map_def def;
344         __u32 numa_node;
345         __u32 btf_var_idx;
346         __u32 btf_key_type_id;
347         __u32 btf_value_type_id;
348         __u32 btf_vmlinux_value_type_id;
349         void *priv;
350         bpf_map_clear_priv_t clear_priv;
351         enum libbpf_map_type libbpf_type;
352         void *mmaped;
353         struct bpf_struct_ops *st_ops;
354         struct bpf_map *inner_map;
355         void **init_slots;
356         int init_slots_sz;
357         char *pin_path;
358         bool pinned;
359         bool reused;
360 };
361
362 enum extern_type {
363         EXT_UNKNOWN,
364         EXT_KCFG,
365         EXT_KSYM,
366 };
367
368 enum kcfg_type {
369         KCFG_UNKNOWN,
370         KCFG_CHAR,
371         KCFG_BOOL,
372         KCFG_INT,
373         KCFG_TRISTATE,
374         KCFG_CHAR_ARR,
375 };
376
377 struct extern_desc {
378         enum extern_type type;
379         int sym_idx;
380         int btf_id;
381         int sec_btf_id;
382         const char *name;
383         bool is_set;
384         bool is_weak;
385         union {
386                 struct {
387                         enum kcfg_type type;
388                         int sz;
389                         int align;
390                         int data_off;
391                         bool is_signed;
392                 } kcfg;
393                 struct {
394                         unsigned long long addr;
395
396                         /* target btf_id of the corresponding kernel var. */
397                         int kernel_btf_obj_fd;
398                         int kernel_btf_id;
399
400                         /* local btf_id of the ksym extern's type. */
401                         __u32 type_id;
402                 } ksym;
403         };
404 };
405
406 static LIST_HEAD(bpf_objects_list);
407
408 struct module_btf {
409         struct btf *btf;
410         char *name;
411         __u32 id;
412         int fd;
413 };
414
415 struct bpf_object {
416         char name[BPF_OBJ_NAME_LEN];
417         char license[64];
418         __u32 kern_version;
419
420         struct bpf_program *programs;
421         size_t nr_programs;
422         struct bpf_map *maps;
423         size_t nr_maps;
424         size_t maps_cap;
425
426         char *kconfig;
427         struct extern_desc *externs;
428         int nr_extern;
429         int kconfig_map_idx;
430         int rodata_map_idx;
431
432         bool loaded;
433         bool has_subcalls;
434
435         /*
436          * Information used when doing ELF-related work. Only valid if fd
437          * is valid.
438          */
439         struct {
440                 int fd;
441                 const void *obj_buf;
442                 size_t obj_buf_sz;
443                 Elf *elf;
444                 GElf_Ehdr ehdr;
445                 Elf_Data *symbols;
446                 Elf_Data *data;
447                 Elf_Data *rodata;
448                 Elf_Data *bss;
449                 Elf_Data *st_ops_data;
450                 size_t shstrndx; /* section index for section name strings */
451                 size_t strtabidx;
452                 struct {
453                         GElf_Shdr shdr;
454                         Elf_Data *data;
455                 } *reloc_sects;
456                 int nr_reloc_sects;
457                 int maps_shndx;
458                 int btf_maps_shndx;
459                 __u32 btf_maps_sec_btf_id;
460                 int text_shndx;
461                 int symbols_shndx;
462                 int data_shndx;
463                 int rodata_shndx;
464                 int bss_shndx;
465                 int st_ops_shndx;
466         } efile;
467         /*
468          * All loaded bpf_object instances are linked in a list, which is
469          * hidden from the caller. bpf_objects__<func> handlers deal with
470          * all objects.
471          */
472         struct list_head list;
473
474         struct btf *btf;
475         struct btf_ext *btf_ext;
476
477         /* Parse and load BTF vmlinux if any of the programs in the object need
478          * it at load time.
479          */
480         struct btf *btf_vmlinux;
481         /* vmlinux BTF override for CO-RE relocations */
482         struct btf *btf_vmlinux_override;
483         /* Lazily initialized kernel module BTFs */
484         struct module_btf *btf_modules;
485         bool btf_modules_loaded;
486         size_t btf_module_cnt;
487         size_t btf_module_cap;
488
489         void *priv;
490         bpf_object_clear_priv_t clear_priv;
491
492         char path[];
493 };
494 #define obj_elf_valid(o)        ((o)->efile.elf)
495
496 static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
497 static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
498 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
499 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
500 static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
501 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
502 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
503
504 void bpf_program__unload(struct bpf_program *prog)
505 {
506         int i;
507
508         if (!prog)
509                 return;
510
511         /*
512          * If the object is opened but the program was never loaded,
513          * it is possible that prog->instances.nr == -1.
514          */
515         if (prog->instances.nr > 0) {
516                 for (i = 0; i < prog->instances.nr; i++)
517                         zclose(prog->instances.fds[i]);
518         } else if (prog->instances.nr != -1) {
519                 pr_warn("Internal error: instances.nr is %d\n",
520                         prog->instances.nr);
521         }
522
523         prog->instances.nr = -1;
524         zfree(&prog->instances.fds);
525
526         zfree(&prog->func_info);
527         zfree(&prog->line_info);
528 }
529
530 static void bpf_program__exit(struct bpf_program *prog)
531 {
532         if (!prog)
533                 return;
534
535         if (prog->clear_priv)
536                 prog->clear_priv(prog, prog->priv);
537
538         prog->priv = NULL;
539         prog->clear_priv = NULL;
540
541         bpf_program__unload(prog);
542         zfree(&prog->name);
543         zfree(&prog->sec_name);
544         zfree(&prog->pin_name);
545         zfree(&prog->insns);
546         zfree(&prog->reloc_desc);
547
548         prog->nr_reloc = 0;
549         prog->insns_cnt = 0;
550         prog->sec_idx = -1;
551 }
552
553 static char *__bpf_program__pin_name(struct bpf_program *prog)
554 {
555         char *name, *p;
556
557         name = p = strdup(prog->sec_name);
558         while ((p = strchr(p, '/')))
559                 *p = '_';
560
561         return name;
562 }
563
564 static bool insn_is_subprog_call(const struct bpf_insn *insn)
565 {
566         return BPF_CLASS(insn->code) == BPF_JMP &&
567                BPF_OP(insn->code) == BPF_CALL &&
568                BPF_SRC(insn->code) == BPF_K &&
569                insn->src_reg == BPF_PSEUDO_CALL &&
570                insn->dst_reg == 0 &&
571                insn->off == 0;
572 }
573
574 static bool is_ldimm64_insn(struct bpf_insn *insn)
575 {
576         return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
577 }
578
579 static bool is_call_insn(const struct bpf_insn *insn)
580 {
581         return insn->code == (BPF_JMP | BPF_CALL);
582 }
583
584 static bool insn_is_pseudo_func(struct bpf_insn *insn)
585 {
586         return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
587 }
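
/*
 * Example (illustrative): the subprogram call matched by insn_is_subprog_call()
 * is a BPF_CALL instruction with src_reg set to BPF_PSEUDO_CALL; its imm field
 * holds the relative instruction offset to the callee, counted from the
 * following instruction. The variable name below is hypothetical.
 */
#if 0
static const struct bpf_insn example_subprog_call = {
        .code    = BPF_JMP | BPF_CALL,
        .src_reg = BPF_PSEUDO_CALL,
        .imm     = 4,   /* callee starts 5 instructions after this one */
};
#endif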
588
589 static int
590 bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
591                       const char *name, size_t sec_idx, const char *sec_name,
592                       size_t sec_off, void *insn_data, size_t insn_data_sz)
593 {
594         if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
595                 pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
596                         sec_name, name, sec_off, insn_data_sz);
597                 return -EINVAL;
598         }
599
600         memset(prog, 0, sizeof(*prog));
601         prog->obj = obj;
602
603         prog->sec_idx = sec_idx;
604         prog->sec_insn_off = sec_off / BPF_INSN_SZ;
605         prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
606         /* insns_cnt can later be increased by appending used subprograms */
607         prog->insns_cnt = prog->sec_insn_cnt;
608
609         prog->type = BPF_PROG_TYPE_UNSPEC;
610         prog->load = true;
611
612         prog->instances.fds = NULL;
613         prog->instances.nr = -1;
614
615         prog->sec_name = strdup(sec_name);
616         if (!prog->sec_name)
617                 goto errout;
618
619         prog->name = strdup(name);
620         if (!prog->name)
621                 goto errout;
622
623         prog->pin_name = __bpf_program__pin_name(prog);
624         if (!prog->pin_name)
625                 goto errout;
626
627         prog->insns = malloc(insn_data_sz);
628         if (!prog->insns)
629                 goto errout;
630         memcpy(prog->insns, insn_data, insn_data_sz);
631
632         return 0;
633 errout:
634         pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
635         bpf_program__exit(prog);
636         return -ENOMEM;
637 }
638
639 static int
640 bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
641                          const char *sec_name, int sec_idx)
642 {
643         Elf_Data *symbols = obj->efile.symbols;
644         struct bpf_program *prog, *progs;
645         void *data = sec_data->d_buf;
646         size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
647         int nr_progs, err, i;
648         const char *name;
649         GElf_Sym sym;
650
651         progs = obj->programs;
652         nr_progs = obj->nr_programs;
653         nr_syms = symbols->d_size / sizeof(GElf_Sym);
654         sec_off = 0;
655
656         for (i = 0; i < nr_syms; i++) {
657                 if (!gelf_getsym(symbols, i, &sym))
658                         continue;
659                 if (sym.st_shndx != sec_idx)
660                         continue;
661                 if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
662                         continue;
663
664                 prog_sz = sym.st_size;
665                 sec_off = sym.st_value;
666
667                 name = elf_sym_str(obj, sym.st_name);
668                 if (!name) {
669                         pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
670                                 sec_name, sec_off);
671                         return -LIBBPF_ERRNO__FORMAT;
672                 }
673
674                 if (sec_off + prog_sz > sec_sz) {
675                         pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
676                                 sec_name, sec_off);
677                         return -LIBBPF_ERRNO__FORMAT;
678                 }
679
680                 pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
681                          sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
682
683                 progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
684                 if (!progs) {
685                         /*
686                          * In this case the original obj->programs
687                          * is still valid, so no special handling is needed for
688                          * bpf_close_object().
689                          */
690                         pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
691                                 sec_name, name);
692                         return -ENOMEM;
693                 }
694                 obj->programs = progs;
695
696                 prog = &progs[nr_progs];
697
698                 err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
699                                             sec_off, data + sec_off, prog_sz);
700                 if (err)
701                         return err;
702
703                 /* if the function is a global/weak symbol but has hidden
704                  * visibility (STV_HIDDEN), mark its BTF FUNC as static to
705                  * enable a more permissive BPF verification mode with more
706                  * outside context available to the BPF verifier
707                  */
708                 if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
709                     && GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN)
710                         prog->mark_btf_static = true;
711
712                 nr_progs++;
713                 obj->nr_programs = nr_progs;
714         }
715
716         return 0;
717 }
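
/*
 * Example (illustrative, BPF-side C living in a separate object file): with
 * the section below, bpf_object__add_programs() creates one bpf_program for
 * the STT_FUNC symbol "handle_exec", found at offset 0 of its section. The
 * section and function names are hypothetical.
 */
#if 0
SEC("tracepoint/sched/sched_process_exec")
int handle_exec(void *ctx)
{
        return 0;
}
#endif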
718
719 static __u32 get_kernel_version(void)
720 {
721         __u32 major, minor, patch;
722         struct utsname info;
723
724         uname(&info);
725         if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
726                 return 0;
727         return KERNEL_VERSION(major, minor, patch);
728 }
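
/*
 * Example (worked): for a kernel reporting release "5.11.7-generic", sscanf()
 * extracts 5.11.7 and the function returns
 * KERNEL_VERSION(5, 11, 7) = (5 << 16) + (11 << 8) + 7 = 330503.
 */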
729
730 static const struct btf_member *
731 find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
732 {
733         struct btf_member *m;
734         int i;
735
736         for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
737                 if (btf_member_bit_offset(t, i) == bit_offset)
738                         return m;
739         }
740
741         return NULL;
742 }
743
744 static const struct btf_member *
745 find_member_by_name(const struct btf *btf, const struct btf_type *t,
746                     const char *name)
747 {
748         struct btf_member *m;
749         int i;
750
751         for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
752                 if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
753                         return m;
754         }
755
756         return NULL;
757 }
758
759 #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
760 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
761                                    const char *name, __u32 kind);
762
763 static int
764 find_struct_ops_kern_types(const struct btf *btf, const char *tname,
765                            const struct btf_type **type, __u32 *type_id,
766                            const struct btf_type **vtype, __u32 *vtype_id,
767                            const struct btf_member **data_member)
768 {
769         const struct btf_type *kern_type, *kern_vtype;
770         const struct btf_member *kern_data_member;
771         __s32 kern_vtype_id, kern_type_id;
772         __u32 i;
773
774         kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
775         if (kern_type_id < 0) {
776                 pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
777                         tname);
778                 return kern_type_id;
779         }
780         kern_type = btf__type_by_id(btf, kern_type_id);
781
782         /* Find the corresponding "map_value" type that will be used
783          * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
784          * find "struct bpf_struct_ops_tcp_congestion_ops" from the
785          * btf_vmlinux.
786          */
787         kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
788                                                 tname, BTF_KIND_STRUCT);
789         if (kern_vtype_id < 0) {
790                 pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
791                         STRUCT_OPS_VALUE_PREFIX, tname);
792                 return kern_vtype_id;
793         }
794         kern_vtype = btf__type_by_id(btf, kern_vtype_id);
795
796         /* Find "struct tcp_congestion_ops" from
797          * struct bpf_struct_ops_tcp_congestion_ops {
798          *      [ ... ]
799          *      struct tcp_congestion_ops data;
800          * }
801          */
802         kern_data_member = btf_members(kern_vtype);
803         for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
804                 if (kern_data_member->type == kern_type_id)
805                         break;
806         }
807         if (i == btf_vlen(kern_vtype)) {
808                 pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
809                         tname, STRUCT_OPS_VALUE_PREFIX, tname);
810                 return -EINVAL;
811         }
812
813         *type = kern_type;
814         *type_id = kern_type_id;
815         *vtype = kern_vtype;
816         *vtype_id = kern_vtype_id;
817         *data_member = kern_data_member;
818
819         return 0;
820 }
821
822 static bool bpf_map__is_struct_ops(const struct bpf_map *map)
823 {
824         return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
825 }
826
827 /* Init the map's fields that depend on kern_btf */
828 static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
829                                          const struct btf *btf,
830                                          const struct btf *kern_btf)
831 {
832         const struct btf_member *member, *kern_member, *kern_data_member;
833         const struct btf_type *type, *kern_type, *kern_vtype;
834         __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
835         struct bpf_struct_ops *st_ops;
836         void *data, *kern_data;
837         const char *tname;
838         int err;
839
840         st_ops = map->st_ops;
841         type = st_ops->type;
842         tname = st_ops->tname;
843         err = find_struct_ops_kern_types(kern_btf, tname,
844                                          &kern_type, &kern_type_id,
845                                          &kern_vtype, &kern_vtype_id,
846                                          &kern_data_member);
847         if (err)
848                 return err;
849
850         pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
851                  map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
852
853         map->def.value_size = kern_vtype->size;
854         map->btf_vmlinux_value_type_id = kern_vtype_id;
855
856         st_ops->kern_vdata = calloc(1, kern_vtype->size);
857         if (!st_ops->kern_vdata)
858                 return -ENOMEM;
859
860         data = st_ops->data;
861         kern_data_off = kern_data_member->offset / 8;
862         kern_data = st_ops->kern_vdata + kern_data_off;
863
864         member = btf_members(type);
865         for (i = 0; i < btf_vlen(type); i++, member++) {
866                 const struct btf_type *mtype, *kern_mtype;
867                 __u32 mtype_id, kern_mtype_id;
868                 void *mdata, *kern_mdata;
869                 __s64 msize, kern_msize;
870                 __u32 moff, kern_moff;
871                 __u32 kern_member_idx;
872                 const char *mname;
873
874                 mname = btf__name_by_offset(btf, member->name_off);
875                 kern_member = find_member_by_name(kern_btf, kern_type, mname);
876                 if (!kern_member) {
877                         pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
878                                 map->name, mname);
879                         return -ENOTSUP;
880                 }
881
882                 kern_member_idx = kern_member - btf_members(kern_type);
883                 if (btf_member_bitfield_size(type, i) ||
884                     btf_member_bitfield_size(kern_type, kern_member_idx)) {
885                         pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
886                                 map->name, mname);
887                         return -ENOTSUP;
888                 }
889
890                 moff = member->offset / 8;
891                 kern_moff = kern_member->offset / 8;
892
893                 mdata = data + moff;
894                 kern_mdata = kern_data + kern_moff;
895
896                 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
897                 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
898                                                     &kern_mtype_id);
899                 if (BTF_INFO_KIND(mtype->info) !=
900                     BTF_INFO_KIND(kern_mtype->info)) {
901                         pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
902                                 map->name, mname, BTF_INFO_KIND(mtype->info),
903                                 BTF_INFO_KIND(kern_mtype->info));
904                         return -ENOTSUP;
905                 }
906
907                 if (btf_is_ptr(mtype)) {
908                         struct bpf_program *prog;
909
910                         prog = st_ops->progs[i];
911                         if (!prog)
912                                 continue;
913
914                         kern_mtype = skip_mods_and_typedefs(kern_btf,
915                                                             kern_mtype->type,
916                                                             &kern_mtype_id);
917
918                         /* mtype->type must be a func_proto which was
919                          * guaranteed in bpf_object__collect_st_ops_relos(),
920                          * so only check kern_mtype for func_proto here.
921                          */
922                         if (!btf_is_func_proto(kern_mtype)) {
923                                 pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
924                                         map->name, mname);
925                                 return -ENOTSUP;
926                         }
927
928                         prog->attach_btf_id = kern_type_id;
929                         prog->expected_attach_type = kern_member_idx;
930
931                         st_ops->kern_func_off[i] = kern_data_off + kern_moff;
932
933                         pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
934                                  map->name, mname, prog->name, moff,
935                                  kern_moff);
936
937                         continue;
938                 }
939
940                 msize = btf__resolve_size(btf, mtype_id);
941                 kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
942                 if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
943                         pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
944                                 map->name, mname, (ssize_t)msize,
945                                 (ssize_t)kern_msize);
946                         return -ENOTSUP;
947                 }
948
949                 pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
950                          map->name, mname, (unsigned int)msize,
951                          moff, kern_moff);
952                 memcpy(kern_mdata, mdata, msize);
953         }
954
955         return 0;
956 }
957
958 static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
959 {
960         struct bpf_map *map;
961         size_t i;
962         int err;
963
964         for (i = 0; i < obj->nr_maps; i++) {
965                 map = &obj->maps[i];
966
967                 if (!bpf_map__is_struct_ops(map))
968                         continue;
969
970                 err = bpf_map__init_kern_struct_ops(map, obj->btf,
971                                                     obj->btf_vmlinux);
972                 if (err)
973                         return err;
974         }
975
976         return 0;
977 }
978
979 static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
980 {
981         const struct btf_type *type, *datasec;
982         const struct btf_var_secinfo *vsi;
983         struct bpf_struct_ops *st_ops;
984         const char *tname, *var_name;
985         __s32 type_id, datasec_id;
986         const struct btf *btf;
987         struct bpf_map *map;
988         __u32 i;
989
990         if (obj->efile.st_ops_shndx == -1)
991                 return 0;
992
993         btf = obj->btf;
994         datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
995                                             BTF_KIND_DATASEC);
996         if (datasec_id < 0) {
997                 pr_warn("struct_ops init: DATASEC %s not found\n",
998                         STRUCT_OPS_SEC);
999                 return -EINVAL;
1000         }
1001
1002         datasec = btf__type_by_id(btf, datasec_id);
1003         vsi = btf_var_secinfos(datasec);
1004         for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
1005                 type = btf__type_by_id(obj->btf, vsi->type);
1006                 var_name = btf__name_by_offset(obj->btf, type->name_off);
1007
1008                 type_id = btf__resolve_type(obj->btf, vsi->type);
1009                 if (type_id < 0) {
1010                         pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
1011                                 vsi->type, STRUCT_OPS_SEC);
1012                         return -EINVAL;
1013                 }
1014
1015                 type = btf__type_by_id(obj->btf, type_id);
1016                 tname = btf__name_by_offset(obj->btf, type->name_off);
1017                 if (!tname[0]) {
1018                         pr_warn("struct_ops init: anonymous type is not supported\n");
1019                         return -ENOTSUP;
1020                 }
1021                 if (!btf_is_struct(type)) {
1022                         pr_warn("struct_ops init: %s is not a struct\n", tname);
1023                         return -EINVAL;
1024                 }
1025
1026                 map = bpf_object__add_map(obj);
1027                 if (IS_ERR(map))
1028                         return PTR_ERR(map);
1029
1030                 map->sec_idx = obj->efile.st_ops_shndx;
1031                 map->sec_offset = vsi->offset;
1032                 map->name = strdup(var_name);
1033                 if (!map->name)
1034                         return -ENOMEM;
1035
1036                 map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
1037                 map->def.key_size = sizeof(int);
1038                 map->def.value_size = type->size;
1039                 map->def.max_entries = 1;
1040
1041                 map->st_ops = calloc(1, sizeof(*map->st_ops));
1042                 if (!map->st_ops)
1043                         return -ENOMEM;
1044                 st_ops = map->st_ops;
1045                 st_ops->data = malloc(type->size);
1046                 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
1047                 st_ops->kern_func_off = malloc(btf_vlen(type) *
1048                                                sizeof(*st_ops->kern_func_off));
1049                 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
1050                         return -ENOMEM;
1051
1052                 if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
1053                         pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
1054                                 var_name, STRUCT_OPS_SEC);
1055                         return -EINVAL;
1056                 }
1057
1058                 memcpy(st_ops->data,
1059                        obj->efile.st_ops_data->d_buf + vsi->offset,
1060                        type->size);
1061                 st_ops->tname = tname;
1062                 st_ops->type = type;
1063                 st_ops->type_id = type_id;
1064
1065                 pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
1066                          tname, type_id, var_name, vsi->offset);
1067         }
1068
1069         return 0;
1070 }
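
/*
 * Example (illustrative, BPF-side C living in a separate object file): every
 * global variable placed in the ".struct_ops" section becomes one
 * BPF_MAP_TYPE_STRUCT_OPS map named after the variable. The variable and
 * program names below are hypothetical.
 *
 *      SEC(".struct_ops")
 *      struct tcp_congestion_ops dctcp = {
 *              .init           = (void *)dctcp_init,
 *              .cong_avoid     = (void *)dctcp_cong_avoid,
 *              .ssthresh       = (void *)dctcp_ssthresh,
 *              .name           = "bpf_dctcp",
 *      };
 *
 * The function-pointer members are later wired up to BPF programs, and the
 * remaining members are copied from st_ops->data into kern_vdata by
 * bpf_map__init_kern_struct_ops() above.
 */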
1071
1072 static struct bpf_object *bpf_object__new(const char *path,
1073                                           const void *obj_buf,
1074                                           size_t obj_buf_sz,
1075                                           const char *obj_name)
1076 {
1077         struct bpf_object *obj;
1078         char *end;
1079
1080         obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
1081         if (!obj) {
1082                 pr_warn("alloc memory failed for %s\n", path);
1083                 return ERR_PTR(-ENOMEM);
1084         }
1085
1086         strcpy(obj->path, path);
1087         if (obj_name) {
1088                 strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
1089                 obj->name[sizeof(obj->name) - 1] = 0;
1090         } else {
1091                 /* Use the GNU version of basename(), which doesn't modify its argument. */
1092                 strncpy(obj->name, basename((void *)path),
1093                         sizeof(obj->name) - 1);
1094                 end = strchr(obj->name, '.');
1095                 if (end)
1096                         *end = 0;
1097         }
1098
1099         obj->efile.fd = -1;
1100         /*
1101          * The caller of this function should also call
1102          * bpf_object__elf_finish() after data collection to return
1103          * obj_buf to the user. Otherwise, we would need to duplicate the
1104          * buffer to avoid the user freeing it before ELF processing finishes.
1105          */
1106         obj->efile.obj_buf = obj_buf;
1107         obj->efile.obj_buf_sz = obj_buf_sz;
1108         obj->efile.maps_shndx = -1;
1109         obj->efile.btf_maps_shndx = -1;
1110         obj->efile.data_shndx = -1;
1111         obj->efile.rodata_shndx = -1;
1112         obj->efile.bss_shndx = -1;
1113         obj->efile.st_ops_shndx = -1;
1114         obj->kconfig_map_idx = -1;
1115         obj->rodata_map_idx = -1;
1116
1117         obj->kern_version = get_kernel_version();
1118         obj->loaded = false;
1119
1120         INIT_LIST_HEAD(&obj->list);
1121         list_add(&obj->list, &bpf_objects_list);
1122         return obj;
1123 }
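
/*
 * Example (worked): opening "/path/to/prog_kern.o" with no explicit object
 * name sets obj->name to "prog_kern": basename() strips the directories and
 * everything from the first '.' onwards is cut off. The path is hypothetical.
 */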
1124
1125 static void bpf_object__elf_finish(struct bpf_object *obj)
1126 {
1127         if (!obj_elf_valid(obj))
1128                 return;
1129
1130         if (obj->efile.elf) {
1131                 elf_end(obj->efile.elf);
1132                 obj->efile.elf = NULL;
1133         }
1134         obj->efile.symbols = NULL;
1135         obj->efile.data = NULL;
1136         obj->efile.rodata = NULL;
1137         obj->efile.bss = NULL;
1138         obj->efile.st_ops_data = NULL;
1139
1140         zfree(&obj->efile.reloc_sects);
1141         obj->efile.nr_reloc_sects = 0;
1142         zclose(obj->efile.fd);
1143         obj->efile.obj_buf = NULL;
1144         obj->efile.obj_buf_sz = 0;
1145 }
1146
1147 static int bpf_object__elf_init(struct bpf_object *obj)
1148 {
1149         int err = 0;
1150         GElf_Ehdr *ep;
1151
1152         if (obj_elf_valid(obj)) {
1153                 pr_warn("elf: init internal error\n");
1154                 return -LIBBPF_ERRNO__LIBELF;
1155         }
1156
1157         if (obj->efile.obj_buf_sz > 0) {
1158                 /*
1159                  * obj_buf should have been validated by
1160                  * bpf_object__open_buffer().
1161                  */
1162                 obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
1163                                             obj->efile.obj_buf_sz);
1164         } else {
1165                 obj->efile.fd = open(obj->path, O_RDONLY);
1166                 if (obj->efile.fd < 0) {
1167                         char errmsg[STRERR_BUFSIZE], *cp;
1168
1169                         err = -errno;
1170                         cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
1171                         pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
1172                         return err;
1173                 }
1174
1175                 obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
1176         }
1177
1178         if (!obj->efile.elf) {
1179                 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
1180                 err = -LIBBPF_ERRNO__LIBELF;
1181                 goto errout;
1182         }
1183
1184         if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
1185                 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
1186                 err = -LIBBPF_ERRNO__FORMAT;
1187                 goto errout;
1188         }
1189         ep = &obj->efile.ehdr;
1190
1191         if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
1192                 pr_warn("elf: failed to get section names section index for %s: %s\n",
1193                         obj->path, elf_errmsg(-1));
1194                 err = -LIBBPF_ERRNO__FORMAT;
1195                 goto errout;
1196         }
1197
1198         /* Elf is corrupted/truncated, avoid calling elf_strptr. */
1199         if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
1200                 pr_warn("elf: failed to get section names strings from %s: %s\n",
1201                         obj->path, elf_errmsg(-1));
1202                 err = -LIBBPF_ERRNO__FORMAT;
1203                 goto errout;
1204         }
1205
1206         /* Old LLVM versions set e_machine to EM_NONE */
1207         if (ep->e_type != ET_REL ||
1208             (ep->e_machine && ep->e_machine != EM_BPF)) {
1209                 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
1210                 err = -LIBBPF_ERRNO__FORMAT;
1211                 goto errout;
1212         }
1213
1214         return 0;
1215 errout:
1216         bpf_object__elf_finish(obj);
1217         return err;
1218 }
1219
1220 static int bpf_object__check_endianness(struct bpf_object *obj)
1221 {
1222 #if __BYTE_ORDER == __LITTLE_ENDIAN
1223         if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
1224                 return 0;
1225 #elif __BYTE_ORDER == __BIG_ENDIAN
1226         if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
1227                 return 0;
1228 #else
1229 # error "Unrecognized __BYTE_ORDER__"
1230 #endif
1231         pr_warn("elf: endianness mismatch in %s.\n", obj->path);
1232         return -LIBBPF_ERRNO__ENDIAN;
1233 }
1234
1235 static int
1236 bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1237 {
1238         memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
1239         pr_debug("license of %s is %s\n", obj->path, obj->license);
1240         return 0;
1241 }
1242
1243 static int
1244 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1245 {
1246         __u32 kver;
1247
1248         if (size != sizeof(kver)) {
1249                 pr_warn("invalid kver section in %s\n", obj->path);
1250                 return -LIBBPF_ERRNO__FORMAT;
1251         }
1252         memcpy(&kver, data, sizeof(kver));
1253         obj->kern_version = kver;
1254         pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1255         return 0;
1256 }
1257
1258 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
1259 {
1260         if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1261             type == BPF_MAP_TYPE_HASH_OF_MAPS)
1262                 return true;
1263         return false;
1264 }
1265
1266 int bpf_object__section_size(const struct bpf_object *obj, const char *name,
1267                              __u32 *size)
1268 {
1269         int ret = -ENOENT;
1270
1271         *size = 0;
1272         if (!name) {
1273                 return -EINVAL;
1274         } else if (!strcmp(name, DATA_SEC)) {
1275                 if (obj->efile.data)
1276                         *size = obj->efile.data->d_size;
1277         } else if (!strcmp(name, BSS_SEC)) {
1278                 if (obj->efile.bss)
1279                         *size = obj->efile.bss->d_size;
1280         } else if (!strcmp(name, RODATA_SEC)) {
1281                 if (obj->efile.rodata)
1282                         *size = obj->efile.rodata->d_size;
1283         } else if (!strcmp(name, STRUCT_OPS_SEC)) {
1284                 if (obj->efile.st_ops_data)
1285                         *size = obj->efile.st_ops_data->d_size;
1286         } else {
1287                 Elf_Scn *scn = elf_sec_by_name(obj, name);
1288                 Elf_Data *data = elf_sec_data(obj, scn);
1289
1290                 if (data) {
1291                         ret = 0; /* found it */
1292                         *size = data->d_size;
1293                 }
1294         }
1295
1296         return *size ? 0 : ret;
1297 }
1298
1299 int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
1300                                 __u32 *off)
1301 {
1302         Elf_Data *symbols = obj->efile.symbols;
1303         const char *sname;
1304         size_t si;
1305
1306         if (!name || !off)
1307                 return -EINVAL;
1308
1309         for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
1310                 GElf_Sym sym;
1311
1312                 if (!gelf_getsym(symbols, si, &sym))
1313                         continue;
1314                 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1315                     GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
1316                         continue;
1317
1318                 sname = elf_sym_str(obj, sym.st_name);
1319                 if (!sname) {
1320                         pr_warn("failed to get sym name string for var %s\n",
1321                                 name);
1322                         return -EIO;
1323                 }
1324                 if (strcmp(name, sname) == 0) {
1325                         *off = sym.st_value;
1326                         return 0;
1327                 }
1328         }
1329
1330         return -ENOENT;
1331 }
1332
1333 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1334 {
1335         struct bpf_map *new_maps;
1336         size_t new_cap;
1337         int i;
1338
1339         if (obj->nr_maps < obj->maps_cap)
1340                 return &obj->maps[obj->nr_maps++];
1341
1342         new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
1343         new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
1344         if (!new_maps) {
1345                 pr_warn("alloc maps for object failed\n");
1346                 return ERR_PTR(-ENOMEM);
1347         }
1348
1349         obj->maps_cap = new_cap;
1350         obj->maps = new_maps;
1351
1352         /* zero out new maps */
1353         memset(obj->maps + obj->nr_maps, 0,
1354                (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
1355         /*
1356          * fill all fds with -1 so we won't close an incorrect fd (fd=0 is
1357          * stdin) on failure (zclose won't close a negative fd).
1358          */
1359         for (i = obj->nr_maps; i < obj->maps_cap; i++) {
1360                 obj->maps[i].fd = -1;
1361                 obj->maps[i].inner_map_fd = -1;
1362         }
1363
1364         return &obj->maps[obj->nr_maps++];
1365 }
1366
1367 static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1368 {
1369         long page_sz = sysconf(_SC_PAGE_SIZE);
1370         size_t map_sz;
1371
1372         map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
1373         map_sz = roundup(map_sz, page_sz);
1374         return map_sz;
1375 }
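
/*
 * Example (worked): for a map with value_size = 7 and max_entries = 3,
 * roundup(7, 8) * 3 = 24 bytes, which is then rounded up to a full page,
 * i.e. 4096 bytes on a system with 4KB pages.
 */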
1376
1377 static char *internal_map_name(struct bpf_object *obj,
1378                                enum libbpf_map_type type)
1379 {
1380         char map_name[BPF_OBJ_NAME_LEN], *p;
1381         const char *sfx = libbpf_type_to_btf_name[type];
1382         int sfx_len = max((size_t)7, strlen(sfx));
1383         int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
1384                           strlen(obj->name));
1385
1386         snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1387                  sfx_len, libbpf_type_to_btf_name[type]);
1388
1389         /* sanitise map name to characters allowed by kernel */
1390         for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1391                 if (!isalnum(*p) && *p != '_' && *p != '.')
1392                         *p = '_';
1393
1394         return strdup(map_name);
1395 }
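
/*
 * Example (worked): for an object named "my_prog" and type LIBBPF_MAP_RODATA
 * this returns "my_prog.rodata"; if the object name does not fit into
 * BPF_OBJ_NAME_LEN (16) together with the suffix, the prefix is truncated,
 * e.g. "some_long_object" becomes "some_lon.rodata".
 */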
1396
1397 static int
1398 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1399                               int sec_idx, void *data, size_t data_sz)
1400 {
1401         struct bpf_map_def *def;
1402         struct bpf_map *map;
1403         int err;
1404
1405         map = bpf_object__add_map(obj);
1406         if (IS_ERR(map))
1407                 return PTR_ERR(map);
1408
1409         map->libbpf_type = type;
1410         map->sec_idx = sec_idx;
1411         map->sec_offset = 0;
1412         map->name = internal_map_name(obj, type);
1413         if (!map->name) {
1414                 pr_warn("failed to alloc map name\n");
1415                 return -ENOMEM;
1416         }
1417
1418         def = &map->def;
1419         def->type = BPF_MAP_TYPE_ARRAY;
1420         def->key_size = sizeof(int);
1421         def->value_size = data_sz;
1422         def->max_entries = 1;
1423         def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1424                          ? BPF_F_RDONLY_PROG : 0;
1425         def->map_flags |= BPF_F_MMAPABLE;
1426
1427         pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1428                  map->name, map->sec_idx, map->sec_offset, def->map_flags);
1429
1430         map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
1431                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1432         if (map->mmaped == MAP_FAILED) {
1433                 err = -errno;
1434                 map->mmaped = NULL;
1435                 pr_warn("failed to alloc map '%s' content buffer: %d\n",
1436                         map->name, err);
1437                 zfree(&map->name);
1438                 return err;
1439         }
1440
1441         if (data)
1442                 memcpy(map->mmaped, data, data_sz);
1443
1444         pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1445         return 0;
1446 }
1447
1448 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1449 {
1450         int err;
1451
1452         /*
1453          * Populate obj->maps with libbpf internal maps.
1454          */
1455         if (obj->efile.data_shndx >= 0) {
1456                 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1457                                                     obj->efile.data_shndx,
1458                                                     obj->efile.data->d_buf,
1459                                                     obj->efile.data->d_size);
1460                 if (err)
1461                         return err;
1462         }
1463         if (obj->efile.rodata_shndx >= 0) {
1464                 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1465                                                     obj->efile.rodata_shndx,
1466                                                     obj->efile.rodata->d_buf,
1467                                                     obj->efile.rodata->d_size);
1468                 if (err)
1469                         return err;
1470
1471                 obj->rodata_map_idx = obj->nr_maps - 1;
1472         }
1473         if (obj->efile.bss_shndx >= 0) {
1474                 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1475                                                     obj->efile.bss_shndx,
1476                                                     NULL,
1477                                                     obj->efile.bss->d_size);
1478                 if (err)
1479                         return err;
1480         }
1481         return 0;
1482 }
1483
1484
1485 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1486                                                const void *name)
1487 {
1488         int i;
1489
1490         for (i = 0; i < obj->nr_extern; i++) {
1491                 if (strcmp(obj->externs[i].name, name) == 0)
1492                         return &obj->externs[i];
1493         }
1494         return NULL;
1495 }
1496
1497 static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1498                               char value)
1499 {
1500         switch (ext->kcfg.type) {
1501         case KCFG_BOOL:
1502                 if (value == 'm') {
1503                         pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
1504                                 ext->name, value);
1505                         return -EINVAL;
1506                 }
1507                 *(bool *)ext_val = value == 'y' ? true : false;
1508                 break;
1509         case KCFG_TRISTATE:
1510                 if (value == 'y')
1511                         *(enum libbpf_tristate *)ext_val = TRI_YES;
1512                 else if (value == 'm')
1513                         *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1514                 else /* value == 'n' */
1515                         *(enum libbpf_tristate *)ext_val = TRI_NO;
1516                 break;
1517         case KCFG_CHAR:
1518                 *(char *)ext_val = value;
1519                 break;
1520         case KCFG_UNKNOWN:
1521         case KCFG_INT:
1522         case KCFG_CHAR_ARR:
1523         default:
1524                 pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
1525                         ext->name, value);
1526                 return -EINVAL;
1527         }
1528         ext->is_set = true;
1529         return 0;
1530 }
1531
1532 static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1533                               const char *value)
1534 {
1535         size_t len;
1536
1537         if (ext->kcfg.type != KCFG_CHAR_ARR) {
1538                 pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
1539                 return -EINVAL;
1540         }
1541
1542         len = strlen(value);
1543         if (value[len - 1] != '"') {
1544                 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
1545                         ext->name, value);
1546                 return -EINVAL;
1547         }
1548
1549         /* strip quotes */
1550         len -= 2;
1551         if (len >= ext->kcfg.sz) {
1552                 pr_warn("extern (kcfg) '%s': long string config '%s' (%zu bytes) truncated to %d bytes\n",
1553                         ext->name, value, len, ext->kcfg.sz - 1);
1554                 len = ext->kcfg.sz - 1;
1555         }
1556         memcpy(ext_val, value + 1, len);
1557         ext_val[len] = '\0';
1558         ext->is_set = true;
1559         return 0;
1560 }
1561
1562 static int parse_u64(const char *value, __u64 *res)
1563 {
1564         char *value_end;
1565         int err;
1566
1567         errno = 0;
1568         *res = strtoull(value, &value_end, 0);
1569         if (errno) {
1570                 err = -errno;
1571                 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1572                 return err;
1573         }
1574         if (*value_end) {
1575                 pr_warn("failed to parse '%s' as integer completely\n", value);
1576                 return -EINVAL;
1577         }
1578         return 0;
1579 }
1580
1581 static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
1582 {
1583         int bit_sz = ext->kcfg.sz * 8;
1584
1585         if (ext->kcfg.sz == 8)
1586                 return true;
1587
1588         /* Validate that the value stored in u64 fits in an integer of
1589          * `ext->kcfg.sz` bytes without any loss of information. If the
1590          * target integer is signed, we rely on the following limits of an
1591          * integer type of Y bits and the subsequent transformation:
1592          *
1593          *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
1594          *            0 <= X + 2^(Y-1) <= 2^Y - 1
1595          *            0 <= X + 2^(Y-1) <  2^Y
1596          *
1597          *  For an unsigned target integer, check that all of the upper
1598          *  (64 - Y) bits are zero.
1599          */
1600         if (ext->kcfg.is_signed)
1601                 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
1602         else
1603                 return (v >> bit_sz) == 0;
1604 }
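
/* Illustrative worked example (not in the original source): for a signed
 * 1-byte target (bit_sz = 8), v = -128 arrives as the u64
 * 0xffffffffffffff80; v + (1ULL << 7) wraps around to 0, which is < 256,
 * so the value fits. For v = -129 (0xffffffffffffff7f), v + 128 wraps to
 * 0xffffffffffffffff, which is not < 256, so the value is rejected.
 */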
1605
1606 static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
1607                               __u64 value)
1608 {
1609         if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
1610                 pr_warn("extern (kcfg) %s=%llu should be integer\n",
1611                         ext->name, (unsigned long long)value);
1612                 return -EINVAL;
1613         }
1614         if (!is_kcfg_value_in_range(ext, value)) {
1615                 pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
1616                         ext->name, (unsigned long long)value, ext->kcfg.sz);
1617                 return -ERANGE;
1618         }
1619         switch (ext->kcfg.sz) {
1620         case 1: *(__u8 *)ext_val = value; break;
1621         case 2: *(__u16 *)ext_val = value; break;
1622         case 4: *(__u32 *)ext_val = value; break;
1623         case 8: *(__u64 *)ext_val = value; break;
1624         default:
1625                 return -EINVAL;
1626         }
1627         ext->is_set = true;
1628         return 0;
1629 }
1630
1631 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
1632                                             char *buf, void *data)
1633 {
1634         struct extern_desc *ext;
1635         char *sep, *value;
1636         int len, err = 0;
1637         void *ext_val;
1638         __u64 num;
1639
1640         if (strncmp(buf, "CONFIG_", 7))
1641                 return 0;
1642
1643         sep = strchr(buf, '=');
1644         if (!sep) {
1645                 pr_warn("failed to parse '%s': no separator\n", buf);
1646                 return -EINVAL;
1647         }
1648
1649         /* Trim trailing '\n' */
1650         len = strlen(buf);
1651         if (buf[len - 1] == '\n')
1652                 buf[len - 1] = '\0';
1653         /* Split on '=' and ensure that a value is present. */
1654         *sep = '\0';
1655         if (!sep[1]) {
1656                 *sep = '=';
1657                 pr_warn("failed to parse '%s': no value\n", buf);
1658                 return -EINVAL;
1659         }
1660
1661         ext = find_extern_by_name(obj, buf);
1662         if (!ext || ext->is_set)
1663                 return 0;
1664
1665         ext_val = data + ext->kcfg.data_off;
1666         value = sep + 1;
1667
1668         switch (*value) {
1669         case 'y': case 'n': case 'm':
1670                 err = set_kcfg_value_tri(ext, ext_val, *value);
1671                 break;
1672         case '"':
1673                 err = set_kcfg_value_str(ext, ext_val, value);
1674                 break;
1675         default:
1676                 /* assume integer */
1677                 err = parse_u64(value, &num);
1678                 if (err) {
1679                         pr_warn("extern (kcfg) %s=%s should be integer\n",
1680                                 ext->name, value);
1681                         return err;
1682                 }
1683                 err = set_kcfg_value_num(ext, ext_val, num);
1684                 break;
1685         }
1686         if (err)
1687                 return err;
1688         pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
1689         return 0;
1690 }
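
/* Usage sketch (illustrative, not part of the original source): a BPF
 * program declares Kconfig externs with the __kconfig macro from
 * bpf_helpers.h, e.g.
 *
 *     extern int CONFIG_HZ __kconfig;
 *     extern bool CONFIG_BPF_SYSCALL __kconfig;
 *
 * A line such as "CONFIG_HZ=250" processed above is matched by name
 * against obj->externs, and the parsed value is written into the
 * .kconfig map at that extern's data offset.
 */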
1691
1692 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1693 {
1694         char buf[PATH_MAX];
1695         struct utsname uts;
1696         int len, err = 0;
1697         gzFile file;
1698
1699         uname(&uts);
1700         len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
1701         if (len < 0)
1702                 return -EINVAL;
1703         else if (len >= PATH_MAX)
1704                 return -ENAMETOOLONG;
1705
1706         /* gzopen also accepts uncompressed files. */
1707         file = gzopen(buf, "r");
1708         if (!file)
1709                 file = gzopen("/proc/config.gz", "r");
1710
1711         if (!file) {
1712                 pr_warn("failed to open system Kconfig\n");
1713                 return -ENOENT;
1714         }
1715
1716         while (gzgets(file, buf, sizeof(buf))) {
1717                 err = bpf_object__process_kconfig_line(obj, buf, data);
1718                 if (err) {
1719                         pr_warn("error parsing system Kconfig line '%s': %d\n",
1720                                 buf, err);
1721                         goto out;
1722                 }
1723         }
1724
1725 out:
1726         gzclose(file);
1727         return err;
1728 }
1729
1730 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1731                                         const char *config, void *data)
1732 {
1733         char buf[PATH_MAX];
1734         int err = 0;
1735         FILE *file;
1736
1737         file = fmemopen((void *)config, strlen(config), "r");
1738         if (!file) {
1739                 err = -errno;
1740                 pr_warn("failed to open in-memory Kconfig: %d\n", err);
1741                 return err;
1742         }
1743
1744         while (fgets(buf, sizeof(buf), file)) {
1745                 err = bpf_object__process_kconfig_line(obj, buf, data);
1746                 if (err) {
1747                         pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1748                                 buf, err);
1749                         break;
1750                 }
1751         }
1752
1753         fclose(file);
1754         return err;
1755 }
1756
1757 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
1758 {
1759         struct extern_desc *last_ext = NULL, *ext;
1760         size_t map_sz;
1761         int i, err;
1762
1763         for (i = 0; i < obj->nr_extern; i++) {
1764                 ext = &obj->externs[i];
1765                 if (ext->type == EXT_KCFG)
1766                         last_ext = ext;
1767         }
1768
1769         if (!last_ext)
1770                 return 0;
1771
1772         map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
1773         err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
1774                                             obj->efile.symbols_shndx,
1775                                             NULL, map_sz);
1776         if (err)
1777                 return err;
1778
1779         obj->kconfig_map_idx = obj->nr_maps - 1;
1780
1781         return 0;
1782 }
1783
1784 static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
1785 {
1786         Elf_Data *symbols = obj->efile.symbols;
1787         int i, map_def_sz = 0, nr_maps = 0, nr_syms;
1788         Elf_Data *data = NULL;
1789         Elf_Scn *scn;
1790
1791         if (obj->efile.maps_shndx < 0)
1792                 return 0;
1793
1794         if (!symbols)
1795                 return -EINVAL;
1796
1797
1798         scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
1799         data = elf_sec_data(obj, scn);
1800         if (!scn || !data) {
1801                 pr_warn("elf: failed to get legacy map definitions for %s\n",
1802                         obj->path);
1803                 return -EINVAL;
1804         }
1805
1806         /*
1807          * Count the number of maps. Each map has a name.
1808          * Arrays of maps are not supported: only the first element is
1809          * considered.
1810          *
1811          * TODO: Detect arrays of maps and report an error.
1812          */
1813         nr_syms = symbols->d_size / sizeof(GElf_Sym);
1814         for (i = 0; i < nr_syms; i++) {
1815                 GElf_Sym sym;
1816
1817                 if (!gelf_getsym(symbols, i, &sym))
1818                         continue;
1819                 if (sym.st_shndx != obj->efile.maps_shndx)
1820                         continue;
1821                 nr_maps++;
1822         }
1823         /* Assume equally sized map definitions */
1824         pr_debug("elf: found %d legacy map definitions (%zu bytes) in %s\n",
1825                  nr_maps, data->d_size, obj->path);
1826
1827         if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
1828                 pr_warn("elf: unable to determine legacy map definition size in %s\n",
1829                         obj->path);
1830                 return -EINVAL;
1831         }
1832         map_def_sz = data->d_size / nr_maps;
1833
1834         /* Fill obj->maps using data in "maps" section.  */
1835         for (i = 0; i < nr_syms; i++) {
1836                 GElf_Sym sym;
1837                 const char *map_name;
1838                 struct bpf_map_def *def;
1839                 struct bpf_map *map;
1840
1841                 if (!gelf_getsym(symbols, i, &sym))
1842                         continue;
1843                 if (sym.st_shndx != obj->efile.maps_shndx)
1844                         continue;
1845
1846                 map = bpf_object__add_map(obj);
1847                 if (IS_ERR(map))
1848                         return PTR_ERR(map);
1849
1850                 map_name = elf_sym_str(obj, sym.st_name);
1851                 if (!map_name) {
1852                         pr_warn("failed to get map #%d name sym string for obj %s\n",
1853                                 i, obj->path);
1854                         return -LIBBPF_ERRNO__FORMAT;
1855                 }
1856
1857                 map->libbpf_type = LIBBPF_MAP_UNSPEC;
1858                 map->sec_idx = sym.st_shndx;
1859                 map->sec_offset = sym.st_value;
1860                 pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
1861                          map_name, map->sec_idx, map->sec_offset);
1862                 if (sym.st_value + map_def_sz > data->d_size) {
1863                         pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
1864                                 obj->path, map_name);
1865                         return -EINVAL;
1866                 }
1867
1868                 map->name = strdup(map_name);
1869                 if (!map->name) {
1870                         pr_warn("failed to alloc map name\n");
1871                         return -ENOMEM;
1872                 }
1873                 pr_debug("map %d is \"%s\"\n", i, map->name);
1874                 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
1875                 /*
1876                  * If the definition of the map in the object file fits in
1877                  * bpf_map_def, copy it. Any extra fields in our version
1878                  * of bpf_map_def will default to zero, since the map was
1879                  * zero-initialized when allocated in bpf_object__add_map().
1880                  */
1881                 if (map_def_sz <= sizeof(struct bpf_map_def)) {
1882                         memcpy(&map->def, def, map_def_sz);
1883                 } else {
1884                         /*
1885                          * Here the map structure being read is bigger than what
1886                          * we expect, truncate if the excess bits are all zero.
1887                          * If they are not zero, reject this map as
1888                          * incompatible.
1889                          */
1890                         char *b;
1891
1892                         for (b = ((char *)def) + sizeof(struct bpf_map_def);
1893                              b < ((char *)def) + map_def_sz; b++) {
1894                                 if (*b != 0) {
1895                                         pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
1896                                                 obj->path, map_name);
1897                                         if (strict)
1898                                                 return -EINVAL;
1899                                 }
1900                         }
1901                         memcpy(&map->def, def, sizeof(struct bpf_map_def));
1902                 }
1903         }
1904         return 0;
1905 }
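
/* For reference (an illustrative sketch, not from the original source),
 * a legacy map definition handled by the function above looks like:
 *
 *     struct bpf_map_def SEC("maps") my_map = {
 *             .type = BPF_MAP_TYPE_HASH,
 *             .key_size = sizeof(__u32),
 *             .value_size = sizeof(__u64),
 *             .max_entries = 1024,
 *     };
 *
 * Each symbol in the "maps" ELF section names one such fixed-layout
 * struct, which is why a larger-than-expected definition is accepted
 * only if all of its extra bytes are zero.
 */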
1906
1907 const struct btf_type *
1908 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
1909 {
1910         const struct btf_type *t = btf__type_by_id(btf, id);
1911
1912         if (res_id)
1913                 *res_id = id;
1914
1915         while (btf_is_mod(t) || btf_is_typedef(t)) {
1916                 if (res_id)
1917                         *res_id = t->type;
1918                 t = btf__type_by_id(btf, t->type);
1919         }
1920
1921         return t;
1922 }
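
/* Illustrative example (not from the original source): given
 *
 *     typedef const volatile int cv_int_t;
 *
 * calling skip_mods_and_typedefs() with the TYPEDEF's id peels off the
 * TYPEDEF and the CONST/VOLATILE modifiers and returns the underlying
 * INT type, leaving the INT's id in *res_id.
 */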
1923
1924 static const struct btf_type *
1925 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
1926 {
1927         const struct btf_type *t;
1928
1929         t = skip_mods_and_typedefs(btf, id, NULL);
1930         if (!btf_is_ptr(t))
1931                 return NULL;
1932
1933         t = skip_mods_and_typedefs(btf, t->type, res_id);
1934
1935         return btf_is_func_proto(t) ? t : NULL;
1936 }
1937
1938 static const char *__btf_kind_str(__u16 kind)
1939 {
1940         switch (kind) {
1941         case BTF_KIND_UNKN: return "void";
1942         case BTF_KIND_INT: return "int";
1943         case BTF_KIND_PTR: return "ptr";
1944         case BTF_KIND_ARRAY: return "array";
1945         case BTF_KIND_STRUCT: return "struct";
1946         case BTF_KIND_UNION: return "union";
1947         case BTF_KIND_ENUM: return "enum";
1948         case BTF_KIND_FWD: return "fwd";
1949         case BTF_KIND_TYPEDEF: return "typedef";
1950         case BTF_KIND_VOLATILE: return "volatile";
1951         case BTF_KIND_CONST: return "const";
1952         case BTF_KIND_RESTRICT: return "restrict";
1953         case BTF_KIND_FUNC: return "func";
1954         case BTF_KIND_FUNC_PROTO: return "func_proto";
1955         case BTF_KIND_VAR: return "var";
1956         case BTF_KIND_DATASEC: return "datasec";
1957         case BTF_KIND_FLOAT: return "float";
1958         default: return "unknown";
1959         }
1960 }
1961
1962 const char *btf_kind_str(const struct btf_type *t)
1963 {
1964         return __btf_kind_str(btf_kind(t));
1965 }
1966
1967 /*
1968  * Fetch integer attribute of BTF map definition. Such attributes are
1969  * represented using a pointer to an array, in which dimensionality of array
1970  * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
1971  * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
1972  * type definition, while using only sizeof(void *) space in ELF data section.
1973  */
1974 static bool get_map_field_int(const char *map_name, const struct btf *btf,
1975                               const struct btf_member *m, __u32 *res)
1976 {
1977         const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
1978         const char *name = btf__name_by_offset(btf, m->name_off);
1979         const struct btf_array *arr_info;
1980         const struct btf_type *arr_t;
1981
1982         if (!btf_is_ptr(t)) {
1983                 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
1984                         map_name, name, btf_kind_str(t));
1985                 return false;
1986         }
1987
1988         arr_t = btf__type_by_id(btf, t->type);
1989         if (!arr_t) {
1990                 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
1991                         map_name, name, t->type);
1992                 return false;
1993         }
1994         if (!btf_is_array(arr_t)) {
1995                 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
1996                         map_name, name, btf_kind_str(arr_t));
1997                 return false;
1998         }
1999         arr_info = btf_array(arr_t);
2000         *res = arr_info->nelems;
2001         return true;
2002 }
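
/* For context (illustrative, assuming the __uint()/__type() helpers from
 * bpf_helpers.h): BTF-defined maps rely on exactly this encoding, e.g.
 *
 *     struct {
 *             __uint(type, BPF_MAP_TYPE_HASH);   // int (*type)[BPF_MAP_TYPE_HASH]
 *             __uint(max_entries, 1024);         // int (*max_entries)[1024]
 *             __type(key, __u32);                // typeof(__u32) *key
 *             __type(value, __u64);              // typeof(__u64) *value
 *     } my_hash SEC(".maps");
 *
 * get_map_field_int() recovers, e.g., 1024 from the nelems of the
 * pointed-to array type.
 */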
2003
2004 static int build_map_pin_path(struct bpf_map *map, const char *path)
2005 {
2006         char buf[PATH_MAX];
2007         int len;
2008
2009         if (!path)
2010                 path = "/sys/fs/bpf";
2011
2012         len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
2013         if (len < 0)
2014                 return -EINVAL;
2015         else if (len >= PATH_MAX)
2016                 return -ENAMETOOLONG;
2017
2018         return bpf_map__set_pin_path(map, buf);
2019 }
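
/* E.g., with the default pin root, a map named "my_map" is assigned the
 * pin path "/sys/fs/bpf/my_map"; a custom pin_root_path supplied via
 * bpf_object_open_opts overrides the "/sys/fs/bpf" prefix.
 */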
2020
2021 int parse_btf_map_def(const char *map_name, struct btf *btf,
2022                       const struct btf_type *def_t, bool strict,
2023                       struct btf_map_def *map_def, struct btf_map_def *inner_def)
2024 {
2025         const struct btf_type *t;
2026         const struct btf_member *m;
2027         bool is_inner = inner_def == NULL;
2028         int vlen, i;
2029
2030         vlen = btf_vlen(def_t);
2031         m = btf_members(def_t);
2032         for (i = 0; i < vlen; i++, m++) {
2033                 const char *name = btf__name_by_offset(btf, m->name_off);
2034
2035                 if (!name) {
2036                         pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2037                         return -EINVAL;
2038                 }
2039                 if (strcmp(name, "type") == 0) {
2040                         if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2041                                 return -EINVAL;
2042                         map_def->parts |= MAP_DEF_MAP_TYPE;
2043                 } else if (strcmp(name, "max_entries") == 0) {
2044                         if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2045                                 return -EINVAL;
2046                         map_def->parts |= MAP_DEF_MAX_ENTRIES;
2047                 } else if (strcmp(name, "map_flags") == 0) {
2048                         if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2049                                 return -EINVAL;
2050                         map_def->parts |= MAP_DEF_MAP_FLAGS;
2051                 } else if (strcmp(name, "numa_node") == 0) {
2052                         if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2053                                 return -EINVAL;
2054                         map_def->parts |= MAP_DEF_NUMA_NODE;
2055                 } else if (strcmp(name, "key_size") == 0) {
2056                         __u32 sz;
2057
2058                         if (!get_map_field_int(map_name, btf, m, &sz))
2059                                 return -EINVAL;
2060                         if (map_def->key_size && map_def->key_size != sz) {
2061                                 pr_warn("map '%s': conflicting key size %u != %u.\n",
2062                                         map_name, map_def->key_size, sz);
2063                                 return -EINVAL;
2064                         }
2065                         map_def->key_size = sz;
2066                         map_def->parts |= MAP_DEF_KEY_SIZE;
2067                 } else if (strcmp(name, "key") == 0) {
2068                         __s64 sz;
2069
2070                         t = btf__type_by_id(btf, m->type);
2071                         if (!t) {
2072                                 pr_warn("map '%s': key type [%d] not found.\n",
2073                                         map_name, m->type);
2074                                 return -EINVAL;
2075                         }
2076                         if (!btf_is_ptr(t)) {
2077                                 pr_warn("map '%s': key spec is not PTR: %s.\n",
2078                                         map_name, btf_kind_str(t));
2079                                 return -EINVAL;
2080                         }
2081                         sz = btf__resolve_size(btf, t->type);
2082                         if (sz < 0) {
2083                                 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2084                                         map_name, t->type, (ssize_t)sz);
2085                                 return sz;
2086                         }
2087                         if (map_def->key_size && map_def->key_size != sz) {
2088                                 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2089                                         map_name, map_def->key_size, (ssize_t)sz);
2090                                 return -EINVAL;
2091                         }
2092                         map_def->key_size = sz;
2093                         map_def->key_type_id = t->type;
2094                         map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2095                 } else if (strcmp(name, "value_size") == 0) {
2096                         __u32 sz;
2097
2098                         if (!get_map_field_int(map_name, btf, m, &sz))
2099                                 return -EINVAL;
2100                         if (map_def->value_size && map_def->value_size != sz) {
2101                                 pr_warn("map '%s': conflicting value size %u != %u.\n",
2102                                         map_name, map_def->value_size, sz);
2103                                 return -EINVAL;
2104                         }
2105                         map_def->value_size = sz;
2106                         map_def->parts |= MAP_DEF_VALUE_SIZE;
2107                 } else if (strcmp(name, "value") == 0) {
2108                         __s64 sz;
2109
2110                         t = btf__type_by_id(btf, m->type);
2111                         if (!t) {
2112                                 pr_warn("map '%s': value type [%d] not found.\n",
2113                                         map_name, m->type);
2114                                 return -EINVAL;
2115                         }
2116                         if (!btf_is_ptr(t)) {
2117                                 pr_warn("map '%s': value spec is not PTR: %s.\n",
2118                                         map_name, btf_kind_str(t));
2119                                 return -EINVAL;
2120                         }
2121                         sz = btf__resolve_size(btf, t->type);
2122                         if (sz < 0) {
2123                                 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2124                                         map_name, t->type, (ssize_t)sz);
2125                                 return sz;
2126                         }
2127                         if (map_def->value_size && map_def->value_size != sz) {
2128                                 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2129                                         map_name, map_def->value_size, (ssize_t)sz);
2130                                 return -EINVAL;
2131                         }
2132                         map_def->value_size = sz;
2133                         map_def->value_type_id = t->type;
2134                         map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2135                 }
2136                 else if (strcmp(name, "values") == 0) {
2137                         char inner_map_name[128];
2138                         int err;
2139
2140                         if (is_inner) {
2141                                 pr_warn("map '%s': multi-level inner maps not supported.\n",
2142                                         map_name);
2143                                 return -ENOTSUP;
2144                         }
2145                         if (i != vlen - 1) {
2146                                 pr_warn("map '%s': '%s' member should be last.\n",
2147                                         map_name, name);
2148                                 return -EINVAL;
2149                         }
2150                         if (!bpf_map_type__is_map_in_map(map_def->map_type)) {
2151                                 pr_warn("map '%s': should be map-in-map.\n",
2152                                         map_name);
2153                                 return -ENOTSUP;
2154                         }
2155                         if (map_def->value_size && map_def->value_size != 4) {
2156                                 pr_warn("map '%s': conflicting value size %u != 4.\n",
2157                                         map_name, map_def->value_size);
2158                                 return -EINVAL;
2159                         }
2160                         map_def->value_size = 4;
2161                         t = btf__type_by_id(btf, m->type);
2162                         if (!t) {
2163                                 pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
2164                                         map_name, m->type);
2165                                 return -EINVAL;
2166                         }
2167                         if (!btf_is_array(t) || btf_array(t)->nelems) {
2168                                 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
2169                                         map_name);
2170                                 return -EINVAL;
2171                         }
2172                         t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2173                         if (!btf_is_ptr(t)) {
2174                                 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2175                                         map_name, btf_kind_str(t));
2176                                 return -EINVAL;
2177                         }
2178                         t = skip_mods_and_typedefs(btf, t->type, NULL);
2179                         if (!btf_is_struct(t)) {
2180                                 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2181                                         map_name, btf_kind_str(t));
2182                                 return -EINVAL;
2183                         }
2184
2185                         snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2186                         err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2187                         if (err)
2188                                 return err;
2189
2190                         map_def->parts |= MAP_DEF_INNER_MAP;
2191                 } else if (strcmp(name, "pinning") == 0) {
2192                         __u32 val;
2193
2194                         if (is_inner) {
2195                                 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2196                                 return -EINVAL;
2197                         }
2198                         if (!get_map_field_int(map_name, btf, m, &val))
2199                                 return -EINVAL;
2200                         if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2201                                 pr_warn("map '%s': invalid pinning value %u.\n",
2202                                         map_name, val);
2203                                 return -EINVAL;
2204                         }
2205                         map_def->pinning = val;
2206                         map_def->parts |= MAP_DEF_PINNING;
2207                 } else {
2208                         if (strict) {
2209                                 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2210                                 return -ENOTSUP;
2211                         }
2212                         pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2213                 }
2214         }
2215
2216         if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2217                 pr_warn("map '%s': map type isn't specified.\n", map_name);
2218                 return -EINVAL;
2219         }
2220
2221         return 0;
2222 }
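
/* An illustrative map-in-map definition accepted by the parser above (a
 * sketch using the __uint()/__type()/__array() helpers from bpf_helpers.h):
 *
 *     struct inner_map {
 *             __uint(type, BPF_MAP_TYPE_ARRAY);
 *             __uint(max_entries, 1);
 *             __type(key, __u32);
 *             __type(value, __u32);
 *     };
 *
 *     struct {
 *             __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *             __uint(max_entries, 8);
 *             __type(key, __u32);
 *             __array(values, struct inner_map);
 *     } outer_map SEC(".maps");
 *
 * "values" must be the last member: it is a zero-sized array of pointers
 * to the inner definition struct, which is parsed recursively into
 * inner_def.
 */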
2223
2224 static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2225 {
2226         map->def.type = def->map_type;
2227         map->def.key_size = def->key_size;
2228         map->def.value_size = def->value_size;
2229         map->def.max_entries = def->max_entries;
2230         map->def.map_flags = def->map_flags;
2231
2232         map->numa_node = def->numa_node;
2233         map->btf_key_type_id = def->key_type_id;
2234         map->btf_value_type_id = def->value_type_id;
2235
2236         if (def->parts & MAP_DEF_MAP_TYPE)
2237                 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2238
2239         if (def->parts & MAP_DEF_KEY_TYPE)
2240                 pr_debug("map '%s': found key [%u], sz = %u.\n",
2241                          map->name, def->key_type_id, def->key_size);
2242         else if (def->parts & MAP_DEF_KEY_SIZE)
2243                 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2244
2245         if (def->parts & MAP_DEF_VALUE_TYPE)
2246                 pr_debug("map '%s': found value [%u], sz = %u.\n",
2247                          map->name, def->value_type_id, def->value_size);
2248         else if (def->parts & MAP_DEF_VALUE_SIZE)
2249                 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2250
2251         if (def->parts & MAP_DEF_MAX_ENTRIES)
2252                 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2253         if (def->parts & MAP_DEF_MAP_FLAGS)
2254                 pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
2255         if (def->parts & MAP_DEF_PINNING)
2256                 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2257         if (def->parts & MAP_DEF_NUMA_NODE)
2258                 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2259
2260         if (def->parts & MAP_DEF_INNER_MAP)
2261                 pr_debug("map '%s': found inner map definition.\n", map->name);
2262 }
2263
2264 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2265                                          const struct btf_type *sec,
2266                                          int var_idx, int sec_idx,
2267                                          const Elf_Data *data, bool strict,
2268                                          const char *pin_root_path)
2269 {
2270         struct btf_map_def map_def = {}, inner_def = {};
2271         const struct btf_type *var, *def;
2272         const struct btf_var_secinfo *vi;
2273         const struct btf_var *var_extra;
2274         const char *map_name;
2275         struct bpf_map *map;
2276         int err;
2277
2278         vi = btf_var_secinfos(sec) + var_idx;
2279         var = btf__type_by_id(obj->btf, vi->type);
2280         var_extra = btf_var(var);
2281         map_name = btf__name_by_offset(obj->btf, var->name_off);
2282
2283         if (map_name == NULL || map_name[0] == '\0') {
2284                 pr_warn("map #%d: empty name.\n", var_idx);
2285                 return -EINVAL;
2286         }
2287         if ((__u64)vi->offset + vi->size > data->d_size) {
2288                 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2289                 return -EINVAL;
2290         }
2291         if (!btf_is_var(var)) {
2292                 pr_warn("map '%s': unexpected var kind %s.\n",
2293                         map_name, btf_kind_str(var));
2294                 return -EINVAL;
2295         }
2296         if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2297             var_extra->linkage != BTF_VAR_STATIC) {
2298                 pr_warn("map '%s': unsupported var linkage %u.\n",
2299                         map_name, var_extra->linkage);
2300                 return -EOPNOTSUPP;
2301         }
2302
2303         def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2304         if (!btf_is_struct(def)) {
2305                 pr_warn("map '%s': unexpected def kind %s.\n",
2306                         map_name, btf_kind_str(def));
2307                 return -EINVAL;
2308         }
2309         if (def->size > vi->size) {
2310                 pr_warn("map '%s': invalid def size.\n", map_name);
2311                 return -EINVAL;
2312         }
2313
2314         map = bpf_object__add_map(obj);
2315         if (IS_ERR(map))
2316                 return PTR_ERR(map);
2317         map->name = strdup(map_name);
2318         if (!map->name) {
2319                 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2320                 return -ENOMEM;
2321         }
2322         map->libbpf_type = LIBBPF_MAP_UNSPEC;
2323         map->def.type = BPF_MAP_TYPE_UNSPEC;
2324         map->sec_idx = sec_idx;
2325         map->sec_offset = vi->offset;
2326         map->btf_var_idx = var_idx;
2327         pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2328                  map_name, map->sec_idx, map->sec_offset);
2329
2330         err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2331         if (err)
2332                 return err;
2333
2334         fill_map_from_def(map, &map_def);
2335
2336         if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2337                 err = build_map_pin_path(map, pin_root_path);
2338                 if (err) {
2339                         pr_warn("map '%s': couldn't build pin path.\n", map->name);
2340                         return err;
2341                 }
2342         }
2343
2344         if (map_def.parts & MAP_DEF_INNER_MAP) {
2345                 map->inner_map = calloc(1, sizeof(*map->inner_map));
2346                 if (!map->inner_map)
2347                         return -ENOMEM;
2348                 map->inner_map->fd = -1;
2349                 map->inner_map->sec_idx = sec_idx;
2350                 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2351                 if (!map->inner_map->name)
2352                         return -ENOMEM;
2353                 sprintf(map->inner_map->name, "%s.inner", map_name);
2354
2355                 fill_map_from_def(map->inner_map, &inner_def);
2356         }
2357
2358         return 0;
2359 }
2360
2361 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2362                                           const char *pin_root_path)
2363 {
2364         const struct btf_type *sec = NULL;
2365         int nr_types, i, vlen, err;
2366         const struct btf_type *t;
2367         const char *name;
2368         Elf_Data *data;
2369         Elf_Scn *scn;
2370
2371         if (obj->efile.btf_maps_shndx < 0)
2372                 return 0;
2373
2374         scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2375         data = elf_sec_data(obj, scn);
2376         if (!scn || !data) {
2377                 pr_warn("elf: failed to get %s map definitions for %s\n",
2378                         MAPS_ELF_SEC, obj->path);
2379                 return -EINVAL;
2380         }
2381
2382         nr_types = btf__get_nr_types(obj->btf);
2383         for (i = 1; i <= nr_types; i++) {
2384                 t = btf__type_by_id(obj->btf, i);
2385                 if (!btf_is_datasec(t))
2386                         continue;
2387                 name = btf__name_by_offset(obj->btf, t->name_off);
2388                 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2389                         sec = t;
2390                         obj->efile.btf_maps_sec_btf_id = i;
2391                         break;
2392                 }
2393         }
2394
2395         if (!sec) {
2396                 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2397                 return -ENOENT;
2398         }
2399
2400         vlen = btf_vlen(sec);
2401         for (i = 0; i < vlen; i++) {
2402                 err = bpf_object__init_user_btf_map(obj, sec, i,
2403                                                     obj->efile.btf_maps_shndx,
2404                                                     data, strict,
2405                                                     pin_root_path);
2406                 if (err)
2407                         return err;
2408         }
2409
2410         return 0;
2411 }
2412
2413 static int bpf_object__init_maps(struct bpf_object *obj,
2414                                  const struct bpf_object_open_opts *opts)
2415 {
2416         const char *pin_root_path;
2417         bool strict;
2418         int err;
2419
2420         strict = !OPTS_GET(opts, relaxed_maps, false);
2421         pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2422
2423         err = bpf_object__init_user_maps(obj, strict);
2424         err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2425         err = err ?: bpf_object__init_global_data_maps(obj);
2426         err = err ?: bpf_object__init_kconfig_map(obj);
2427         err = err ?: bpf_object__init_struct_ops_maps(obj);
2428         if (err)
2429                 return err;
2430
2431         return 0;
2432 }
2433
2434 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2435 {
2436         GElf_Shdr sh;
2437
2438         if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
2439                 return false;
2440
2441         return sh.sh_flags & SHF_EXECINSTR;
2442 }
2443
2444 static bool btf_needs_sanitization(struct bpf_object *obj)
2445 {
2446         bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2447         bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
2448         bool has_float = kernel_supports(FEAT_BTF_FLOAT);
2449         bool has_func = kernel_supports(FEAT_BTF_FUNC);
2450
2451         return !has_func || !has_datasec || !has_func_global || !has_float;
2452 }
2453
2454 static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2455 {
2456         bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2457         bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
2458         bool has_float = kernel_supports(FEAT_BTF_FLOAT);
2459         bool has_func = kernel_supports(FEAT_BTF_FUNC);
2460         struct btf_type *t;
2461         int i, j, vlen;
2462
2463         for (i = 1; i <= btf__get_nr_types(btf); i++) {
2464                 t = (struct btf_type *)btf__type_by_id(btf, i);
2465
2466                 if (!has_datasec && btf_is_var(t)) {
2467                         /* replace VAR with INT */
2468                         t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2469                         /*
2470                          * using size = 1 is the safest choice, 4 will be too
2471                          * big and cause kernel BTF validation failure if
2472                          * original variable took less than 4 bytes
2473                          */
2474                         t->size = 1;
2475                         *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2476                 } else if (!has_datasec && btf_is_datasec(t)) {
2477                         /* replace DATASEC with STRUCT */
2478                         const struct btf_var_secinfo *v = btf_var_secinfos(t);
2479                         struct btf_member *m = btf_members(t);
2480                         struct btf_type *vt;
2481                         char *name;
2482
2483                         name = (char *)btf__name_by_offset(btf, t->name_off);
2484                         while (*name) {
2485                                 if (*name == '.')
2486                                         *name = '_';
2487                                 name++;
2488                         }
2489
2490                         vlen = btf_vlen(t);
2491                         t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2492                         for (j = 0; j < vlen; j++, v++, m++) {
2493                                 /* order of field assignments is important */
2494                                 m->offset = v->offset * 8;
2495                                 m->type = v->type;
2496                                 /* preserve variable name as member name */
2497                                 vt = (void *)btf__type_by_id(btf, v->type);
2498                                 m->name_off = vt->name_off;
2499                         }
2500                 } else if (!has_func && btf_is_func_proto(t)) {
2501                         /* replace FUNC_PROTO with ENUM */
2502                         vlen = btf_vlen(t);
2503                         t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2504                         t->size = sizeof(__u32); /* kernel enforced */
2505                 } else if (!has_func && btf_is_func(t)) {
2506                         /* replace FUNC with TYPEDEF */
2507                         t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2508                 } else if (!has_func_global && btf_is_func(t)) {
2509                         /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2510                         t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2511                 } else if (!has_float && btf_is_float(t)) {
2512                         /* replace FLOAT with an equally-sized empty STRUCT;
2513                          * since C compilers do not accept e.g. "float" as a
2514                          * valid struct name, make it anonymous
2515                          */
2516                         t->name_off = 0;
2517                         t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
2518                 }
2519         }
2520 }
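
/* As an illustration (not from the original source): on a kernel without
 * DATASEC support, a ".data" datasec containing vars `int x` and `char y`
 * is rewritten in place into a struct named "_data" whose members reuse
 * the variables' names and types, with byte offsets converted to bit
 * offsets, keeping the overall BTF acceptable to the older kernel.
 */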
2521
2522 static bool libbpf_needs_btf(const struct bpf_object *obj)
2523 {
2524         return obj->efile.btf_maps_shndx >= 0 ||
2525                obj->efile.st_ops_shndx >= 0 ||
2526                obj->nr_extern > 0;
2527 }
2528
2529 static bool kernel_needs_btf(const struct bpf_object *obj)
2530 {
2531         return obj->efile.st_ops_shndx >= 0;
2532 }
2533
2534 static int bpf_object__init_btf(struct bpf_object *obj,
2535                                 Elf_Data *btf_data,
2536                                 Elf_Data *btf_ext_data)
2537 {
2538         int err = -ENOENT;
2539
2540         if (btf_data) {
2541                 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2542                 if (IS_ERR(obj->btf)) {
2543                         err = PTR_ERR(obj->btf);
2544                         obj->btf = NULL;
2545                         pr_warn("Error loading ELF section %s: %d.\n",
2546                                 BTF_ELF_SEC, err);
2547                         goto out;
2548                 }
2549                 /* enforce 8-byte pointers for BPF-targeted BTFs */
2550                 btf__set_pointer_size(obj->btf, 8);
2551                 err = 0;
2552         }
2553         if (btf_ext_data) {
2554                 if (!obj->btf) {
2555                         pr_debug("Ignoring ELF section %s because the ELF section %s it depends on is missing.\n",
2556                                  BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2557                         goto out;
2558                 }
2559                 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
2560                                             btf_ext_data->d_size);
2561                 if (IS_ERR(obj->btf_ext)) {
2562                         pr_warn("Error loading ELF section %s: %ld. Ignoring and continuing.\n",
2563                                 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
2564                         obj->btf_ext = NULL;
2565                         goto out;
2566                 }
2567         }
2568 out:
2569         if (err && libbpf_needs_btf(obj)) {
2570                 pr_warn("BTF is required, but is missing or corrupted.\n");
2571                 return err;
2572         }
2573         return 0;
2574 }
2575
2576 static int bpf_object__finalize_btf(struct bpf_object *obj)
2577 {
2578         int err;
2579
2580         if (!obj->btf)
2581                 return 0;
2582
2583         err = btf__finalize_data(obj, obj->btf);
2584         if (err) {
2585                 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2586                 return err;
2587         }
2588
2589         return 0;
2590 }
2591
2592 static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2593 {
2594         if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2595             prog->type == BPF_PROG_TYPE_LSM)
2596                 return true;
2597
2598         /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2599          * also need vmlinux BTF
2600          */
2601         if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2602                 return true;
2603
2604         return false;
2605 }
2606
2607 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2608 {
2609         struct bpf_program *prog;
2610         int i;
2611
2612         /* CO-RE relocations need kernel BTF */
2613         if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
2614                 return true;
2615
2616         /* Support for typed ksyms needs kernel BTF */
2617         for (i = 0; i < obj->nr_extern; i++) {
2618                 const struct extern_desc *ext;
2619
2620                 ext = &obj->externs[i];
2621                 if (ext->type == EXT_KSYM && ext->ksym.type_id)
2622                         return true;
2623         }
2624
2625         bpf_object__for_each_program(prog, obj) {
2626                 if (!prog->load)
2627                         continue;
2628                 if (prog_needs_vmlinux_btf(prog))
2629                         return true;
2630         }
2631
2632         return false;
2633 }
2634
2635 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2636 {
2637         int err;
2638
2639         /* btf_vmlinux could be loaded earlier */
2640         if (obj->btf_vmlinux)
2641                 return 0;
2642
2643         if (!force && !obj_needs_vmlinux_btf(obj))
2644                 return 0;
2645
2646         obj->btf_vmlinux = libbpf_find_kernel_btf();
2647         if (IS_ERR(obj->btf_vmlinux)) {
2648                 err = PTR_ERR(obj->btf_vmlinux);
2649                 pr_warn("Error loading vmlinux BTF: %d\n", err);
2650                 obj->btf_vmlinux = NULL;
2651                 return err;
2652         }
2653         return 0;
2654 }
2655
2656 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2657 {
2658         struct btf *kern_btf = obj->btf;
2659         bool btf_mandatory, sanitize;
2660         int i, err = 0;
2661
2662         if (!obj->btf)
2663                 return 0;
2664
2665         if (!kernel_supports(FEAT_BTF)) {
2666                 if (kernel_needs_btf(obj)) {
2667                         err = -EOPNOTSUPP;
2668                         goto report;
2669                 }
2670                 pr_debug("Kernel doesn't support BTF, skipping BTF upload.\n");
2671                 return 0;
2672         }
2673
2674         /* Even though some subprogs are global/weak, the user might prefer the
2675          * more permissive BPF verification process that the BPF verifier performs
2676          * for static functions, taking into account more context from the caller
2677          * functions. In that case, they need to mark such subprogs with
2678          * __attribute__((visibility("hidden"))) and libbpf will adjust the
2679          * corresponding FUNC BTF type to be marked as static, triggering the more
2680          * involved BPF verification process.
2681          */
2682         for (i = 0; i < obj->nr_programs; i++) {
2683                 struct bpf_program *prog = &obj->programs[i];
2684                 struct btf_type *t;
2685                 const char *name;
2686                 int j, n;
2687
2688                 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
2689                         continue;
2690
2691                 n = btf__get_nr_types(obj->btf);
2692                 for (j = 1; j <= n; j++) {
2693                         t = btf_type_by_id(obj->btf, j);
2694                         if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
2695                                 continue;
2696
2697                         name = btf__str_by_offset(obj->btf, t->name_off);
2698                         if (strcmp(name, prog->name) != 0)
2699                                 continue;
2700
2701                         t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
2702                         break;
2703                 }
2704         }
2705
2706         sanitize = btf_needs_sanitization(obj);
2707         if (sanitize) {
2708                 const void *raw_data;
2709                 __u32 sz;
2710
2711                 /* clone BTF to sanitize a copy and leave the original intact */
2712                 raw_data = btf__get_raw_data(obj->btf, &sz);
2713                 kern_btf = btf__new(raw_data, sz);
2714                 if (IS_ERR(kern_btf))
2715                         return PTR_ERR(kern_btf);
2716
2717                 /* enforce 8-byte pointers for BPF-targeted BTFs */
2718                 btf__set_pointer_size(obj->btf, 8);
2719                 bpf_object__sanitize_btf(obj, kern_btf);
2720         }
2721
2722         err = btf__load(kern_btf);
2723         if (sanitize) {
2724                 if (!err) {
2725                         /* move fd to libbpf's BTF */
2726                         btf__set_fd(obj->btf, btf__fd(kern_btf));
2727                         btf__set_fd(kern_btf, -1);
2728                 }
2729                 btf__free(kern_btf);
2730         }
2731 report:
2732         if (err) {
2733                 btf_mandatory = kernel_needs_btf(obj);
2734                 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
2735                         btf_mandatory ? "BTF is mandatory, can't proceed."
2736                                       : "BTF is optional, ignoring.");
2737                 if (!btf_mandatory)
2738                         err = 0;
2739         }
2740         return err;
2741 }
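
/* Usage sketch for the static-marking pass above (assuming the __hidden
 * macro from bpf_helpers.h, i.e. __attribute__((visibility("hidden")))):
 *
 *     __hidden int accumulate(int x)
 *     {
 *             return x * 2;
 *     }
 *
 * A subprog annotated this way gets prog->mark_btf_static set, and its
 * FUNC BTF entry is downgraded from BTF_FUNC_GLOBAL to BTF_FUNC_STATIC
 * before the BTF is loaded into the kernel.
 */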
2742
2743 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
2744 {
2745         const char *name;
2746
2747         name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
2748         if (!name) {
2749                 pr_warn("elf: failed to get symbol name string at offset %zu from %s: %s\n",
2750                         off, obj->path, elf_errmsg(-1));
2751                 return NULL;
2752         }
2753
2754         return name;
2755 }
2756
2757 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
2758 {
2759         const char *name;
2760
2761         name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
2762         if (!name) {
2763                 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2764                         off, obj->path, elf_errmsg(-1));
2765                 return NULL;
2766         }
2767
2768         return name;
2769 }
2770
2771 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
2772 {
2773         Elf_Scn *scn;
2774
2775         scn = elf_getscn(obj->efile.elf, idx);
2776         if (!scn) {
2777                 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
2778                         idx, obj->path, elf_errmsg(-1));
2779                 return NULL;
2780         }
2781         return scn;
2782 }
2783
2784 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
2785 {
2786         Elf_Scn *scn = NULL;
2787         Elf *elf = obj->efile.elf;
2788         const char *sec_name;
2789
2790         while ((scn = elf_nextscn(elf, scn)) != NULL) {
2791                 sec_name = elf_sec_name(obj, scn);
2792                 if (!sec_name)
2793                         return NULL;
2794
2795                 if (strcmp(sec_name, name) != 0)
2796                         continue;
2797
2798                 return scn;
2799         }
2800         return NULL;
2801 }
2802
2803 static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
2804 {
2805         if (!scn)
2806                 return -EINVAL;
2807
2808         if (gelf_getshdr(scn, hdr) != hdr) {
2809                 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
2810                         elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2811                 return -EINVAL;
2812         }
2813
2814         return 0;
2815 }
2816
2817 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
2818 {
2819         const char *name;
2820         GElf_Shdr sh;
2821
2822         if (!scn)
2823                 return NULL;
2824
2825         if (elf_sec_hdr(obj, scn, &sh))
2826                 return NULL;
2827
2828         name = elf_sec_str(obj, sh.sh_name);
2829         if (!name) {
2830                 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
2831                         elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2832                 return NULL;
2833         }
2834
2835         return name;
2836 }
2837
2838 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
2839 {
2840         Elf_Data *data;
2841
2842         if (!scn)
2843                 return NULL;
2844
2845         data = elf_getdata(scn, 0);
2846         if (!data) {
2847                 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
2848                         elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
2849                         obj->path, elf_errmsg(-1));
2850                 return NULL;
2851         }
2852
2853         return data;
2854 }
2855
2856 static bool is_sec_name_dwarf(const char *name)
2857 {
2858         /* approximation, but the actual list is too long */
2859         return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
2860 }
2861
2862 static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
2863 {
2864         /* no special handling of .strtab */
2865         if (hdr->sh_type == SHT_STRTAB)
2866                 return true;
2867
2868         /* ignore .llvm_addrsig section as well */
2869         if (hdr->sh_type == SHT_LLVM_ADDRSIG)
2870                 return true;
2871
2872         /* no subprograms will lead to an empty .text section, ignore it */
2873         if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
2874             strcmp(name, ".text") == 0)
2875                 return true;
2876
2877         /* DWARF sections */
2878         if (is_sec_name_dwarf(name))
2879                 return true;
2880
2881         if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
2882                 name += sizeof(".rel") - 1;
2883                 /* DWARF section relocations */
2884                 if (is_sec_name_dwarf(name))
2885                         return true;
2886
2887                 /* .BTF and .BTF.ext don't need relocations */
2888                 if (strcmp(name, BTF_ELF_SEC) == 0 ||
2889                     strcmp(name, BTF_EXT_ELF_SEC) == 0)
2890                         return true;
2891         }
2892
2893         return false;
2894 }
2895
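/* bpf_object's programs are sorted with this comparator: by section index
 * first, then by in-section instruction offset; find_prog_by_sec_insn()
 * below relies on exactly this ordering for its binary search
 */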
2896 static int cmp_progs(const void *_a, const void *_b)
2897 {
2898         const struct bpf_program *a = _a;
2899         const struct bpf_program *b = _b;
2900
2901         if (a->sec_idx != b->sec_idx)
2902                 return a->sec_idx < b->sec_idx ? -1 : 1;
2903
2904         /* sec_insn_off can't be the same within the section */
2905         return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
2906 }
2907
2908 static int bpf_object__elf_collect(struct bpf_object *obj)
2909 {
2910         Elf *elf = obj->efile.elf;
2911         Elf_Data *btf_ext_data = NULL;
2912         Elf_Data *btf_data = NULL;
2913         int idx = 0, err = 0;
2914         const char *name;
2915         Elf_Data *data;
2916         Elf_Scn *scn;
2917         GElf_Shdr sh;
2918
2919         /* a bunch of ELF parsing functionality depends on processing symbols,
2920          * so do the first pass and find the symbol table
2921          */
2922         scn = NULL;
2923         while ((scn = elf_nextscn(elf, scn)) != NULL) {
2924                 if (elf_sec_hdr(obj, scn, &sh))
2925                         return -LIBBPF_ERRNO__FORMAT;
2926
2927                 if (sh.sh_type == SHT_SYMTAB) {
2928                         if (obj->efile.symbols) {
2929                                 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
2930                                 return -LIBBPF_ERRNO__FORMAT;
2931                         }
2932
2933                         data = elf_sec_data(obj, scn);
2934                         if (!data)
2935                                 return -LIBBPF_ERRNO__FORMAT;
2936
2937                         obj->efile.symbols = data;
2938                         obj->efile.symbols_shndx = elf_ndxscn(scn);
2939                         obj->efile.strtabidx = sh.sh_link;
2940                 }
2941         }
2942
2943         scn = NULL;
2944         while ((scn = elf_nextscn(elf, scn)) != NULL) {
2945                 idx++;
2946
2947                 if (elf_sec_hdr(obj, scn, &sh))
2948                         return -LIBBPF_ERRNO__FORMAT;
2949
2950                 name = elf_sec_str(obj, sh.sh_name);
2951                 if (!name)
2952                         return -LIBBPF_ERRNO__FORMAT;
2953
2954                 if (ignore_elf_section(&sh, name))
2955                         continue;
2956
2957                 data = elf_sec_data(obj, scn);
2958                 if (!data)
2959                         return -LIBBPF_ERRNO__FORMAT;
2960
2961                 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
2962                          idx, name, (unsigned long)data->d_size,
2963                          (int)sh.sh_link, (unsigned long)sh.sh_flags,
2964                          (int)sh.sh_type);
2965
2966                 if (strcmp(name, "license") == 0) {
2967                         err = bpf_object__init_license(obj, data->d_buf, data->d_size);
2968                         if (err)
2969                                 return err;
2970                 } else if (strcmp(name, "version") == 0) {
2971                         err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
2972                         if (err)
2973                                 return err;
2974                 } else if (strcmp(name, "maps") == 0) {
2975                         obj->efile.maps_shndx = idx;
2976                 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
2977                         obj->efile.btf_maps_shndx = idx;
2978                 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
2979                         btf_data = data;
2980                 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
2981                         btf_ext_data = data;
2982                 } else if (sh.sh_type == SHT_SYMTAB) {
2983                         /* already processed during the first pass above */
2984                 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
2985                         if (sh.sh_flags & SHF_EXECINSTR) {
2986                                 if (strcmp(name, ".text") == 0)
2987                                         obj->efile.text_shndx = idx;
2988                                 err = bpf_object__add_programs(obj, data, name, idx);
2989                                 if (err)
2990                                         return err;
2991                         } else if (strcmp(name, DATA_SEC) == 0) {
2992                                 obj->efile.data = data;
2993                                 obj->efile.data_shndx = idx;
2994                         } else if (strcmp(name, RODATA_SEC) == 0) {
2995                                 obj->efile.rodata = data;
2996                                 obj->efile.rodata_shndx = idx;
2997                         } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
2998                                 obj->efile.st_ops_data = data;
2999                                 obj->efile.st_ops_shndx = idx;
3000                         } else {
3001                                 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3002                                         idx, name);
3003                         }
3004                 } else if (sh.sh_type == SHT_REL) {
3005                         int nr_sects = obj->efile.nr_reloc_sects;
3006                         void *sects = obj->efile.reloc_sects;
3007                         int sec = sh.sh_info; /* points to other section */
3008
3009                         /* Only do relo for section with exec instructions */
3010                         if (!section_have_execinstr(obj, sec) &&
3011                             strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3012                             strcmp(name, ".rel" MAPS_ELF_SEC)) {
3013                                 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3014                                         idx, name, sec,
3015                                         elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
3016                                 continue;
3017                         }
3018
3019                         sects = libbpf_reallocarray(sects, nr_sects + 1,
3020                                                     sizeof(*obj->efile.reloc_sects));
3021                         if (!sects)
3022                                 return -ENOMEM;
3023
3024                         obj->efile.reloc_sects = sects;
3025                         obj->efile.nr_reloc_sects++;
3026
3027                         obj->efile.reloc_sects[nr_sects].shdr = sh;
3028                         obj->efile.reloc_sects[nr_sects].data = data;
3029                 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
3030                         obj->efile.bss = data;
3031                         obj->efile.bss_shndx = idx;
3032                 } else {
3033                         pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3034                                 (size_t)sh.sh_size);
3035                 }
3036         }
3037
3038         if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3039                 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3040                 return -LIBBPF_ERRNO__FORMAT;
3041         }
3042
3043         /* sort BPF programs by section index and in-section instruction offset
3044          * for faster search */
3045         qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3046
3047         return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3048 }
3049
3050 static bool sym_is_extern(const GElf_Sym *sym)
3051 {
3052         int bind = GELF_ST_BIND(sym->st_info);
3053         /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3054         return sym->st_shndx == SHN_UNDEF &&
3055                (bind == STB_GLOBAL || bind == STB_WEAK) &&
3056                GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
3057 }
3058
3059 static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx)
3060 {
3061         int bind = GELF_ST_BIND(sym->st_info);
3062         int type = GELF_ST_TYPE(sym->st_info);
3063
3064         /* in .text section */
3065         if (sym->st_shndx != text_shndx)
3066                 return false;
3067
3068         /* local function */
3069         if (bind == STB_LOCAL && type == STT_SECTION)
3070                 return true;
3071
3072         /* global function */
3073         return bind == STB_GLOBAL && type == STT_FUNC;
3074 }
3075
3076 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3077 {
3078         const struct btf_type *t;
3079         const char *tname;
3080         int i, n;
3081
3082         if (!btf)
3083                 return -ESRCH;
3084
3085         n = btf__get_nr_types(btf);
3086         for (i = 1; i <= n; i++) {
3087                 t = btf__type_by_id(btf, i);
3088
3089                 if (!btf_is_var(t) && !btf_is_func(t))
3090                         continue;
3091
3092                 tname = btf__name_by_offset(btf, t->name_off);
3093                 if (strcmp(tname, ext_name))
3094                         continue;
3095
3096                 if (btf_is_var(t) &&
3097                     btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3098                         return -EINVAL;
3099
3100                 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3101                         return -EINVAL;
3102
3103                 return i;
3104         }
3105
3106         return -ENOENT;
3107 }
3108
3109 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) {
3110         const struct btf_var_secinfo *vs;
3111         const struct btf_type *t;
3112         int i, j, n;
3113
3114         if (!btf)
3115                 return -ESRCH;
3116
3117         n = btf__get_nr_types(btf);
3118         for (i = 1; i <= n; i++) {
3119                 t = btf__type_by_id(btf, i);
3120
3121                 if (!btf_is_datasec(t))
3122                         continue;
3123
3124                 vs = btf_var_secinfos(t);
3125                 for (j = 0; j < btf_vlen(t); j++, vs++) {
3126                         if (vs->type == ext_btf_id)
3127                                 return i;
3128                 }
3129         }
3130
3131         return -ENOENT;
3132 }
3133
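/* classify the BTF type of a Kconfig ("kcfg") extern into one of the KCFG_*
 * kinds (bool, char, int, tristate enum, char array); anything else is
 * reported as KCFG_UNKNOWN and rejected by the caller
 */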
3134 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3135                                      bool *is_signed)
3136 {
3137         const struct btf_type *t;
3138         const char *name;
3139
3140         t = skip_mods_and_typedefs(btf, id, NULL);
3141         name = btf__name_by_offset(btf, t->name_off);
3142
3143         if (is_signed)
3144                 *is_signed = false;
3145         switch (btf_kind(t)) {
3146         case BTF_KIND_INT: {
3147                 int enc = btf_int_encoding(t);
3148
3149                 if (enc & BTF_INT_BOOL)
3150                         return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3151                 if (is_signed)
3152                         *is_signed = enc & BTF_INT_SIGNED;
3153                 if (t->size == 1)
3154                         return KCFG_CHAR;
3155                 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3156                         return KCFG_UNKNOWN;
3157                 return KCFG_INT;
3158         }
3159         case BTF_KIND_ENUM:
3160                 if (t->size != 4)
3161                         return KCFG_UNKNOWN;
3162                 if (strcmp(name, "libbpf_tristate"))
3163                         return KCFG_UNKNOWN;
3164                 return KCFG_TRISTATE;
3165         case BTF_KIND_ARRAY:
3166                 if (btf_array(t)->nelems == 0)
3167                         return KCFG_UNKNOWN;
3168                 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3169                         return KCFG_UNKNOWN;
3170                 return KCFG_CHAR_ARR;
3171         default:
3172                 return KCFG_UNKNOWN;
3173         }
3174 }
3175
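/* externs are sorted by type; kcfg externs are additionally ordered by
 * descending alignment and ascending size so that the .kconfig map layout
 * computed in bpf_object__collect_externs() needs minimal padding
 */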
3176 static int cmp_externs(const void *_a, const void *_b)
3177 {
3178         const struct extern_desc *a = _a;
3179         const struct extern_desc *b = _b;
3180
3181         if (a->type != b->type)
3182                 return a->type < b->type ? -1 : 1;
3183
3184         if (a->type == EXT_KCFG) {
3185                 /* descending order by alignment requirements */
3186                 if (a->kcfg.align != b->kcfg.align)
3187                         return a->kcfg.align > b->kcfg.align ? -1 : 1;
3188                 /* ascending order by size, within same alignment class */
3189                 if (a->kcfg.sz != b->kcfg.sz)
3190                         return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3191         }
3192
3193         /* resolve ties by name */
3194         return strcmp(a->name, b->name);
3195 }
3196
3197 static int find_int_btf_id(const struct btf *btf)
3198 {
3199         const struct btf_type *t;
3200         int i, n;
3201
3202         n = btf__get_nr_types(btf);
3203         for (i = 1; i <= n; i++) {
3204                 t = btf__type_by_id(btf, i);
3205
3206                 if (btf_is_int(t) && btf_int_bits(t) == 32)
3207                         return i;
3208         }
3209
3210         return 0;
3211 }
3212
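/* if the .ksyms DATASEC contains any extern function, add a dummy int
 * variable to BTF; it will later stand in for those function entries so
 * that the DATASEC passes kernel BTF validation
 */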
3213 static int add_dummy_ksym_var(struct btf *btf)
3214 {
3215         int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
3216         const struct btf_var_secinfo *vs;
3217         const struct btf_type *sec;
3218
3219         sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
3220                                             BTF_KIND_DATASEC);
3221         if (sec_btf_id < 0)
3222                 return 0;
3223
3224         sec = btf__type_by_id(btf, sec_btf_id);
3225         vs = btf_var_secinfos(sec);
3226         for (i = 0; i < btf_vlen(sec); i++, vs++) {
3227                 const struct btf_type *vt;
3228
3229                 vt = btf__type_by_id(btf, vs->type);
3230                 if (btf_is_func(vt))
3231                         break;
3232         }
3233
3234         /* No func in ksyms sec.  No need to add dummy var. */
3235         if (i == btf_vlen(sec))
3236                 return 0;
3237
3238         int_btf_id = find_int_btf_id(btf);
3239         dummy_var_btf_id = btf__add_var(btf,
3240                                         "dummy_ksym",
3241                                         BTF_VAR_GLOBAL_ALLOCATED,
3242                                         int_btf_id);
3243         if (dummy_var_btf_id < 0)
3244                 pr_warn("cannot create a dummy_ksym var\n");
3245
3246         return dummy_var_btf_id;
3247 }
3248
3249 static int bpf_object__collect_externs(struct bpf_object *obj)
3250 {
3251         struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3252         const struct btf_type *t;
3253         struct extern_desc *ext;
3254         int i, n, off, dummy_var_btf_id;
3255         const char *ext_name, *sec_name;
3256         Elf_Scn *scn;
3257         GElf_Shdr sh;
3258
3259         if (!obj->efile.symbols)
3260                 return 0;
3261
3262         scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3263         if (elf_sec_hdr(obj, scn, &sh))
3264                 return -LIBBPF_ERRNO__FORMAT;
3265
3266         dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
3267         if (dummy_var_btf_id < 0)
3268                 return dummy_var_btf_id;
3269
3270         n = sh.sh_size / sh.sh_entsize;
3271         pr_debug("looking for externs among %d symbols...\n", n);
3272
3273         for (i = 0; i < n; i++) {
3274                 GElf_Sym sym;
3275
3276                 if (!gelf_getsym(obj->efile.symbols, i, &sym))
3277                         return -LIBBPF_ERRNO__FORMAT;
3278                 if (!sym_is_extern(&sym))
3279                         continue;
3280                 ext_name = elf_sym_str(obj, sym.st_name);
3281                 if (!ext_name || !ext_name[0])
3282                         continue;
3283
3284                 ext = obj->externs;
3285                 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3286                 if (!ext)
3287                         return -ENOMEM;
3288                 obj->externs = ext;
3289                 ext = &ext[obj->nr_extern];
3290                 memset(ext, 0, sizeof(*ext));
3291                 obj->nr_extern++;
3292
3293                 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3294                 if (ext->btf_id <= 0) {
3295                         pr_warn("failed to find BTF for extern '%s': %d\n",
3296                                 ext_name, ext->btf_id);
3297                         return ext->btf_id;
3298                 }
3299                 t = btf__type_by_id(obj->btf, ext->btf_id);
3300                 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3301                 ext->sym_idx = i;
3302                 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
3303
3304                 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3305                 if (ext->sec_btf_id <= 0) {
3306                         pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3307                                 ext_name, ext->btf_id, ext->sec_btf_id);
3308                         return ext->sec_btf_id;
3309                 }
3310                 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3311                 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3312
3313                 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3314                         if (btf_is_func(t)) {
3315                                 pr_warn("extern function %s is unsupported under %s section\n",
3316                                         ext->name, KCONFIG_SEC);
3317                                 return -ENOTSUP;
3318                         }
3319                         kcfg_sec = sec;
3320                         ext->type = EXT_KCFG;
3321                         ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3322                         if (ext->kcfg.sz <= 0) {
3323                                 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3324                                         ext_name, ext->kcfg.sz);
3325                                 return ext->kcfg.sz;
3326                         }
3327                         ext->kcfg.align = btf__align_of(obj->btf, t->type);
3328                         if (ext->kcfg.align <= 0) {
3329                                 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3330                                         ext_name, ext->kcfg.align);
3331                                 return -EINVAL;
3332                         }
3333                         ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3334                                                         &ext->kcfg.is_signed);
3335                         if (ext->kcfg.type == KCFG_UNKNOWN) {
3336                                 pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
3337                                 return -ENOTSUP;
3338                         }
3339                 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3340                         if (btf_is_func(t) && ext->is_weak) {
3341                                 pr_warn("extern weak function %s is unsupported\n",
3342                                         ext->name);
3343                                 return -ENOTSUP;
3344                         }
3345                         ksym_sec = sec;
3346                         ext->type = EXT_KSYM;
3347                         skip_mods_and_typedefs(obj->btf, t->type,
3348                                                &ext->ksym.type_id);
3349                 } else {
3350                         pr_warn("unrecognized extern section '%s'\n", sec_name);
3351                         return -ENOTSUP;
3352                 }
3353         }
3354         pr_debug("collected %d externs total\n", obj->nr_extern);
3355
3356         if (!obj->nr_extern)
3357                 return 0;
3358
3359         /* sort externs by type, for kcfg ones also by (align, size, name) */
3360         qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3361
3362         /* for .ksyms section, we need to turn all externs into allocated
3363          * variables in BTF to pass kernel verification; we do this by
3364          * pretending that each extern is an int-sized (4-byte) variable
3365          */
3366         if (ksym_sec) {
3367                 /* find existing 4-byte integer type in BTF to use for fake
3368                  * extern variables in DATASEC
3369                  */
3370                 int int_btf_id = find_int_btf_id(obj->btf);
3371                 /* For an extern function, the dummy_var added earlier
3372                  * will be used to replace the vs->type, and its name
3373                  * string will be reused to fill in any missing
3374                  * param names.
3375                  */
3376                 const struct btf_type *dummy_var;
3377
3378                 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
3379                 for (i = 0; i < obj->nr_extern; i++) {
3380                         ext = &obj->externs[i];
3381                         if (ext->type != EXT_KSYM)
3382                                 continue;
3383                         pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3384                                  i, ext->sym_idx, ext->name);
3385                 }
3386
3387                 sec = ksym_sec;
3388                 n = btf_vlen(sec);
3389                 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3390                         struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3391                         struct btf_type *vt;
3392
3393                         vt = (void *)btf__type_by_id(obj->btf, vs->type);
3394                         ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3395                         ext = find_extern_by_name(obj, ext_name);
3396                         if (!ext) {
3397                                 pr_warn("failed to find extern definition for BTF %s '%s'\n",
3398                                         btf_kind_str(vt), ext_name);
3399                                 return -ESRCH;
3400                         }
3401                         if (btf_is_func(vt)) {
3402                                 const struct btf_type *func_proto;
3403                                 struct btf_param *param;
3404                                 int j;
3405
3406                                 func_proto = btf__type_by_id(obj->btf,
3407                                                              vt->type);
3408                                 param = btf_params(func_proto);
3409                                 /* Reuse the dummy_var string if the
3410                                  * func proto does not have param name.
3411                                  */
3412                                 for (j = 0; j < btf_vlen(func_proto); j++)
3413                                         if (param[j].type && !param[j].name_off)
3414                                                 param[j].name_off =
3415                                                         dummy_var->name_off;
3416                                 vs->type = dummy_var_btf_id;
3417                                 vt->info &= ~0xffff;
3418                                 vt->info |= BTF_FUNC_GLOBAL;
3419                         } else {
3420                                 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3421                                 vt->type = int_btf_id;
3422                         }
3423                         vs->offset = off;
3424                         vs->size = sizeof(int);
3425                 }
3426                 sec->size = off;
3427         }
3428
3429         if (kcfg_sec) {
3430                 sec = kcfg_sec;
3431                 /* for kcfg externs calculate their offsets within a .kconfig map */
3432                 off = 0;
3433                 for (i = 0; i < obj->nr_extern; i++) {
3434                         ext = &obj->externs[i];
3435                         if (ext->type != EXT_KCFG)
3436                                 continue;
3437
3438                         ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3439                         off = ext->kcfg.data_off + ext->kcfg.sz;
3440                         pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3441                                  i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3442                 }
3443                 sec->size = off;
3444                 n = btf_vlen(sec);
3445                 for (i = 0; i < n; i++) {
3446                         struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3447
3448                         t = btf__type_by_id(obj->btf, vs->type);
3449                         ext_name = btf__name_by_offset(obj->btf, t->name_off);
3450                         ext = find_extern_by_name(obj, ext_name);
3451                         if (!ext) {
3452                                 pr_warn("failed to find extern definition for BTF var '%s'\n",
3453                                         ext_name);
3454                                 return -ESRCH;
3455                         }
3456                         btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3457                         vs->offset = ext->kcfg.data_off;
3458                 }
3459         }
3460         return 0;
3461 }
3462
3463 struct bpf_program *
3464 bpf_object__find_program_by_title(const struct bpf_object *obj,
3465                                   const char *title)
3466 {
3467         struct bpf_program *pos;
3468
3469         bpf_object__for_each_program(pos, obj) {
3470                 if (pos->sec_name && !strcmp(pos->sec_name, title))
3471                         return pos;
3472         }
3473         return NULL;
3474 }
3475
3476 static bool prog_is_subprog(const struct bpf_object *obj,
3477                             const struct bpf_program *prog)
3478 {
3479         /* For legacy reasons, libbpf supports entry-point BPF programs
3480          * without a SEC() attribute, i.e., those in the .text section. But if
3481          * there are 2 or more such programs in the .text section, they all
3482          * must be subprograms called from entry-point BPF programs in
3483          * designated SEC()'tions, otherwise there is no way to distinguish
3484          * which of those programs should be loaded vs which are subprograms.
3485          * Similarly, if there is a function/program in .text and at least one
3486          * other BPF program with custom SEC() attribute, then we just assume
3487          * .text programs are subprograms (even if they are not called from
3488          * other programs), because libbpf never explicitly supported mixing
3489          * SEC()-designated BPF programs and .text entry-point BPF programs.
3490          */
3491         return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3492 }
3493
3494 struct bpf_program *
3495 bpf_object__find_program_by_name(const struct bpf_object *obj,
3496                                  const char *name)
3497 {
3498         struct bpf_program *prog;
3499
3500         bpf_object__for_each_program(prog, obj) {
3501                 if (prog_is_subprog(obj, prog))
3502                         continue;
3503                 if (!strcmp(prog->name, name))
3504                         return prog;
3505         }
3506         return NULL;
3507 }
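
/* Typical lookup (sketch; "obj" is an already-opened bpf_object and
 * "my_prog" is an assumed C function name of an entry-point program):
 *
 *	struct bpf_program *prog;
 *
 *	prog = bpf_object__find_program_by_name(obj, "my_prog");
 *	if (!prog)
 *		... no such entry-point program ...
 */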
3508
3509 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3510                                       int shndx)
3511 {
3512         return shndx == obj->efile.data_shndx ||
3513                shndx == obj->efile.bss_shndx ||
3514                shndx == obj->efile.rodata_shndx;
3515 }
3516
3517 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3518                                       int shndx)
3519 {
3520         return shndx == obj->efile.maps_shndx ||
3521                shndx == obj->efile.btf_maps_shndx;
3522 }
3523
3524 static enum libbpf_map_type
3525 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3526 {
3527         if (shndx == obj->efile.data_shndx)
3528                 return LIBBPF_MAP_DATA;
3529         else if (shndx == obj->efile.bss_shndx)
3530                 return LIBBPF_MAP_BSS;
3531         else if (shndx == obj->efile.rodata_shndx)
3532                 return LIBBPF_MAP_RODATA;
3533         else if (shndx == obj->efile.symbols_shndx)
3534                 return LIBBPF_MAP_KCONFIG;
3535         else
3536                 return LIBBPF_MAP_UNSPEC;
3537 }
3538
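/* translate one ELF relocation against prog's instruction into a reloc_desc:
 * extern var/func (RELO_EXTERN_*), subprog call (RELO_CALL), subprog address
 * (RELO_SUBPROG_ADDR), map reference (RELO_LD64), or global data (RELO_DATA)
 */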
3539 static int bpf_program__record_reloc(struct bpf_program *prog,
3540                                      struct reloc_desc *reloc_desc,
3541                                      __u32 insn_idx, const char *sym_name,
3542                                      const GElf_Sym *sym, const GElf_Rel *rel)
3543 {
3544         struct bpf_insn *insn = &prog->insns[insn_idx];
3545         size_t map_idx, nr_maps = prog->obj->nr_maps;
3546         struct bpf_object *obj = prog->obj;
3547         __u32 shdr_idx = sym->st_shndx;
3548         enum libbpf_map_type type;
3549         const char *sym_sec_name;
3550         struct bpf_map *map;
3551
3552         if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
3553                 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3554                         prog->name, sym_name, insn_idx, insn->code);
3555                 return -LIBBPF_ERRNO__RELOC;
3556         }
3557
3558         if (sym_is_extern(sym)) {
3559                 int sym_idx = GELF_R_SYM(rel->r_info);
3560                 int i, n = obj->nr_extern;
3561                 struct extern_desc *ext;
3562
3563                 for (i = 0; i < n; i++) {
3564                         ext = &obj->externs[i];
3565                         if (ext->sym_idx == sym_idx)
3566                                 break;
3567                 }
3568                 if (i >= n) {
3569                         pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3570                                 prog->name, sym_name, sym_idx);
3571                         return -LIBBPF_ERRNO__RELOC;
3572                 }
3573                 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3574                          prog->name, i, ext->name, ext->sym_idx, insn_idx);
3575                 if (insn->code == (BPF_JMP | BPF_CALL))
3576                         reloc_desc->type = RELO_EXTERN_FUNC;
3577                 else
3578                         reloc_desc->type = RELO_EXTERN_VAR;
3579                 reloc_desc->insn_idx = insn_idx;
3580                 reloc_desc->sym_off = i; /* sym_off stores extern index */
3581                 return 0;
3582         }
3583
3584         /* sub-program call relocation */
3585         if (is_call_insn(insn)) {
3586                 if (insn->src_reg != BPF_PSEUDO_CALL) {
3587                         pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
3588                         return -LIBBPF_ERRNO__RELOC;
3589                 }
3590                 /* text_shndx can be 0, if no default "main" program exists */
3591                 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
3592                         sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3593                         pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
3594                                 prog->name, sym_name, sym_sec_name);
3595                         return -LIBBPF_ERRNO__RELOC;
3596                 }
3597                 if (sym->st_value % BPF_INSN_SZ) {
3598                         pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
3599                                 prog->name, sym_name, (size_t)sym->st_value);
3600                         return -LIBBPF_ERRNO__RELOC;
3601                 }
3602                 reloc_desc->type = RELO_CALL;
3603                 reloc_desc->insn_idx = insn_idx;
3604                 reloc_desc->sym_off = sym->st_value;
3605                 return 0;
3606         }
3607
3608         if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
3609                 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
3610                         prog->name, sym_name, shdr_idx);
3611                 return -LIBBPF_ERRNO__RELOC;
3612         }
3613
3614         /* loading subprog addresses */
3615         if (sym_is_subprog(sym, obj->efile.text_shndx)) {
3616                 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
3617                  * local_func: sym->st_value = 0, insn->imm = offset in the section.
3618                  */
3619                 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
3620                         pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
3621                                 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
3622                         return -LIBBPF_ERRNO__RELOC;
3623                 }
3624
3625                 reloc_desc->type = RELO_SUBPROG_ADDR;
3626                 reloc_desc->insn_idx = insn_idx;
3627                 reloc_desc->sym_off = sym->st_value;
3628                 return 0;
3629         }
3630
3631         type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
3632         sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3633
3634         /* generic map reference relocation */
3635         if (type == LIBBPF_MAP_UNSPEC) {
3636                 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
3637                         pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
3638                                 prog->name, sym_name, sym_sec_name);
3639                         return -LIBBPF_ERRNO__RELOC;
3640                 }
3641                 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3642                         map = &obj->maps[map_idx];
3643                         if (map->libbpf_type != type ||
3644                             map->sec_idx != sym->st_shndx ||
3645                             map->sec_offset != sym->st_value)
3646                                 continue;
3647                         pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
3648                                  prog->name, map_idx, map->name, map->sec_idx,
3649                                  map->sec_offset, insn_idx);
3650                         break;
3651                 }
3652                 if (map_idx >= nr_maps) {
3653                         pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
3654                                 prog->name, sym_sec_name, (size_t)sym->st_value);
3655                         return -LIBBPF_ERRNO__RELOC;
3656                 }
3657                 reloc_desc->type = RELO_LD64;
3658                 reloc_desc->insn_idx = insn_idx;
3659                 reloc_desc->map_idx = map_idx;
3660                 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
3661                 return 0;
3662         }
3663
3664         /* global data map relocation */
3665         if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
3666                 pr_warn("prog '%s': bad data relo against section '%s'\n",
3667                         prog->name, sym_sec_name);
3668                 return -LIBBPF_ERRNO__RELOC;
3669         }
3670         for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3671                 map = &obj->maps[map_idx];
3672                 if (map->libbpf_type != type)
3673                         continue;
3674                 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
3675                          prog->name, map_idx, map->name, map->sec_idx,
3676                          map->sec_offset, insn_idx);
3677                 break;
3678         }
3679         if (map_idx >= nr_maps) {
3680                 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
3681                         prog->name, sym_sec_name);
3682                 return -LIBBPF_ERRNO__RELOC;
3683         }
3684
3685         reloc_desc->type = RELO_DATA;
3686         reloc_desc->insn_idx = insn_idx;
3687         reloc_desc->map_idx = map_idx;
3688         reloc_desc->sym_off = sym->st_value;
3689         return 0;
3690 }
3691
3692 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
3693 {
3694         return insn_idx >= prog->sec_insn_off &&
3695                insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
3696 }
3697
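/* binary-search obj->programs (sorted by cmp_progs()) for the program that
 * contains instruction insn_idx of section sec_idx; NULL if none does
 */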
3698 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
3699                                                  size_t sec_idx, size_t insn_idx)
3700 {
3701         int l = 0, r = obj->nr_programs - 1, m;
3702         struct bpf_program *prog;
3703
3704         while (l < r) {
3705                 m = l + (r - l + 1) / 2;
3706                 prog = &obj->programs[m];
3707
3708                 if (prog->sec_idx < sec_idx ||
3709                     (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
3710                         l = m;
3711                 else
3712                         r = m - 1;
3713         }
3714         /* matching program could be at index l, but it still might be the
3715          * wrong one, so we need to double check conditions for the last time
3716          */
3717         prog = &obj->programs[l];
3718         if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
3719                 return prog;
3720         return NULL;
3721 }
3722
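/* process one SHT_REL section: for each relocation, find the BPF program
 * containing the target instruction and record a reloc_desc for it via
 * bpf_program__record_reloc()
 */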
3723 static int
3724 bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
3725 {
3726         Elf_Data *symbols = obj->efile.symbols;
3727         const char *relo_sec_name, *sec_name;
3728         size_t sec_idx = shdr->sh_info;
3729         struct bpf_program *prog;
3730         struct reloc_desc *relos;
3731         int err, i, nrels;
3732         const char *sym_name;
3733         __u32 insn_idx;
3734         Elf_Scn *scn;
3735         Elf_Data *scn_data;
3736         GElf_Sym sym;
3737         GElf_Rel rel;
3738
3739         scn = elf_sec_by_idx(obj, sec_idx);
3740         scn_data = elf_sec_data(obj, scn);
3741
3742         relo_sec_name = elf_sec_str(obj, shdr->sh_name);
3743         sec_name = elf_sec_name(obj, scn);
3744         if (!relo_sec_name || !sec_name)
3745                 return -EINVAL;
3746
3747         pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
3748                  relo_sec_name, sec_idx, sec_name);
3749         nrels = shdr->sh_size / shdr->sh_entsize;
3750
3751         for (i = 0; i < nrels; i++) {
3752                 if (!gelf_getrel(data, i, &rel)) {
3753                         pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
3754                         return -LIBBPF_ERRNO__FORMAT;
3755                 }
3756                 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
3757                         pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
3758                                 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
3759                         return -LIBBPF_ERRNO__FORMAT;
3760                 }
3761
3762                 if (rel.r_offset % BPF_INSN_SZ || rel.r_offset >= scn_data->d_size) {
3763                         pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
3764                                 relo_sec_name, (size_t)rel.r_offset, i);
3765                         return -LIBBPF_ERRNO__FORMAT;
3766                 }
3767
3768                 insn_idx = rel.r_offset / BPF_INSN_SZ;
3769                 /* relocations against static functions are recorded as
3770                  * relocations against the section that contains a function;
3771                  * in such case, symbol will be STT_SECTION and sym.st_name
3772                  * will point to empty string (0), so fetch section name
3773                  * instead
3774                  */
3775                 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
3776                         sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
3777                 else
3778                         sym_name = elf_sym_str(obj, sym.st_name);
3779                 sym_name = sym_name ?: "<?";
3780
3781                 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
3782                          relo_sec_name, i, insn_idx, sym_name);
3783
3784                 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
3785                 if (!prog) {
3786                         pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
3787                                 relo_sec_name, i, sec_name, insn_idx);
3788                         continue;
3789                 }
3790
3791                 relos = libbpf_reallocarray(prog->reloc_desc,
3792                                             prog->nr_reloc + 1, sizeof(*relos));
3793                 if (!relos)
3794                         return -ENOMEM;
3795                 prog->reloc_desc = relos;
3796
3797                 /* adjust insn_idx to local BPF program frame of reference */
3798                 insn_idx -= prog->sec_insn_off;
3799                 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
3800                                                 insn_idx, sym_name, &sym, &rel);
3801                 if (err)
3802                         return err;
3803
3804                 prog->nr_reloc++;
3805         }
3806         return 0;
3807 }
3808
3809 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
3810 {
3811         struct bpf_map_def *def = &map->def;
3812         __u32 key_type_id = 0, value_type_id = 0;
3813         int ret;
3814
3815         /* if it's a BTF-defined map, we don't need to search for type IDs.
3816          * A struct_ops map does not need btf_key_type_id and
3817          * btf_value_type_id either.
3818          */
3819         if (map->sec_idx == obj->efile.btf_maps_shndx ||
3820             bpf_map__is_struct_ops(map))
3821                 return 0;
3822
3823         if (!bpf_map__is_internal(map)) {
3824                 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
3825                                            def->value_size, &key_type_id,
3826                                            &value_type_id);
3827         } else {
3828                 /*
3829                  * LLVM annotates global data differently in BTF, that is,
3830                  * only as '.data', '.bss' or '.rodata'.
3831                  */
3832                 ret = btf__find_by_name(obj->btf,
3833                                 libbpf_type_to_btf_name[map->libbpf_type]);
3834         }
3835         if (ret < 0)
3836                 return ret;
3837
3838         map->btf_key_type_id = key_type_id;
3839         map->btf_value_type_id = bpf_map__is_internal(map) ?
3840                                  ret : value_type_id;
3841         return 0;
3842 }
3843
3844 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
3845 {
3846         struct bpf_map_info info = {};
3847         __u32 len = sizeof(info);
3848         int new_fd, err;
3849         char *new_name;
3850
3851         err = bpf_obj_get_info_by_fd(fd, &info, &len);
3852         if (err)
3853                 return err;
3854
3855         new_name = strdup(info.name);
3856         if (!new_name)
3857                 return -errno;
3858
3859         new_fd = open("/", O_RDONLY | O_CLOEXEC);
3860         if (new_fd < 0) {
3861                 err = -errno;
3862                 goto err_free_new_name;
3863         }
3864
3865         new_fd = dup3(fd, new_fd, O_CLOEXEC);
3866         if (new_fd < 0) {
3867                 err = -errno;
3868                 goto err_close_new_fd;
3869         }
3870
3871         err = zclose(map->fd);
3872         if (err) {
3873                 err = -errno;
3874                 goto err_close_new_fd;
3875         }
3876         free(map->name);
3877
3878         map->fd = new_fd;
3879         map->name = new_name;
3880         map->def.type = info.type;
3881         map->def.key_size = info.key_size;
3882         map->def.value_size = info.value_size;
3883         map->def.max_entries = info.max_entries;
3884         map->def.map_flags = info.map_flags;
3885         map->btf_key_type_id = info.btf_key_type_id;
3886         map->btf_value_type_id = info.btf_value_type_id;
3887         map->reused = true;
3888
3889         return 0;
3890
3891 err_close_new_fd:
3892         close(new_fd);
3893 err_free_new_name:
3894         free(new_name);
3895         return err;
3896 }
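
/* Typical usage (sketch; "obj" is an opened bpf_object, the pin path and
 * map name are assumed for illustration):
 *
 *	int pinned_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	if (pinned_fd >= 0 && map)
 *		bpf_map__reuse_fd(map, pinned_fd);
 */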
3897
3898 __u32 bpf_map__max_entries(const struct bpf_map *map)
3899 {
3900         return map->def.max_entries;
3901 }
3902
3903 struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
3904 {
3905         if (!bpf_map_type__is_map_in_map(map->def.type))
3906                 return NULL;
3907
3908         return map->inner_map;
3909 }
3910
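/* max_entries can only be changed before the map is actually created, i.e.,
 * before bpf_object__load(); once the map has a valid FD this fails with
 * -EBUSY
 */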
3911 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
3912 {
3913         if (map->fd >= 0)
3914                 return -EBUSY;
3915         map->def.max_entries = max_entries;
3916         return 0;
3917 }
3918
3919 int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
3920 {
3921         if (!map || !max_entries)
3922                 return -EINVAL;
3923
3924         return bpf_map__set_max_entries(map, max_entries);
3925 }
3926
3927 static int
3928 bpf_object__probe_loading(struct bpf_object *obj)
3929 {
3930         struct bpf_load_program_attr attr;
3931         char *cp, errmsg[STRERR_BUFSIZE];
3932         struct bpf_insn insns[] = {
3933                 BPF_MOV64_IMM(BPF_REG_0, 0),
3934                 BPF_EXIT_INSN(),
3935         };
3936         int ret;
3937
3938         /* make sure basic loading works */
3939
3940         memset(&attr, 0, sizeof(attr));
3941         attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3942         attr.insns = insns;
3943         attr.insns_cnt = ARRAY_SIZE(insns);
3944         attr.license = "GPL";
3945
3946         ret = bpf_load_program_xattr(&attr, NULL, 0);
3947         if (ret < 0) {
3948                 ret = errno;
3949                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3950                 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
3951                         "program. Make sure your kernel supports BPF "
3952                         "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
3953                         "set to a big enough value.\n", __func__, cp, ret);
3954                 return -ret;
3955         }
3956         close(ret);
3957
3958         return 0;
3959 }
3960
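/* helper for feature probes: a probe succeeded if it produced a valid FD,
 * which we just close; returns 1 on success, 0 otherwise
 */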
3961 static int probe_fd(int fd)
3962 {
3963         if (fd >= 0)
3964                 close(fd);
3965         return fd >= 0;
3966 }
3967
3968 static int probe_kern_prog_name(void)
3969 {
3970         struct bpf_load_program_attr attr;
3971         struct bpf_insn insns[] = {
3972                 BPF_MOV64_IMM(BPF_REG_0, 0),
3973                 BPF_EXIT_INSN(),
3974         };
3975         int ret;
3976
3977         /* make sure loading with name works */
3978
3979         memset(&attr, 0, sizeof(attr));
3980         attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3981         attr.insns = insns;
3982         attr.insns_cnt = ARRAY_SIZE(insns);
3983         attr.license = "GPL";
3984         attr.name = "test";
3985         ret = bpf_load_program_xattr(&attr, NULL, 0);
3986         return probe_fd(ret);
3987 }
3988
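/* check that the kernel accepts direct map value accesses
 * (BPF_PSEUDO_MAP_VALUE loads), which is what libbpf relies on for
 * .data/.rodata/.bss global data
 */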
3989 static int probe_kern_global_data(void)
3990 {
3991         struct bpf_load_program_attr prg_attr;
3992         struct bpf_create_map_attr map_attr;
3993         char *cp, errmsg[STRERR_BUFSIZE];
3994         struct bpf_insn insns[] = {
3995                 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
3996                 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
3997                 BPF_MOV64_IMM(BPF_REG_0, 0),
3998                 BPF_EXIT_INSN(),
3999         };
4000         int ret, map;
4001
4002         memset(&map_attr, 0, sizeof(map_attr));
4003         map_attr.map_type = BPF_MAP_TYPE_ARRAY;
4004         map_attr.key_size = sizeof(int);
4005         map_attr.value_size = 32;
4006         map_attr.max_entries = 1;
4007
4008         map = bpf_create_map_xattr(&map_attr);
4009         if (map < 0) {
4010                 ret = -errno;
4011                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4012                 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4013                         __func__, cp, -ret);
4014                 return ret;
4015         }
4016
4017         insns[0].imm = map;
4018
4019         memset(&prg_attr, 0, sizeof(prg_attr));
4020         prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4021         prg_attr.insns = insns;
4022         prg_attr.insns_cnt = ARRAY_SIZE(insns);
4023         prg_attr.license = "GPL";
4024
4025         ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
4026         close(map);
4027         return probe_fd(ret);
4028 }
4029
4030 static int probe_kern_btf(void)
4031 {
4032         static const char strs[] = "\0int";
4033         __u32 types[] = {
4034                 /* int */
4035                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4036         };
4037
4038         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4039                                              strs, sizeof(strs)));
4040 }
4041
4042 static int probe_kern_btf_func(void)
4043 {
4044         static const char strs[] = "\0int\0x\0a";
4045         /* void x(int a) {} */
4046         __u32 types[] = {
4047                 /* int */
4048                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4049                 /* FUNC_PROTO */                                /* [2] */
4050                 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4051                 BTF_PARAM_ENC(7, 1),
4052                 /* FUNC x */                                    /* [3] */
4053                 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
4054         };
4055
4056         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4057                                              strs, sizeof(strs)));
4058 }
4059
4060 static int probe_kern_btf_func_global(void)
4061 {
4062         static const char strs[] = "\0int\0x\0a";
4063         /* static void x(int a) {} */
4064         __u32 types[] = {
4065                 /* int */
4066                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4067                 /* FUNC_PROTO */                                /* [2] */
4068                 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4069                 BTF_PARAM_ENC(7, 1),
4070                 /* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
4071                 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
4072         };
4073
4074         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4075                                              strs, sizeof(strs)));
4076 }
4077
4078 static int probe_kern_btf_datasec(void)
4079 {
4080         static const char strs[] = "\0x\0.data";
4081         /* static int a; */
4082         __u32 types[] = {
4083                 /* int */
4084                 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4085                 /* VAR x */                                     /* [2] */
4086                 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
4087                 BTF_VAR_STATIC,
4088                 /* DATASEC val */                               /* [3] */
4089                 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
4090                 BTF_VAR_SECINFO_ENC(2, 0, 4),
4091         };
4092
4093         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4094                                              strs, sizeof(strs)));
4095 }
4096
4097 static int probe_kern_btf_float(void)
4098 {
4099         static const char strs[] = "\0float";
4100         __u32 types[] = {
4101                 /* float */
4102                 BTF_TYPE_FLOAT_ENC(1, 4),
4103         };
4104
4105         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4106                                              strs, sizeof(strs)));
4107 }
4108
4109 static int probe_kern_array_mmap(void)
4110 {
4111         struct bpf_create_map_attr attr = {
4112                 .map_type = BPF_MAP_TYPE_ARRAY,
4113                 .map_flags = BPF_F_MMAPABLE,
4114                 .key_size = sizeof(int),
4115                 .value_size = sizeof(int),
4116                 .max_entries = 1,
4117         };
4118
4119         return probe_fd(bpf_create_map_xattr(&attr));
4120 }
4121
4122 static int probe_kern_exp_attach_type(void)
4123 {
4124         struct bpf_load_program_attr attr;
4125         struct bpf_insn insns[] = {
4126                 BPF_MOV64_IMM(BPF_REG_0, 0),
4127                 BPF_EXIT_INSN(),
4128         };
4129
4130         memset(&attr, 0, sizeof(attr));
4131         /* use any valid combination of program type and (optional)
4132          * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
4133          * to see if kernel supports expected_attach_type field for
4134          * BPF_PROG_LOAD command
4135          */
4136         attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
4137         attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
4138         attr.insns = insns;
4139         attr.insns_cnt = ARRAY_SIZE(insns);
4140         attr.license = "GPL";
4141
4142         return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
4143 }
4144
4145 static int probe_kern_probe_read_kernel(void)
4146 {
4147         struct bpf_load_program_attr attr;
4148         struct bpf_insn insns[] = {
4149                 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),   /* r1 = r10 (fp) */
4150                 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),  /* r1 += -8 */
4151                 BPF_MOV64_IMM(BPF_REG_2, 8),            /* r2 = 8 */
4152                 BPF_MOV64_IMM(BPF_REG_3, 0),            /* r3 = 0 */
4153                 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
4154                 BPF_EXIT_INSN(),
4155         };
4156
4157         memset(&attr, 0, sizeof(attr));
4158         attr.prog_type = BPF_PROG_TYPE_KPROBE;
4159         attr.insns = insns;
4160         attr.insns_cnt = ARRAY_SIZE(insns);
4161         attr.license = "GPL";
4162
4163         return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
4164 }
4165
4166 static int probe_prog_bind_map(void)
4167 {
4168         struct bpf_load_program_attr prg_attr;
4169         struct bpf_create_map_attr map_attr;
4170         char *cp, errmsg[STRERR_BUFSIZE];
4171         struct bpf_insn insns[] = {
4172                 BPF_MOV64_IMM(BPF_REG_0, 0),
4173                 BPF_EXIT_INSN(),
4174         };
4175         int ret, map, prog;
4176
4177         memset(&map_attr, 0, sizeof(map_attr));
4178         map_attr.map_type = BPF_MAP_TYPE_ARRAY;
4179         map_attr.key_size = sizeof(int);
4180         map_attr.value_size = 32;
4181         map_attr.max_entries = 1;
4182
4183         map = bpf_create_map_xattr(&map_attr);
4184         if (map < 0) {
4185                 ret = -errno;
4186                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4187                 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4188                         __func__, cp, -ret);
4189                 return ret;
4190         }
4191
4192         memset(&prg_attr, 0, sizeof(prg_attr));
4193         prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4194         prg_attr.insns = insns;
4195         prg_attr.insns_cnt = ARRAY_SIZE(insns);
4196         prg_attr.license = "GPL";
4197
4198         prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
4199         if (prog < 0) {
4200                 close(map);
4201                 return 0;
4202         }
4203
4204         ret = bpf_prog_bind_map(prog, map, NULL);
4205
4206         close(map);
4207         close(prog);
4208
4209         return ret >= 0;
4210 }
4211
4212 static int probe_module_btf(void)
4213 {
4214         static const char strs[] = "\0int";
4215         __u32 types[] = {
4216                 /* int */
4217                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4218         };
4219         struct bpf_btf_info info;
4220         __u32 len = sizeof(info);
4221         char name[16];
4222         int fd, err;
4223
4224         fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4225         if (fd < 0)
4226                 return 0; /* BTF not supported at all */
4227
4228         memset(&info, 0, sizeof(info));
4229         info.name = ptr_to_u64(name);
4230         info.name_len = sizeof(name);
4231
4232         /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
4233          * kernel's module BTF support coincides with support for
4234          * name/name_len fields in struct bpf_btf_info.
4235          */
4236         err = bpf_obj_get_info_by_fd(fd, &info, &len);
4237         close(fd);
4238         return !err;
4239 }
4240
4241 enum kern_feature_result {
4242         FEAT_UNKNOWN = 0,
4243         FEAT_SUPPORTED = 1,
4244         FEAT_MISSING = 2,
4245 };
4246
4247 typedef int (*feature_probe_fn)(void);
4248
4249 static struct kern_feature_desc {
4250         const char *desc;
4251         feature_probe_fn probe;
4252         enum kern_feature_result res;
4253 } feature_probes[__FEAT_CNT] = {
4254         [FEAT_PROG_NAME] = {
4255                 "BPF program name", probe_kern_prog_name,
4256         },
4257         [FEAT_GLOBAL_DATA] = {
4258                 "global variables", probe_kern_global_data,
4259         },
4260         [FEAT_BTF] = {
4261                 "minimal BTF", probe_kern_btf,
4262         },
4263         [FEAT_BTF_FUNC] = {
4264                 "BTF functions", probe_kern_btf_func,
4265         },
4266         [FEAT_BTF_GLOBAL_FUNC] = {
4267                 "BTF global function", probe_kern_btf_func_global,
4268         },
4269         [FEAT_BTF_DATASEC] = {
4270                 "BTF data section and variable", probe_kern_btf_datasec,
4271         },
4272         [FEAT_ARRAY_MMAP] = {
4273                 "ARRAY map mmap()", probe_kern_array_mmap,
4274         },
4275         [FEAT_EXP_ATTACH_TYPE] = {
4276                 "BPF_PROG_LOAD expected_attach_type attribute",
4277                 probe_kern_exp_attach_type,
4278         },
4279         [FEAT_PROBE_READ_KERN] = {
4280                 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4281         },
4282         [FEAT_PROG_BIND_MAP] = {
4283                 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4284         },
4285         [FEAT_MODULE_BTF] = {
4286                 "module BTF support", probe_module_btf,
4287         },
4288         [FEAT_BTF_FLOAT] = {
4289                 "BTF_KIND_FLOAT support", probe_kern_btf_float,
4290         },
4291 };
4292
4293 static bool kernel_supports(enum kern_feature_id feat_id)
4294 {
4295         struct kern_feature_desc *feat = &feature_probes[feat_id];
4296         int ret;
4297
4298         if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4299                 ret = feat->probe();
4300                 if (ret > 0) {
4301                         WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4302                 } else if (ret == 0) {
4303                         WRITE_ONCE(feat->res, FEAT_MISSING);
4304                 } else {
4305                         pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4306                         WRITE_ONCE(feat->res, FEAT_MISSING);
4307                 }
4308         }
4309
4310         return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4311 }
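/*
 * Usage sketch: probe results are cached in feature_probes[] via
 * READ_ONCE()/WRITE_ONCE(), so repeated kernel_supports() calls are cheap
 * and callers simply branch on feature availability, e.g. the map
 * creation path further below only sets a map name when the kernel
 * understands it:
 *
 *	if (kernel_supports(FEAT_PROG_NAME))
 *		create_attr.name = map->name;
 */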
4312
4313 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4314 {
4315         struct bpf_map_info map_info = {};
4316         char msg[STRERR_BUFSIZE];
4317         __u32 map_info_len;
4318
4319         map_info_len = sizeof(map_info);
4320
4321         if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
4322                 pr_warn("failed to get map info for map FD %d: %s\n",
4323                         map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
4324                 return false;
4325         }
4326
4327         return (map_info.type == map->def.type &&
4328                 map_info.key_size == map->def.key_size &&
4329                 map_info.value_size == map->def.value_size &&
4330                 map_info.max_entries == map->def.max_entries &&
4331                 map_info.map_flags == map->def.map_flags);
4332 }
4333
4334 static int
4335 bpf_object__reuse_map(struct bpf_map *map)
4336 {
4337         char *cp, errmsg[STRERR_BUFSIZE];
4338         int err, pin_fd;
4339
4340         pin_fd = bpf_obj_get(map->pin_path);
4341         if (pin_fd < 0) {
4342                 err = -errno;
4343                 if (err == -ENOENT) {
4344                         pr_debug("found no pinned map to reuse at '%s'\n",
4345                                  map->pin_path);
4346                         return 0;
4347                 }
4348
4349                 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4350                 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4351                         map->pin_path, cp);
4352                 return err;
4353         }
4354
4355         if (!map_is_reuse_compat(map, pin_fd)) {
4356                 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4357                         map->pin_path);
4358                 close(pin_fd);
4359                 return -EINVAL;
4360         }
4361
4362         err = bpf_map__reuse_fd(map, pin_fd);
4363         if (err) {
4364                 close(pin_fd);
4365                 return err;
4366         }
4367         map->pinned = true;
4368         pr_debug("reused pinned map at '%s'\n", map->pin_path);
4369
4370         return 0;
4371 }
4372
4373 static int
4374 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4375 {
4376         enum libbpf_map_type map_type = map->libbpf_type;
4377         char *cp, errmsg[STRERR_BUFSIZE];
4378         int err, zero = 0;
4379
4380         err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4381         if (err) {
4382                 err = -errno;
4383                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4384                 pr_warn("Error setting initial map(%s) contents: %s\n",
4385                         map->name, cp);
4386                 return err;
4387         }
4388
4389         /* Freeze .rodata and .kconfig maps as read-only from the syscall side. */
4390         if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4391                 err = bpf_map_freeze(map->fd);
4392                 if (err) {
4393                         err = -errno;
4394                         cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4395                         pr_warn("Error freezing map(%s) as read-only: %s\n",
4396                                 map->name, cp);
4397                         return err;
4398                 }
4399         }
4400         return 0;
4401 }
4402
4403 static void bpf_map__destroy(struct bpf_map *map);
4404
4405 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
4406 {
4407         struct bpf_create_map_attr create_attr;
4408         struct bpf_map_def *def = &map->def;
4409
4410         memset(&create_attr, 0, sizeof(create_attr));
4411
4412         if (kernel_supports(FEAT_PROG_NAME))
4413                 create_attr.name = map->name;
4414         create_attr.map_ifindex = map->map_ifindex;
4415         create_attr.map_type = def->type;
4416         create_attr.map_flags = def->map_flags;
4417         create_attr.key_size = def->key_size;
4418         create_attr.value_size = def->value_size;
4419         create_attr.numa_node = map->numa_node;
4420
4421         if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
4422                 int nr_cpus;
4423
4424                 nr_cpus = libbpf_num_possible_cpus();
4425                 if (nr_cpus < 0) {
4426                         pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
4427                                 map->name, nr_cpus);
4428                         return nr_cpus;
4429                 }
4430                 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
4431                 create_attr.max_entries = nr_cpus;
4432         } else {
4433                 create_attr.max_entries = def->max_entries;
4434         }
4435
4436         if (bpf_map__is_struct_ops(map))
4437                 create_attr.btf_vmlinux_value_type_id =
4438                         map->btf_vmlinux_value_type_id;
4439
4440         create_attr.btf_fd = 0;
4441         create_attr.btf_key_type_id = 0;
4442         create_attr.btf_value_type_id = 0;
4443         if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
4444                 create_attr.btf_fd = btf__fd(obj->btf);
4445                 create_attr.btf_key_type_id = map->btf_key_type_id;
4446                 create_attr.btf_value_type_id = map->btf_value_type_id;
4447         }
4448
4449         if (bpf_map_type__is_map_in_map(def->type)) {
4450                 if (map->inner_map) {
4451                         int err;
4452
4453                         err = bpf_object__create_map(obj, map->inner_map);
4454                         if (err) {
4455                                 pr_warn("map '%s': failed to create inner map: %d\n",
4456                                         map->name, err);
4457                                 return err;
4458                         }
4459                         map->inner_map_fd = bpf_map__fd(map->inner_map);
4460                 }
4461                 if (map->inner_map_fd >= 0)
4462                         create_attr.inner_map_fd = map->inner_map_fd;
4463         }
4464
4465         map->fd = bpf_create_map_xattr(&create_attr);
4466         if (map->fd < 0 && (create_attr.btf_key_type_id ||
4467                             create_attr.btf_value_type_id)) {
4468                 char *cp, errmsg[STRERR_BUFSIZE];
4469                 int err = -errno;
4470
4471                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4472                 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
4473                         map->name, cp, err);
4474                 create_attr.btf_fd = 0;
4475                 create_attr.btf_key_type_id = 0;
4476                 create_attr.btf_value_type_id = 0;
4477                 map->btf_key_type_id = 0;
4478                 map->btf_value_type_id = 0;
4479                 map->fd = bpf_create_map_xattr(&create_attr);
4480         }
4481
4482         if (map->fd < 0)
4483                 return -errno;
4484
4485         if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
4486                 bpf_map__destroy(map->inner_map);
4487                 zfree(&map->inner_map);
4488         }
4489
4490         return 0;
4491 }
4492
4493 static int init_map_slots(struct bpf_map *map)
4494 {
4495         const struct bpf_map *targ_map;
4496         unsigned int i;
4497         int fd, err;
4498
4499         for (i = 0; i < map->init_slots_sz; i++) {
4500                 if (!map->init_slots[i])
4501                         continue;
4502
4503                 targ_map = map->init_slots[i];
4504                 fd = bpf_map__fd(targ_map);
4505                 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
4506                 if (err) {
4507                         err = -errno;
4508                         pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
4509                                 map->name, i, targ_map->name,
4510                                 fd, err);
4511                         return err;
4512                 }
4513                 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
4514                          map->name, i, targ_map->name, fd);
4515         }
4516
4517         zfree(&map->init_slots);
4518         map->init_slots_sz = 0;
4519
4520         return 0;
4521 }
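/*
 * Context, as a rough sketch (names below are made up; the exact macro
 * spellings come from bpf_helpers.h on the BPF program side): init_slots[]
 * is filled from declarative map-in-map initializers, so an outer map
 * defined roughly like
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 4);
 *		__type(key, int);
 *		__array(values, struct inner_def);
 *	} outer SEC(".maps") = {
 *		.values = { [1] = &inner },
 *	};
 *
 * ends up with init_slots[1] pointing at 'inner', and the loop above
 * writes inner's fd into slot 1 of the freshly created outer map.
 */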
4522
4523 static int
4524 bpf_object__create_maps(struct bpf_object *obj)
4525 {
4526         struct bpf_map *map;
4527         char *cp, errmsg[STRERR_BUFSIZE];
4528         unsigned int i, j;
4529         int err;
4530
4531         for (i = 0; i < obj->nr_maps; i++) {
4532                 map = &obj->maps[i];
4533
4534                 if (map->pin_path) {
4535                         err = bpf_object__reuse_map(map);
4536                         if (err) {
4537                                 pr_warn("map '%s': error reusing pinned map\n",
4538                                         map->name);
4539                                 goto err_out;
4540                         }
4541                 }
4542
4543                 if (map->fd >= 0) {
4544                         pr_debug("map '%s': skipping creation (preset fd=%d)\n",
4545                                  map->name, map->fd);
4546                 } else {
4547                         err = bpf_object__create_map(obj, map);
4548                         if (err)
4549                                 goto err_out;
4550
4551                         pr_debug("map '%s': created successfully, fd=%d\n",
4552                                  map->name, map->fd);
4553
4554                         if (bpf_map__is_internal(map)) {
4555                                 err = bpf_object__populate_internal_map(obj, map);
4556                                 if (err < 0) {
4557                                         zclose(map->fd);
4558                                         goto err_out;
4559                                 }
4560                         }
4561
4562                         if (map->init_slots_sz) {
4563                                 err = init_map_slots(map);
4564                                 if (err < 0) {
4565                                         zclose(map->fd);
4566                                         goto err_out;
4567                                 }
4568                         }
4569                 }
4570
4571                 if (map->pin_path && !map->pinned) {
4572                         err = bpf_map__pin(map, NULL);
4573                         if (err) {
4574                                 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
4575                                         map->name, map->pin_path, err);
4576                                 zclose(map->fd);
4577                                 goto err_out;
4578                         }
4579                 }
4580         }
4581
4582         return 0;
4583
4584 err_out:
4585         cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4586         pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
4587         pr_perm_msg(err);
4588         for (j = 0; j < i; j++)
4589                 zclose(obj->maps[j].fd);
4590         return err;
4591 }
4592
4593 #define BPF_CORE_SPEC_MAX_LEN 64
4594
4595 /* represents BPF CO-RE field or array element accessor */
4596 struct bpf_core_accessor {
4597         __u32 type_id;          /* struct/union type or array element type */
4598         __u32 idx;              /* field index or array index */
4599         const char *name;       /* field name or NULL for array accessor */
4600 };
4601
4602 struct bpf_core_spec {
4603         const struct btf *btf;
4604         /* high-level spec: named fields and array indices only */
4605         struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
4606         /* original unresolved (no skip_mods_or_typedefs) root type ID */
4607         __u32 root_type_id;
4608         /* CO-RE relocation kind */
4609         enum bpf_core_relo_kind relo_kind;
4610         /* high-level spec length */
4611         int len;
4612         /* raw, low-level spec: 1-to-1 with accessor spec string */
4613         int raw_spec[BPF_CORE_SPEC_MAX_LEN];
4614         /* raw spec length */
4615         int raw_len;
4616         /* field bit offset represented by spec */
4617         __u32 bit_offset;
4618 };
4619
4620 static bool str_is_empty(const char *s)
4621 {
4622         return !s || !s[0];
4623 }
4624
4625 static bool is_flex_arr(const struct btf *btf,
4626                         const struct bpf_core_accessor *acc,
4627                         const struct btf_array *arr)
4628 {
4629         const struct btf_type *t;
4630
4631         /* not a flexible array if it's not inside a struct or has a non-zero size */
4632         if (!acc->name || arr->nelems > 0)
4633                 return false;
4634
4635         /* has to be the last member of enclosing struct */
4636         t = btf__type_by_id(btf, acc->type_id);
4637         return acc->idx == btf_vlen(t) - 1;
4638 }
4639
4640 static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
4641 {
4642         switch (kind) {
4643         case BPF_FIELD_BYTE_OFFSET: return "byte_off";
4644         case BPF_FIELD_BYTE_SIZE: return "byte_sz";
4645         case BPF_FIELD_EXISTS: return "field_exists";
4646         case BPF_FIELD_SIGNED: return "signed";
4647         case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
4648         case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
4649         case BPF_TYPE_ID_LOCAL: return "local_type_id";
4650         case BPF_TYPE_ID_TARGET: return "target_type_id";
4651         case BPF_TYPE_EXISTS: return "type_exists";
4652         case BPF_TYPE_SIZE: return "type_size";
4653         case BPF_ENUMVAL_EXISTS: return "enumval_exists";
4654         case BPF_ENUMVAL_VALUE: return "enumval_value";
4655         default: return "unknown";
4656         }
4657 }
4658
4659 static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
4660 {
4661         switch (kind) {
4662         case BPF_FIELD_BYTE_OFFSET:
4663         case BPF_FIELD_BYTE_SIZE:
4664         case BPF_FIELD_EXISTS:
4665         case BPF_FIELD_SIGNED:
4666         case BPF_FIELD_LSHIFT_U64:
4667         case BPF_FIELD_RSHIFT_U64:
4668                 return true;
4669         default:
4670                 return false;
4671         }
4672 }
4673
4674 static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
4675 {
4676         switch (kind) {
4677         case BPF_TYPE_ID_LOCAL:
4678         case BPF_TYPE_ID_TARGET:
4679         case BPF_TYPE_EXISTS:
4680         case BPF_TYPE_SIZE:
4681                 return true;
4682         default:
4683                 return false;
4684         }
4685 }
4686
4687 static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
4688 {
4689         switch (kind) {
4690         case BPF_ENUMVAL_EXISTS:
4691         case BPF_ENUMVAL_VALUE:
4692                 return true;
4693         default:
4694                 return false;
4695         }
4696 }
4697
4698 /*
4699  * Turn bpf_core_relo into a low- and high-level spec representation,
4700  * validating correctness along the way, as well as calculating resulting
4701  * field bit offset, specified by accessor string. Low-level spec captures
4702  * every single level of nestedness, including traversing anonymous
4703  * struct/union members. High-level one only captures semantically meaningful
4704  * "turning points": named fields and array indices.
4705  * E.g., for this case:
4706  *
4707  *   struct sample {
4708  *       int __unimportant;
4709  *       struct {
4710  *           int __1;
4711  *           int __2;
4712  *           int a[7];
4713  *       };
4714  *   };
4715  *
4716  *   struct sample *s = ...;
4717  *
4718  *   int x = &s->a[3]; // access string = '0:1:2:3'
4719  *
4720  * Low-level spec has 1:1 mapping with each element of access string (it's
4721  * just a parsed access string representation): [0, 1, 2, 3].
4722  *
4723  * High-level spec will capture only 3 points:
4724  *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
4725  *   - field 'a' access (corresponds to '2' in low-level spec);
4726  *   - array element #3 access (corresponds to '3' in low-level spec).
4727  *
4728  * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
4729  * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
4730  * spec and raw_spec are kept empty.
4731  *
4732  * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
4733  * string to specify the enumerator's value index that needs to be relocated.
4734  */
4735 static int bpf_core_parse_spec(const struct btf *btf,
4736                                __u32 type_id,
4737                                const char *spec_str,
4738                                enum bpf_core_relo_kind relo_kind,
4739                                struct bpf_core_spec *spec)
4740 {
4741         int access_idx, parsed_len, i;
4742         struct bpf_core_accessor *acc;
4743         const struct btf_type *t;
4744         const char *name;
4745         __u32 id;
4746         __s64 sz;
4747
4748         if (str_is_empty(spec_str) || *spec_str == ':')
4749                 return -EINVAL;
4750
4751         memset(spec, 0, sizeof(*spec));
4752         spec->btf = btf;
4753         spec->root_type_id = type_id;
4754         spec->relo_kind = relo_kind;
4755
4756         /* type-based relocations don't have a field access string */
4757         if (core_relo_is_type_based(relo_kind)) {
4758                 if (strcmp(spec_str, "0"))
4759                         return -EINVAL;
4760                 return 0;
4761         }
4762
4763         /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
4764         while (*spec_str) {
4765                 if (*spec_str == ':')
4766                         ++spec_str;
4767                 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
4768                         return -EINVAL;
4769                 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4770                         return -E2BIG;
4771                 spec_str += parsed_len;
4772                 spec->raw_spec[spec->raw_len++] = access_idx;
4773         }
4774
4775         if (spec->raw_len == 0)
4776                 return -EINVAL;
4777
4778         t = skip_mods_and_typedefs(btf, type_id, &id);
4779         if (!t)
4780                 return -EINVAL;
4781
4782         access_idx = spec->raw_spec[0];
4783         acc = &spec->spec[0];
4784         acc->type_id = id;
4785         acc->idx = access_idx;
4786         spec->len++;
4787
4788         if (core_relo_is_enumval_based(relo_kind)) {
4789                 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
4790                         return -EINVAL;
4791
4792                 /* record enumerator name in the first accessor */
4793                 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
4794                 return 0;
4795         }
4796
4797         if (!core_relo_is_field_based(relo_kind))
4798                 return -EINVAL;
4799
4800         sz = btf__resolve_size(btf, id);
4801         if (sz < 0)
4802                 return sz;
4803         spec->bit_offset = access_idx * sz * 8;
4804
4805         for (i = 1; i < spec->raw_len; i++) {
4806                 t = skip_mods_and_typedefs(btf, id, &id);
4807                 if (!t)
4808                         return -EINVAL;
4809
4810                 access_idx = spec->raw_spec[i];
4811                 acc = &spec->spec[spec->len];
4812
4813                 if (btf_is_composite(t)) {
4814                         const struct btf_member *m;
4815                         __u32 bit_offset;
4816
4817                         if (access_idx >= btf_vlen(t))
4818                                 return -EINVAL;
4819
4820                         bit_offset = btf_member_bit_offset(t, access_idx);
4821                         spec->bit_offset += bit_offset;
4822
4823                         m = btf_members(t) + access_idx;
4824                         if (m->name_off) {
4825                                 name = btf__name_by_offset(btf, m->name_off);
4826                                 if (str_is_empty(name))
4827                                         return -EINVAL;
4828
4829                                 acc->type_id = id;
4830                                 acc->idx = access_idx;
4831                                 acc->name = name;
4832                                 spec->len++;
4833                         }
4834
4835                         id = m->type;
4836                 } else if (btf_is_array(t)) {
4837                         const struct btf_array *a = btf_array(t);
4838                         bool flex;
4839
4840                         t = skip_mods_and_typedefs(btf, a->type, &id);
4841                         if (!t)
4842                                 return -EINVAL;
4843
4844                         flex = is_flex_arr(btf, acc - 1, a);
4845                         if (!flex && access_idx >= a->nelems)
4846                                 return -EINVAL;
4847
4848                         spec->spec[spec->len].type_id = id;
4849                         spec->spec[spec->len].idx = access_idx;
4850                         spec->len++;
4851
4852                         sz = btf__resolve_size(btf, id);
4853                         if (sz < 0)
4854                                 return sz;
4855                         spec->bit_offset += access_idx * sz * 8;
4856                 } else {
4857                         pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
4858                                 type_id, spec_str, i, id, btf_kind_str(t));
4859                         return -EINVAL;
4860                 }
4861         }
4862
4863         return 0;
4864 }
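/*
 * Worked example, continuing the 'struct sample' case from the comment
 * above: parsing access string "0:1:2:3" fills raw_spec = [0, 1, 2, 3]
 * (raw_len = 4), while the high-level spec keeps only three accessors:
 * the initial s[0] dereference, named field 'a', and array index 3.
 * Assuming 4-byte ints and no extra padding, bit_offset accumulates as
 * 0 + 32 (anonymous struct at byte 4) + 64 ('a' at byte 8 within it) +
 * 96 (element #3) = 192 bits, i.e. byte offset 24, which matches
 * offsetof(struct sample, a) + 3 * sizeof(int).
 */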
4865
4866 static bool bpf_core_is_flavor_sep(const char *s)
4867 {
4868         /* check X___Y name pattern, where X and Y are not underscores */
4869         return s[0] != '_' &&                                 /* X */
4870                s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
4871                s[4] != '_';                                   /* Y */
4872 }
4873
4874 /* Given 'some_struct_name___with_flavor' return the length of a name prefix
4875  * before the last triple underscore. The struct name part after the last
4876  * triple underscore is ignored by BPF CO-RE during relocation matching.
4877  */
4878 static size_t bpf_core_essential_name_len(const char *name)
4879 {
4880         size_t n = strlen(name);
4881         int i;
4882
4883         for (i = n - 5; i >= 0; i--) {
4884                 if (bpf_core_is_flavor_sep(name + i))
4885                         return i + 1;
4886         }
4887         return n;
4888 }
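/*
 * Example: for "task_struct___with_flavor" this returns 11, the length of
 * the essential prefix "task_struct"; a name without a "___" separator
 * keeps its full strlen(), so flavor-less names are compared whole.
 */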
4889
4890 struct core_cand
4891 {
4892         const struct btf *btf;
4893         const struct btf_type *t;
4894         const char *name;
4895         __u32 id;
4896 };
4897
4898 /* dynamically sized list of type IDs and its associated struct btf */
4899 struct core_cand_list {
4900         struct core_cand *cands;
4901         int len;
4902 };
4903
4904 static void bpf_core_free_cands(struct core_cand_list *cands)
4905 {
4906         free(cands->cands);
4907         free(cands);
4908 }
4909
4910 static int bpf_core_add_cands(struct core_cand *local_cand,
4911                               size_t local_essent_len,
4912                               const struct btf *targ_btf,
4913                               const char *targ_btf_name,
4914                               int targ_start_id,
4915                               struct core_cand_list *cands)
4916 {
4917         struct core_cand *new_cands, *cand;
4918         const struct btf_type *t;
4919         const char *targ_name;
4920         size_t targ_essent_len;
4921         int n, i;
4922
4923         n = btf__get_nr_types(targ_btf);
4924         for (i = targ_start_id; i <= n; i++) {
4925                 t = btf__type_by_id(targ_btf, i);
4926                 if (btf_kind(t) != btf_kind(local_cand->t))
4927                         continue;
4928
4929                 targ_name = btf__name_by_offset(targ_btf, t->name_off);
4930                 if (str_is_empty(targ_name))
4931                         continue;
4932
4933                 targ_essent_len = bpf_core_essential_name_len(targ_name);
4934                 if (targ_essent_len != local_essent_len)
4935                         continue;
4936
4937                 if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
4938                         continue;
4939
4940                 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
4941                          local_cand->id, btf_kind_str(local_cand->t),
4942                          local_cand->name, i, btf_kind_str(t), targ_name,
4943                          targ_btf_name);
4944                 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
4945                                               sizeof(*cands->cands));
4946                 if (!new_cands)
4947                         return -ENOMEM;
4948
4949                 cand = &new_cands[cands->len];
4950                 cand->btf = targ_btf;
4951                 cand->t = t;
4952                 cand->name = targ_name;
4953                 cand->id = i;
4954
4955                 cands->cands = new_cands;
4956                 cands->len++;
4957         }
4958         return 0;
4959 }
4960
4961 static int load_module_btfs(struct bpf_object *obj)
4962 {
4963         struct bpf_btf_info info;
4964         struct module_btf *mod_btf;
4965         struct btf *btf;
4966         char name[64];
4967         __u32 id = 0, len;
4968         int err, fd;
4969
4970         if (obj->btf_modules_loaded)
4971                 return 0;
4972
4973         /* don't do this again, even if we find no module BTFs */
4974         obj->btf_modules_loaded = true;
4975
4976         /* kernel too old to support module BTFs */
4977         if (!kernel_supports(FEAT_MODULE_BTF))
4978                 return 0;
4979
4980         while (true) {
4981                 err = bpf_btf_get_next_id(id, &id);
4982                 if (err && errno == ENOENT)
4983                         return 0;
4984                 if (err) {
4985                         err = -errno;
4986                         pr_warn("failed to iterate BTF objects: %d\n", err);
4987                         return err;
4988                 }
4989
4990                 fd = bpf_btf_get_fd_by_id(id);
4991                 if (fd < 0) {
4992                         if (errno == ENOENT)
4993                                 continue; /* expected race: BTF was unloaded */
4994                         err = -errno;
4995                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
4996                         return err;
4997                 }
4998
4999                 len = sizeof(info);
5000                 memset(&info, 0, sizeof(info));
5001                 info.name = ptr_to_u64(name);
5002                 info.name_len = sizeof(name);
5003
5004                 err = bpf_obj_get_info_by_fd(fd, &info, &len);
5005                 if (err) {
5006                         err = -errno;
5007                         pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5008                         goto err_out;
5009                 }
5010
5011                 /* ignore non-module BTFs */
5012                 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5013                         close(fd);
5014                         continue;
5015                 }
5016
5017                 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5018                 if (IS_ERR(btf)) {
5019                         pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n",
5020                                 name, id, PTR_ERR(btf));
5021                         err = PTR_ERR(btf);
5022                         goto err_out;
5023                 }
5024
5025                 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5026                                         sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5027                 if (err)
5028                         goto err_out;
5029
5030                 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5031
5032                 mod_btf->btf = btf;
5033                 mod_btf->id = id;
5034                 mod_btf->fd = fd;
5035                 mod_btf->name = strdup(name);
5036                 if (!mod_btf->name) {
5037                         err = -ENOMEM;
5038                         goto err_out;
5039                 }
5040                 continue;
5041
5042 err_out:
5043                 close(fd);
5044                 return err;
5045         }
5046
5047         return 0;
5048 }
5049
5050 static struct core_cand_list *
5051 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5052 {
5053         struct core_cand local_cand = {};
5054         struct core_cand_list *cands;
5055         const struct btf *main_btf;
5056         size_t local_essent_len;
5057         int err, i;
5058
5059         local_cand.btf = local_btf;
5060         local_cand.t = btf__type_by_id(local_btf, local_type_id);
5061         if (!local_cand.t)
5062                 return ERR_PTR(-EINVAL);
5063
5064         local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
5065         if (str_is_empty(local_cand.name))
5066                 return ERR_PTR(-EINVAL);
5067         local_essent_len = bpf_core_essential_name_len(local_cand.name);
5068
5069         cands = calloc(1, sizeof(*cands));
5070         if (!cands)
5071                 return ERR_PTR(-ENOMEM);
5072
5073         /* Attempt to find target candidates in vmlinux BTF first */
5074         main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5075         err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5076         if (err)
5077                 goto err_out;
5078
5079         /* if vmlinux BTF has any candidate, don't go for module BTFs */
5080         if (cands->len)
5081                 return cands;
5082
5083         /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5084         if (obj->btf_vmlinux_override)
5085                 return cands;
5086
5087         /* now look through module BTFs, still trying to find candidates */
5088         err = load_module_btfs(obj);
5089         if (err)
5090                 goto err_out;
5091
5092         for (i = 0; i < obj->btf_module_cnt; i++) {
5093                 err = bpf_core_add_cands(&local_cand, local_essent_len,
5094                                          obj->btf_modules[i].btf,
5095                                          obj->btf_modules[i].name,
5096                                          btf__get_nr_types(obj->btf_vmlinux) + 1,
5097                                          cands);
5098                 if (err)
5099                         goto err_out;
5100         }
5101
5102         return cands;
5103 err_out:
5104         bpf_core_free_cands(cands);
5105         return ERR_PTR(err);
5106 }
5107
5108 /* Check two types for compatibility for the purpose of field access
5109  * relocation. const/volatile/restrict and typedefs are skipped to ensure we
5110  * are relocating semantically compatible entities:
5111  *   - any two STRUCTs/UNIONs are compatible and can be mixed;
5112  *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
5113  *   - any two PTRs are always compatible;
5114  *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
5115  *     least one of enums should be anonymous;
5116  *   - for ENUMs, check sizes, names are ignored;
5117  *   - for INT, size and signedness are ignored;
5118  *   - any two FLOATs are always compatible;
5119  *   - for ARRAY, dimensionality is ignored, element types are checked for
5120  *     compatibility recursively;
5121  *   - everything else shouldn't ever be a target of relocation.
5122  * These rules are not set in stone and probably will be adjusted as we get
5123  * more experience with using BPF CO-RE relocations.
5124  */
5125 static int bpf_core_fields_are_compat(const struct btf *local_btf,
5126                                       __u32 local_id,
5127                                       const struct btf *targ_btf,
5128                                       __u32 targ_id)
5129 {
5130         const struct btf_type *local_type, *targ_type;
5131
5132 recur:
5133         local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5134         targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5135         if (!local_type || !targ_type)
5136                 return -EINVAL;
5137
5138         if (btf_is_composite(local_type) && btf_is_composite(targ_type))
5139                 return 1;
5140         if (btf_kind(local_type) != btf_kind(targ_type))
5141                 return 0;
5142
5143         switch (btf_kind(local_type)) {
5144         case BTF_KIND_PTR:
5145         case BTF_KIND_FLOAT:
5146                 return 1;
5147         case BTF_KIND_FWD:
5148         case BTF_KIND_ENUM: {
5149                 const char *local_name, *targ_name;
5150                 size_t local_len, targ_len;
5151
5152                 local_name = btf__name_by_offset(local_btf,
5153                                                  local_type->name_off);
5154                 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
5155                 local_len = bpf_core_essential_name_len(local_name);
5156                 targ_len = bpf_core_essential_name_len(targ_name);
5157                 /* one of them is anonymous or both w/ same flavor-less names */
5158                 return local_len == 0 || targ_len == 0 ||
5159                        (local_len == targ_len &&
5160                         strncmp(local_name, targ_name, local_len) == 0);
5161         }
5162         case BTF_KIND_INT:
5163                 /* just reject deprecated bitfield-like integers; all other
5164                  * integers are by default compatible with each other
5165                  */
5166                 return btf_int_offset(local_type) == 0 &&
5167                        btf_int_offset(targ_type) == 0;
5168         case BTF_KIND_ARRAY:
5169                 local_id = btf_array(local_type)->type;
5170                 targ_id = btf_array(targ_type)->type;
5171                 goto recur;
5172         default:
5173                 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
5174                         btf_kind(local_type), local_id, targ_id);
5175                 return 0;
5176         }
5177 }
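/*
 * Illustration of the rules above, applied to the matched members' types:
 * a (hypothetical) local member declared as
 *
 *	unsigned int len;	// local (BPF program side)
 *
 * is field-compatible with a target member declared as
 *
 *	long len;		// target (kernel BTF side)
 *
 * because INT size and signedness are ignored; only bitfield-like
 * integers (non-zero BTF_INT offset) are rejected.
 */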
5178
5179 /*
5180  * Given single high-level named field accessor in local type, find
5181  * corresponding high-level accessor for a target type. Along the way,
5182  * maintain low-level spec for target as well. Also keep updating target
5183  * bit offset.
5184  *
5185  * Searching is performed through recursive exhaustive enumeration of all
5186  * fields of a struct/union. If there are any anonymous (embedded)
5187  * structs/unions, they are recursively searched as well. If field with
5188  * desired name is found, check compatibility between local and target types,
5189  * before returning result.
5190  *
5191  * 1 is returned, if field is found.
5192  * 0 is returned if no compatible field is found.
5193  * <0 is returned on error.
5194  */
5195 static int bpf_core_match_member(const struct btf *local_btf,
5196                                  const struct bpf_core_accessor *local_acc,
5197                                  const struct btf *targ_btf,
5198                                  __u32 targ_id,
5199                                  struct bpf_core_spec *spec,
5200                                  __u32 *next_targ_id)
5201 {
5202         const struct btf_type *local_type, *targ_type;
5203         const struct btf_member *local_member, *m;
5204         const char *local_name, *targ_name;
5205         __u32 local_id;
5206         int i, n, found;
5207
5208         targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5209         if (!targ_type)
5210                 return -EINVAL;
5211         if (!btf_is_composite(targ_type))
5212                 return 0;
5213
5214         local_id = local_acc->type_id;
5215         local_type = btf__type_by_id(local_btf, local_id);
5216         local_member = btf_members(local_type) + local_acc->idx;
5217         local_name = btf__name_by_offset(local_btf, local_member->name_off);
5218
5219         n = btf_vlen(targ_type);
5220         m = btf_members(targ_type);
5221         for (i = 0; i < n; i++, m++) {
5222                 __u32 bit_offset;
5223
5224                 bit_offset = btf_member_bit_offset(targ_type, i);
5225
5226                 /* too deep struct/union/array nesting */
5227                 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5228                         return -E2BIG;
5229
5230                 /* speculate this member will be the good one */
5231                 spec->bit_offset += bit_offset;
5232                 spec->raw_spec[spec->raw_len++] = i;
5233
5234                 targ_name = btf__name_by_offset(targ_btf, m->name_off);
5235                 if (str_is_empty(targ_name)) {
5236                         /* embedded struct/union, we need to go deeper */
5237                         found = bpf_core_match_member(local_btf, local_acc,
5238                                                       targ_btf, m->type,
5239                                                       spec, next_targ_id);
5240                         if (found) /* either found or error */
5241                                 return found;
5242                 } else if (strcmp(local_name, targ_name) == 0) {
5243                         /* matching named field */
5244                         struct bpf_core_accessor *targ_acc;
5245
5246                         targ_acc = &spec->spec[spec->len++];
5247                         targ_acc->type_id = targ_id;
5248                         targ_acc->idx = i;
5249                         targ_acc->name = targ_name;
5250
5251                         *next_targ_id = m->type;
5252                         found = bpf_core_fields_are_compat(local_btf,
5253                                                            local_member->type,
5254                                                            targ_btf, m->type);
5255                         if (!found)
5256                                 spec->len--; /* pop accessor */
5257                         return found;
5258                 }
5259                 /* member turned out not to be what we looked for */
5260                 spec->bit_offset -= bit_offset;
5261                 spec->raw_len--;
5262         }
5263
5264         return 0;
5265 }
5266
5267 /* Check local and target types for compatibility. This check is used for
5268  * type-based CO-RE relocations and follows slightly different rules than
5269  * field-based relocations. This function assumes that root types were already
5270  * checked for name match. Beyond that initial root-level name check, names
5271  * are completely ignored. Compatibility rules are as follows:
5272  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5273  *     kind should match for local and target types (i.e., STRUCT is not
5274  *     compatible with UNION);
5275  *   - for ENUMs, the size is ignored;
5276  *   - for INT, size and signedness are ignored;
5277  *   - for ARRAY, dimensionality is ignored, element types are checked for
5278  *     compatibility recursively;
5279  *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
5280  *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5281  *   - FUNC_PROTOs are compatible if they have compatible signature: same
5282  *     number of input args and compatible return and argument types.
5283  * These rules are not set in stone and probably will be adjusted as we get
5284  * more experience with using BPF CO-RE relocations.
5285  */
5286 static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5287                                      const struct btf *targ_btf, __u32 targ_id)
5288 {
5289         const struct btf_type *local_type, *targ_type;
5290         int depth = 32; /* max recursion depth */
5291
5292         /* caller made sure that names match (ignoring flavor suffix) */
5293         local_type = btf__type_by_id(local_btf, local_id);
5294         targ_type = btf__type_by_id(targ_btf, targ_id);
5295         if (btf_kind(local_type) != btf_kind(targ_type))
5296                 return 0;
5297
5298 recur:
5299         depth--;
5300         if (depth < 0)
5301                 return -EINVAL;
5302
5303         local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5304         targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5305         if (!local_type || !targ_type)
5306                 return -EINVAL;
5307
5308         if (btf_kind(local_type) != btf_kind(targ_type))
5309                 return 0;
5310
5311         switch (btf_kind(local_type)) {
5312         case BTF_KIND_UNKN:
5313         case BTF_KIND_STRUCT:
5314         case BTF_KIND_UNION:
5315         case BTF_KIND_ENUM:
5316         case BTF_KIND_FWD:
5317                 return 1;
5318         case BTF_KIND_INT:
5319                 /* just reject deprecated bitfield-like integers; all other
5320                  * integers are by default compatible with each other
5321                  */
5322                 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
5323         case BTF_KIND_PTR:
5324                 local_id = local_type->type;
5325                 targ_id = targ_type->type;
5326                 goto recur;
5327         case BTF_KIND_ARRAY:
5328                 local_id = btf_array(local_type)->type;
5329                 targ_id = btf_array(targ_type)->type;
5330                 goto recur;
5331         case BTF_KIND_FUNC_PROTO: {
5332                 struct btf_param *local_p = btf_params(local_type);
5333                 struct btf_param *targ_p = btf_params(targ_type);
5334                 __u16 local_vlen = btf_vlen(local_type);
5335                 __u16 targ_vlen = btf_vlen(targ_type);
5336                 int i, err;
5337
5338                 if (local_vlen != targ_vlen)
5339                         return 0;
5340
5341                 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
5342                         skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
5343                         skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
5344                         err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
5345                         if (err <= 0)
5346                                 return err;
5347                 }
5348
5349                 /* tail recurse for return type check */
5350                 skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
5351                 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
5352                 goto recur;
5353         }
5354         default:
5355                 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
5356                         btf_kind_str(local_type), local_id, targ_id);
5357                 return 0;
5358         }
5359 }
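/*
 * Illustration (hypothetical prototypes): under the rules above
 *
 *	int  (*fn)(struct task_struct *p);
 *	long (*fn)(struct task_struct___v2 *p);
 *
 * have compatible FUNC_PROTOs: argument counts match, pointers are
 * chased, any two STRUCTs are compatible past the root-level name check,
 * and INT size is ignored for the return type.
 */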
5360
5361 /*
5362  * Try to match local spec to a target type and, if successful, produce full
5363  * target spec (high-level, low-level + bit offset).
5364  */
5365 static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
5366                                const struct btf *targ_btf, __u32 targ_id,
5367                                struct bpf_core_spec *targ_spec)
5368 {
5369         const struct btf_type *targ_type;
5370         const struct bpf_core_accessor *local_acc;
5371         struct bpf_core_accessor *targ_acc;
5372         int i, sz, matched;
5373
5374         memset(targ_spec, 0, sizeof(*targ_spec));
5375         targ_spec->btf = targ_btf;
5376         targ_spec->root_type_id = targ_id;
5377         targ_spec->relo_kind = local_spec->relo_kind;
5378
5379         if (core_relo_is_type_based(local_spec->relo_kind)) {
5380                 return bpf_core_types_are_compat(local_spec->btf,
5381                                                  local_spec->root_type_id,
5382                                                  targ_btf, targ_id);
5383         }
5384
5385         local_acc = &local_spec->spec[0];
5386         targ_acc = &targ_spec->spec[0];
5387
5388         if (core_relo_is_enumval_based(local_spec->relo_kind)) {
5389                 size_t local_essent_len, targ_essent_len;
5390                 const struct btf_enum *e;
5391                 const char *targ_name;
5392
5393                 /* has to resolve to an enum */
5394                 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
5395                 if (!btf_is_enum(targ_type))
5396                         return 0;
5397
5398                 local_essent_len = bpf_core_essential_name_len(local_acc->name);
5399
5400                 for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
5401                         targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
5402                         targ_essent_len = bpf_core_essential_name_len(targ_name);
5403                         if (targ_essent_len != local_essent_len)
5404                                 continue;
5405                         if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
5406                                 targ_acc->type_id = targ_id;
5407                                 targ_acc->idx = i;
5408                                 targ_acc->name = targ_name;
5409                                 targ_spec->len++;
5410                                 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5411                                 targ_spec->raw_len++;
5412                                 return 1;
5413                         }
5414                 }
5415                 return 0;
5416         }
5417
5418         if (!core_relo_is_field_based(local_spec->relo_kind))
5419                 return -EINVAL;
5420
5421         for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
5422                 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
5423                                                    &targ_id);
5424                 if (!targ_type)
5425                         return -EINVAL;
5426
5427                 if (local_acc->name) {
5428                         matched = bpf_core_match_member(local_spec->btf,
5429                                                         local_acc,
5430                                                         targ_btf, targ_id,
5431                                                         targ_spec, &targ_id);
5432                         if (matched <= 0)
5433                                 return matched;
5434                 } else {
5435                         /* for i=0, targ_id is already treated as array element
5436                          * type (because it's the original struct), for others
5437                          * we should find array element type first
5438                          */
5439                         if (i > 0) {
5440                                 const struct btf_array *a;
5441                                 bool flex;
5442
5443                                 if (!btf_is_array(targ_type))
5444                                         return 0;
5445
5446                                 a = btf_array(targ_type);
5447                                 flex = is_flex_arr(targ_btf, targ_acc - 1, a);
5448                                 if (!flex && local_acc->idx >= a->nelems)
5449                                         return 0;
5450                                 if (!skip_mods_and_typedefs(targ_btf, a->type,
5451                                                             &targ_id))
5452                                         return -EINVAL;
5453                         }
5454
5455                         /* too deep struct/union/array nesting */
5456                         if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5457                                 return -E2BIG;
5458
5459                         targ_acc->type_id = targ_id;
5460                         targ_acc->idx = local_acc->idx;
5461                         targ_acc->name = NULL;
5462                         targ_spec->len++;
5463                         targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5464                         targ_spec->raw_len++;
5465
5466                         sz = btf__resolve_size(targ_btf, targ_id);
5467                         if (sz < 0)
5468                                 return sz;
5469                         targ_spec->bit_offset += local_acc->idx * sz * 8;
5470                 }
5471         }
5472
5473         return 1;
5474 }
5475
5476 static int bpf_core_calc_field_relo(const struct bpf_program *prog,
5477                                     const struct bpf_core_relo *relo,
5478                                     const struct bpf_core_spec *spec,
5479                                     __u32 *val, __u32 *field_sz, __u32 *type_id,
5480                                     bool *validate)
5481 {
5482         const struct bpf_core_accessor *acc;
5483         const struct btf_type *t;
5484         __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
5485         const struct btf_member *m;
5486         const struct btf_type *mt;
5487         bool bitfield;
5488         __s64 sz;
5489
5490         *field_sz = 0;
5491
5492         if (relo->kind == BPF_FIELD_EXISTS) {
5493                 *val = spec ? 1 : 0;
5494                 return 0;
5495         }
5496
5497         if (!spec)
5498                 return -EUCLEAN; /* request instruction poisoning */
5499
5500         acc = &spec->spec[spec->len - 1];
5501         t = btf__type_by_id(spec->btf, acc->type_id);
5502
5503         /* a[n] accessor needs special handling */
5504         if (!acc->name) {
5505                 if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
5506                         *val = spec->bit_offset / 8;
5507                         /* remember field size for load/store mem size */
5508                         sz = btf__resolve_size(spec->btf, acc->type_id);
5509                         if (sz < 0)
5510                                 return -EINVAL;
5511                         *field_sz = sz;
5512                         *type_id = acc->type_id;
5513                 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
5514                         sz = btf__resolve_size(spec->btf, acc->type_id);
5515                         if (sz < 0)
5516                                 return -EINVAL;
5517                         *val = sz;
5518                 } else {
5519                         pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
5520                                 prog->name, relo->kind, relo->insn_off / 8);
5521                         return -EINVAL;
5522                 }
5523                 if (validate)
5524                         *validate = true;
5525                 return 0;
5526         }
5527
5528         m = btf_members(t) + acc->idx;
5529         mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
5530         bit_off = spec->bit_offset;
5531         bit_sz = btf_member_bitfield_size(t, acc->idx);
5532
5533         bitfield = bit_sz > 0;
5534         if (bitfield) {
5535                 byte_sz = mt->size;
5536                 byte_off = bit_off / 8 / byte_sz * byte_sz;
5537                 /* figure out smallest int size necessary for bitfield load */
5538                 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
5539                         if (byte_sz >= 8) {
5540                                 /* bitfield can't be read with 64-bit read */
5541                                 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
5542                                         prog->name, relo->kind, relo->insn_off / 8);
5543                                 return -E2BIG;
5544                         }
5545                         byte_sz *= 2;
5546                         byte_off = bit_off / 8 / byte_sz * byte_sz;
5547                 }
5548         } else {
5549                 sz = btf__resolve_size(spec->btf, field_type_id);
5550                 if (sz < 0)
5551                         return -EINVAL;
5552                 byte_sz = sz;
5553                 byte_off = spec->bit_offset / 8;
5554                 bit_sz = byte_sz * 8;
5555         }
5556
5557         /* for bitfields, all the relocatable aspects are ambiguous and we
5558          * might disagree with the compiler, so turn off validation of expected
5559          * value, except for signedness
5560          */
5561         if (validate)
5562                 *validate = !bitfield;
5563
5564         switch (relo->kind) {
5565         case BPF_FIELD_BYTE_OFFSET:
5566                 *val = byte_off;
5567                 if (!bitfield) {
5568                         *field_sz = byte_sz;
5569                         *type_id = field_type_id;
5570                 }
5571                 break;
5572         case BPF_FIELD_BYTE_SIZE:
5573                 *val = byte_sz;
5574                 break;
5575         case BPF_FIELD_SIGNED:
5576                 /* enums will be assumed unsigned */
5577                 *val = btf_is_enum(mt) ||
5578                        (btf_int_encoding(mt) & BTF_INT_SIGNED);
5579                 if (validate)
5580                         *validate = true; /* signedness is never ambiguous */
5581                 break;
5582         case BPF_FIELD_LSHIFT_U64:
5583 #if __BYTE_ORDER == __LITTLE_ENDIAN
5584                 *val = 64 - (bit_off + bit_sz - byte_off  * 8);
5585 #else
5586                 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
5587 #endif
5588                 break;
5589         case BPF_FIELD_RSHIFT_U64:
5590                 *val = 64 - bit_sz;
5591                 if (validate)
5592                         *validate = true; /* right shift is never ambiguous */
5593                 break;
5594         case BPF_FIELD_EXISTS:
5595         default:
5596                 return -EOPNOTSUPP;
5597         }
5598
5599         return 0;
5600 }
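/* Illustrative sketch (not part of the original source): a worked example of
 * the bitfield math above, assuming a hypothetical target layout where the
 * relocated bitfield starts at bit_off = 20, has bit_sz = 5, and its member's
 * underlying type is a 4-byte int (initial byte_sz = 4):
 *
 *   byte_off = 20 / 8 / 4 * 4 = 0      (no widening needed: 25 <= 32)
 *   BPF_FIELD_BYTE_OFFSET -> 0
 *   BPF_FIELD_BYTE_SIZE   -> 4
 *   BPF_FIELD_LSHIFT_U64  -> 64 - (20 + 5 - 0 * 8) = 39   (little-endian)
 *   BPF_FIELD_RSHIFT_U64  -> 64 - 5 = 59
 *
 * Loading 4 bytes at offset 0 into a u64, shifting left by 39 and then right
 * by 59 extracts exactly those 5 bits (arithmetic right shift for signed
 * bitfields, per BPF_FIELD_SIGNED).
 */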
5601
5602 static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
5603                                    const struct bpf_core_spec *spec,
5604                                    __u32 *val)
5605 {
5606         __s64 sz;
5607
5608         /* type-based relos return zero when target type is not found */
5609         if (!spec) {
5610                 *val = 0;
5611                 return 0;
5612         }
5613
5614         switch (relo->kind) {
5615         case BPF_TYPE_ID_TARGET:
5616                 *val = spec->root_type_id;
5617                 break;
5618         case BPF_TYPE_EXISTS:
5619                 *val = 1;
5620                 break;
5621         case BPF_TYPE_SIZE:
5622                 sz = btf__resolve_size(spec->btf, spec->root_type_id);
5623                 if (sz < 0)
5624                         return -EINVAL;
5625                 *val = sz;
5626                 break;
5627         case BPF_TYPE_ID_LOCAL:
5628         /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
5629         default:
5630                 return -EOPNOTSUPP;
5631         }
5632
5633         return 0;
5634 }
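/* For context (illustrative, not part of the original source): on the BPF
 * program side, type-based relocations are typically produced by the helper
 * macros from bpf_core_read.h; the struct name below is just an example:
 *
 *   __u32 id = bpf_core_type_id_kernel(struct task_struct); // BPF_TYPE_ID_TARGET
 *   bool  ok = bpf_core_type_exists(struct task_struct);    // BPF_TYPE_EXISTS
 *   __u32 sz = bpf_core_type_size(struct task_struct);      // BPF_TYPE_SIZE
 *
 * bpf_core_type_id_local() similarly maps to BPF_TYPE_ID_LOCAL, which is
 * resolved without any candidate search (see bpf_core_apply_relo() below).
 */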
5635
5636 static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
5637                                       const struct bpf_core_spec *spec,
5638                                       __u32 *val)
5639 {
5640         const struct btf_type *t;
5641         const struct btf_enum *e;
5642
5643         switch (relo->kind) {
5644         case BPF_ENUMVAL_EXISTS:
5645                 *val = spec ? 1 : 0;
5646                 break;
5647         case BPF_ENUMVAL_VALUE:
5648                 if (!spec)
5649                         return -EUCLEAN; /* request instruction poisoning */
5650                 t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
5651                 e = btf_enum(t) + spec->spec[0].idx;
5652                 *val = e->val;
5653                 break;
5654         default:
5655                 return -EOPNOTSUPP;
5656         }
5657
5658         return 0;
5659 }
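/* For context (illustrative, not part of the original source): enum value
 * relocations correspond to the bpf_core_enum_value_exists() and
 * bpf_core_enum_value() macros from bpf_core_read.h; the enum and enumerator
 * names below are made up:
 *
 *   if (bpf_core_enum_value_exists(enum some_kernel_enum, SOME_VAL))
 *       x = bpf_core_enum_value(enum some_kernel_enum, SOME_VAL);
 *
 * The first expands to a BPF_ENUMVAL_EXISTS relocation, the second to
 * BPF_ENUMVAL_VALUE, both of which are resolved right above.
 */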
5660
5661 struct bpf_core_relo_res
5662 {
5663         /* expected value in the instruction, unless validate == false */
5664         __u32 orig_val;
5665         /* new value that needs to be patched up to */
5666         __u32 new_val;
5667         /* relocation unsuccessful, poison instruction, but don't fail load */
5668         bool poison;
5669         /* some relocations can't be validated against orig_val */
5670         bool validate;
5671         /* for field byte offset relocations or the forms:
5672          *     *(T *)(rX + <off>) = rY
5673          *     rX = *(T *)(rY + <off>),
5674          * we remember original and resolved field size to adjust direct
5675          * memory loads of pointers and integers; this is necessary for 32-bit
5676          * host kernel architectures, but also allows to automatically
5677          * relocate fields that were resized from, e.g., u32 to u64, etc.
5678          */
5679         bool fail_memsz_adjust;
5680         __u32 orig_sz;
5681         __u32 orig_type_id;
5682         __u32 new_sz;
5683         __u32 new_type_id;
5684 };
5685
5686 /* Calculate original and target relocation values, given local and target
5687  * specs and relocation kind. These values are calculated for each candidate.
5688  * If there are multiple candidates, resulting values should all be consistent
5689  * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
5690  * If instruction has to be poisoned, *poison will be set to true.
5691  */
5692 static int bpf_core_calc_relo(const struct bpf_program *prog,
5693                               const struct bpf_core_relo *relo,
5694                               int relo_idx,
5695                               const struct bpf_core_spec *local_spec,
5696                               const struct bpf_core_spec *targ_spec,
5697                               struct bpf_core_relo_res *res)
5698 {
5699         int err = -EOPNOTSUPP;
5700
5701         res->orig_val = 0;
5702         res->new_val = 0;
5703         res->poison = false;
5704         res->validate = true;
5705         res->fail_memsz_adjust = false;
5706         res->orig_sz = res->new_sz = 0;
5707         res->orig_type_id = res->new_type_id = 0;
5708
5709         if (core_relo_is_field_based(relo->kind)) {
5710                 err = bpf_core_calc_field_relo(prog, relo, local_spec,
5711                                                &res->orig_val, &res->orig_sz,
5712                                                &res->orig_type_id, &res->validate);
5713                 err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
5714                                                       &res->new_val, &res->new_sz,
5715                                                       &res->new_type_id, NULL);
5716                 if (err)
5717                         goto done;
5718                 /* Validate if it's safe to adjust load/store memory size.
5719                  * Adjustments are performed only if original and new memory
5720                  * sizes differ.
5721                  */
5722                 res->fail_memsz_adjust = false;
5723                 if (res->orig_sz != res->new_sz) {
5724                         const struct btf_type *orig_t, *new_t;
5725
5726                         orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
5727                         new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
5728
5729                         /* There are two use cases in which it's safe to
5730                          * adjust load/store's mem size:
5731                          *   - reading a 32-bit kernel pointer, while on BPF
5732                          *   side pointers are always 64-bit; in this case
5733                          *   it's safe to "downsize" instruction size due to
5734                          *   pointer being treated as unsigned integer with
5735                          *   zero-extended upper 32-bits;
5736                          *   - reading unsigned integers, again because
5737                          *   zero-extension preserves the value correctly.
5738                          *
5739                          * In all other cases it's incorrect to attempt to
5740                          * load/store field because read value will be
5741                          * incorrect, so we poison relocated instruction.
5742                          */
5743                         if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
5744                                 goto done;
5745                         if (btf_is_int(orig_t) && btf_is_int(new_t) &&
5746                             btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
5747                             btf_int_encoding(new_t) != BTF_INT_SIGNED)
5748                                 goto done;
5749
5750                         /* mark as invalid mem size adjustment, but this will
5751                          * only be checked for LDX/STX/ST insns
5752                          */
5753                         res->fail_memsz_adjust = true;
5754                 }
5755         } else if (core_relo_is_type_based(relo->kind)) {
5756                 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
5757                 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
5758         } else if (core_relo_is_enumval_based(relo->kind)) {
5759                 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
5760                 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
5761         }
5762
5763 done:
5764         if (err == -EUCLEAN) {
5765                 /* EUCLEAN is used to signal instruction poisoning request */
5766                 res->poison = true;
5767                 err = 0;
5768         } else if (err == -EOPNOTSUPP) {
5769                 /* EOPNOTSUPP means unknown/unsupported relocation */
5770                 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
5771                         prog->name, relo_idx, core_relo_kind_str(relo->kind),
5772                         relo->kind, relo->insn_off / 8);
5773         }
5774
5775         return err;
5776 }
5777
5778 /*
5779  * Turn an instruction for which CO-RE relocation failed into an invalid one
5780  * with a distinct signature.
5781  */
5782 static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
5783                                  int insn_idx, struct bpf_insn *insn)
5784 {
5785         pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
5786                  prog->name, relo_idx, insn_idx);
5787         insn->code = BPF_JMP | BPF_CALL;
5788         insn->dst_reg = 0;
5789         insn->src_reg = 0;
5790         insn->off = 0;
5791         /* if this instruction is reachable (not a dead code),
5792          * verifier will complain with the following message:
5793          * invalid func unknown#195896080
5794          */
5795         insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
5796 }
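/* Illustrative sketch (not part of the original source) of why poisoning is
 * acceptable for properly guarded accesses; field and variable names are
 * hypothetical:
 *
 *   if (bpf_core_field_exists(t->new_field))  // relocated to constant 0 or 1
 *       val = BPF_CORE_READ(t, new_field);    // poisoned if field is absent
 *
 * When the field doesn't exist on the target kernel, the existence check is
 * patched to constant 0, the guarded read becomes dead code, and the
 * verifier eliminates it before ever executing the poisoned instruction.
 */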
5797
5798 static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
5799 {
5800         switch (BPF_SIZE(insn->code)) {
5801         case BPF_DW: return 8;
5802         case BPF_W: return 4;
5803         case BPF_H: return 2;
5804         case BPF_B: return 1;
5805         default: return -1;
5806         }
5807 }
5808
5809 static int insn_bytes_to_bpf_size(__u32 sz)
5810 {
5811         switch (sz) {
5812         case 8: return BPF_DW;
5813         case 4: return BPF_W;
5814         case 2: return BPF_H;
5815         case 1: return BPF_B;
5816         default: return -1;
5817         }
5818 }
5819
5820 /*
5821  * Patch relocatable BPF instruction.
5822  *
5823  * Patched value is determined by relocation kind and target specification.
5824  * For existence relocations, target spec will be NULL if the field/type is not found.
5825  * Expected insn->imm value is determined using relocation kind and local
5826  * spec, and is checked before patching instruction. If actual insn->imm value
5827  * is wrong, bail out with error.
5828  *
5829  * Currently supported classes of BPF instruction are:
5830  * 1. rX = <imm> (assignment with immediate operand);
5831  * 2. rX += <imm> (arithmetic operations with immediate operand);
5832  * 3. rX = <imm64> (load with 64-bit immediate value);
5833  * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
5834  * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
5835  * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
5836  */
5837 static int bpf_core_patch_insn(struct bpf_program *prog,
5838                                const struct bpf_core_relo *relo,
5839                                int relo_idx,
5840                                const struct bpf_core_relo_res *res)
5841 {
5842         __u32 orig_val, new_val;
5843         struct bpf_insn *insn;
5844         int insn_idx;
5845         __u8 class;
5846
5847         if (relo->insn_off % BPF_INSN_SZ)
5848                 return -EINVAL;
5849         insn_idx = relo->insn_off / BPF_INSN_SZ;
5850         /* adjust insn_idx from section frame of reference to the local
5851          * program's frame of reference; (sub-)program code is not yet
5852          * relocated, so it's enough to just subtract in-section offset
5853          */
5854         insn_idx = insn_idx - prog->sec_insn_off;
5855         insn = &prog->insns[insn_idx];
5856         class = BPF_CLASS(insn->code);
5857
5858         if (res->poison) {
5859 poison:
5860                 /* poison second part of ldimm64 to avoid confusing error from
5861                  * verifier about "unknown opcode 00"
5862                  */
5863                 if (is_ldimm64_insn(insn))
5864                         bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
5865                 bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
5866                 return 0;
5867         }
5868
5869         orig_val = res->orig_val;
5870         new_val = res->new_val;
5871
5872         switch (class) {
5873         case BPF_ALU:
5874         case BPF_ALU64:
5875                 if (BPF_SRC(insn->code) != BPF_K)
5876                         return -EINVAL;
5877                 if (res->validate && insn->imm != orig_val) {
5878                         pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
5879                                 prog->name, relo_idx,
5880                                 insn_idx, insn->imm, orig_val, new_val);
5881                         return -EINVAL;
5882                 }
5883                 orig_val = insn->imm;
5884                 insn->imm = new_val;
5885                 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
5886                          prog->name, relo_idx, insn_idx,
5887                          orig_val, new_val);
5888                 break;
5889         case BPF_LDX:
5890         case BPF_ST:
5891         case BPF_STX:
5892                 if (res->validate && insn->off != orig_val) {
5893                         pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
5894                                 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
5895                         return -EINVAL;
5896                 }
5897                 if (new_val > SHRT_MAX) {
5898                         pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
5899                                 prog->name, relo_idx, insn_idx, new_val);
5900                         return -ERANGE;
5901                 }
5902                 if (res->fail_memsz_adjust) {
5903                         pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
5904                                 "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
5905                                 prog->name, relo_idx, insn_idx);
5906                         goto poison;
5907                 }
5908
5909                 orig_val = insn->off;
5910                 insn->off = new_val;
5911                 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
5912                          prog->name, relo_idx, insn_idx, orig_val, new_val);
5913
5914                 if (res->new_sz != res->orig_sz) {
5915                         int insn_bytes_sz, insn_bpf_sz;
5916
5917                         insn_bytes_sz = insn_bpf_size_to_bytes(insn);
5918                         if (insn_bytes_sz != res->orig_sz) {
5919                                 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
5920                                         prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
5921                                 return -EINVAL;
5922                         }
5923
5924                         insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
5925                         if (insn_bpf_sz < 0) {
5926                                 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
5927                                         prog->name, relo_idx, insn_idx, res->new_sz);
5928                                 return -EINVAL;
5929                         }
5930
5931                         insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
5932                         pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
5933                                  prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
5934                 }
5935                 break;
5936         case BPF_LD: {
5937                 __u64 imm;
5938
5939                 if (!is_ldimm64_insn(insn) ||
5940                     insn[0].src_reg != 0 || insn[0].off != 0 ||
5941                     insn_idx + 1 >= prog->insns_cnt ||
5942                     insn[1].code != 0 || insn[1].dst_reg != 0 ||
5943                     insn[1].src_reg != 0 || insn[1].off != 0) {
5944                         pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
5945                                 prog->name, relo_idx, insn_idx);
5946                         return -EINVAL;
5947                 }
5948
5949                 imm = insn[0].imm + ((__u64)insn[1].imm << 32);
5950                 if (res->validate && imm != orig_val) {
5951                         pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
5952                                 prog->name, relo_idx,
5953                                 insn_idx, (unsigned long long)imm,
5954                                 orig_val, new_val);
5955                         return -EINVAL;
5956                 }
5957
5958                 insn[0].imm = new_val;
5959                 insn[1].imm = 0; /* currently only 32-bit values are supported */
5960                 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
5961                          prog->name, relo_idx, insn_idx,
5962                          (unsigned long long)imm, new_val);
5963                 break;
5964         }
5965         default:
5966                 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
5967                         prog->name, relo_idx, insn_idx, insn->code,
5968                         insn->src_reg, insn->dst_reg, insn->off, insn->imm);
5969                 return -EINVAL;
5970         }
5971
5972         return 0;
5973 }
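/* Illustrative example (not part of the original source) of the LDX/ST/STX
 * case above; offsets and sizes are made up. Suppose a field was a u32 at
 * byte offset 8 in local BTF but is a u64 at byte offset 16 on the target
 * kernel. The compiled instruction
 *
 *   r1 = *(u32 *)(r2 + 8)
 *
 * gets insn->off rewritten from 8 to 16 and, because orig_sz (4) differs
 * from new_sz (8), BPF_SIZE is switched from BPF_W to BPF_DW:
 *
 *   r1 = *(u64 *)(r2 + 16)
 *
 * Per bpf_core_calc_relo(), such a size adjustment is only permitted for
 * pointers and unsigned integers; otherwise the instruction is poisoned.
 */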
5974
5975 /* Output spec definition in the format:
5976  * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
5977  * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
5978  */
5979 static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
5980 {
5981         const struct btf_type *t;
5982         const struct btf_enum *e;
5983         const char *s;
5984         __u32 type_id;
5985         int i;
5986
5987         type_id = spec->root_type_id;
5988         t = btf__type_by_id(spec->btf, type_id);
5989         s = btf__name_by_offset(spec->btf, t->name_off);
5990
5991         libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
5992
5993         if (core_relo_is_type_based(spec->relo_kind))
5994                 return;
5995
5996         if (core_relo_is_enumval_based(spec->relo_kind)) {
5997                 t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
5998                 e = btf_enum(t) + spec->raw_spec[0];
5999                 s = btf__name_by_offset(spec->btf, e->name_off);
6000
6001                 libbpf_print(level, "::%s = %u", s, e->val);
6002                 return;
6003         }
6004
6005         if (core_relo_is_field_based(spec->relo_kind)) {
6006                 for (i = 0; i < spec->len; i++) {
6007                         if (spec->spec[i].name)
6008                                 libbpf_print(level, ".%s", spec->spec[i].name);
6009                         else if (i > 0 || spec->spec[i].idx > 0)
6010                                 libbpf_print(level, "[%u]", spec->spec[i].idx);
6011                 }
6012
6013                 libbpf_print(level, " (");
6014                 for (i = 0; i < spec->raw_len; i++)
6015                         libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
6016
6017                 if (spec->bit_offset % 8)
6018                         libbpf_print(level, " @ offset %u.%u)",
6019                                      spec->bit_offset / 8, spec->bit_offset % 8);
6020                 else
6021                         libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
6022                 return;
6023         }
6024 }
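/* Example (illustrative; names and numbers are made up) of the resulting
 * debug output for a field-based spec:
 *
 *   [25] struct task_struct.comm[3] (0:17:3 @ offset 1923)
 *
 * i.e. root type ID 25, C-syntax accessor path .comm[3], raw spec 0:17:3 and
 * the resolved byte offset; a bitfield offset is printed as <byte>.<bit>.
 */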
6025
6026 static size_t bpf_core_hash_fn(const void *key, void *ctx)
6027 {
6028         return (size_t)key;
6029 }
6030
6031 static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
6032 {
6033         return k1 == k2;
6034 }
6035
6036 static void *u32_as_hash_key(__u32 x)
6037 {
6038         return (void *)(uintptr_t)x;
6039 }
6040
6041 /*
6042  * CO-RE relocate single instruction.
6043  *
6044  * The outline and important points of the algorithm:
6045  * 1. For given local type, find corresponding candidate target types.
6046  *    Candidate type is a type with the same "essential" name, ignoring
6047  *    everything after last triple underscore (___). E.g., `sample`,
6048  *    `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
6049  *    for each other. Names with triple underscore are referred to as
6050  *    "flavors" and are useful, among other things, to allow specifying and
6051  *    supporting incompatible variations of the same kernel struct, which
6052  *    might differ between different kernel versions and/or build
6053  *    configurations.
6054  *
6055  *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
6056  *    converter, when deduplicated BTF of a kernel still contains more than
6057  *    one distinct type with the same name. In that case, ___2, ___3, etc.
6058  *    are appended starting from the second name conflict. But such flavors
6059  *    are also useful when defined "locally", in the BPF program, to extract
6060  *    the same data despite incompatible changes between different kernel
6061  *    versions/configurations. For instance, to handle a field rename between
6062  *    kernel versions, one can define two flavors of the struct with the same
6063  *    common name and use conditional relocations to extract that field,
6064  *    depending on the target kernel (see the sketch after this comment).
6065  * 2. For each candidate type, try to match local specification to this
6066  *    candidate target type. Matching involves finding corresponding
6067  *    high-level spec accessors, meaning that all named fields should match,
6068  *    as well as all array accesses should be within the actual bounds. Also,
6069  *    types should be compatible (see bpf_core_fields_are_compat for details).
6070  * 3. It is supported and expected that there might be multiple flavors
6071  *    matching the spec. As long as all the specs resolve to the same set of
6072  *    offsets across all candidates, there is no error. If there is any
6073  *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
6074  *    imperfections of BTF deduplication, which can cause slight duplication of
6075  *    the same BTF type, if some directly or indirectly referenced (by
6076  *    pointer) type gets resolved to different actual types in different
6077  *    object files. If such situation occurs, deduplicated BTF will end up
6078  *    with two (or more) structurally identical types, which differ only in
6079  *    types they refer to through pointer. This should be OK in most cases and
6080  *    is not an error.
6081  * 4. Candidate types search is performed by linearly scanning through all
6082  *    types in target BTF. It is anticipated that this is overall more
6083  *    efficient memory-wise and not significantly worse (if not better)
6084  *    CPU-wise compared to prebuilding a map from all local type names to
6085  *    a list of candidate type names. It's also sped up by caching resolved
6086  *    list of matching candidates for each local "root" type ID that has at
6087  *    least one bpf_core_relo associated with it. This list is shared
6088  *    between multiple relocations for the same type ID and is updated as some
6089  *    of the candidates are pruned due to structural incompatibility.
6090  */
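/* Illustrative sketch of a "flavor" (not part of the original source; struct
 * and field names are made up): two local definitions with the same essential
 * name cope with a field rename across kernel versions, picking the right one
 * at runtime via an existence check:
 *
 *   struct kern_struct___old { int old_name; } *o;
 *   struct kern_struct___new { int new_name; } *n;
 *
 *   if (bpf_core_field_exists(o->old_name))
 *       val = BPF_CORE_READ(o, old_name);
 *   else
 *       val = BPF_CORE_READ(n, new_name);
 *
 * Both flavors are candidates for the same kernel type "kern_struct"; the
 * relocations in the branch that doesn't match the running kernel are
 * poisoned, but that branch is dead code and is dropped by the verifier.
 */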
6091 static int bpf_core_apply_relo(struct bpf_program *prog,
6092                                const struct bpf_core_relo *relo,
6093                                int relo_idx,
6094                                const struct btf *local_btf,
6095                                struct hashmap *cand_cache)
6096 {
6097         struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
6098         const void *type_key = u32_as_hash_key(relo->type_id);
6099         struct bpf_core_relo_res cand_res, targ_res;
6100         const struct btf_type *local_type;
6101         const char *local_name;
6102         struct core_cand_list *cands = NULL;
6103         __u32 local_id;
6104         const char *spec_str;
6105         int i, j, err;
6106
6107         local_id = relo->type_id;
6108         local_type = btf__type_by_id(local_btf, local_id);
6109         if (!local_type)
6110                 return -EINVAL;
6111
6112         local_name = btf__name_by_offset(local_btf, local_type->name_off);
6113         if (!local_name)
6114                 return -EINVAL;
6115
6116         spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
6117         if (str_is_empty(spec_str))
6118                 return -EINVAL;
6119
6120         err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
6121         if (err) {
6122                 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
6123                         prog->name, relo_idx, local_id, btf_kind_str(local_type),
6124                         str_is_empty(local_name) ? "<anon>" : local_name,
6125                         spec_str, err);
6126                 return -EINVAL;
6127         }
6128
6129         pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
6130                  relo_idx, core_relo_kind_str(relo->kind), relo->kind);
6131         bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
6132         libbpf_print(LIBBPF_DEBUG, "\n");
6133
6134         /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
6135         if (relo->kind == BPF_TYPE_ID_LOCAL) {
6136                 targ_res.validate = true;
6137                 targ_res.poison = false;
6138                 targ_res.orig_val = local_spec.root_type_id;
6139                 targ_res.new_val = local_spec.root_type_id;
6140                 goto patch_insn;
6141         }
6142
6143         /* libbpf doesn't support candidate search for anonymous types */
6144         if (str_is_empty(local_name)) {
6145                 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
6146                         prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
6147                 return -EOPNOTSUPP;
6148         }
6149
6150         if (!hashmap__find(cand_cache, type_key, (void **)&cands)) {
6151                 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
6152                 if (IS_ERR(cands)) {
6153                         pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
6154                                 prog->name, relo_idx, local_id, btf_kind_str(local_type),
6155                                 local_name, PTR_ERR(cands));
6156                         return PTR_ERR(cands);
6157                 }
6158                 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
6159                 if (err) {
6160                         bpf_core_free_cands(cands);
6161                         return err;
6162                 }
6163         }
6164
6165         for (i = 0, j = 0; i < cands->len; i++) {
6166                 err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
6167                                           cands->cands[i].id, &cand_spec);
6168                 if (err < 0) {
6169                         pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
6170                                 prog->name, relo_idx, i);
6171                         bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
6172                         libbpf_print(LIBBPF_WARN, ": %d\n", err);
6173                         return err;
6174                 }
6175
6176                 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
6177                          relo_idx, err == 0 ? "non-matching" : "matching", i);
6178                 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
6179                 libbpf_print(LIBBPF_DEBUG, "\n");
6180
6181                 if (err == 0)
6182                         continue;
6183
6184                 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
6185                 if (err)
6186                         return err;
6187
6188                 if (j == 0) {
6189                         targ_res = cand_res;
6190                         targ_spec = cand_spec;
6191                 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
6192                         /* if there are many field relo candidates, they
6193                          * should all resolve to the same bit offset
6194                          */
6195                         pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
6196                                 prog->name, relo_idx, cand_spec.bit_offset,
6197                                 targ_spec.bit_offset);
6198                         return -EINVAL;
6199                 } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
6200                         /* all candidates should result in the same relocation
6201                          * decision and value, otherwise it's dangerous to
6202                          * proceed due to ambiguity
6203                          */
6204                         pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
6205                                 prog->name, relo_idx,
6206                                 cand_res.poison ? "failure" : "success", cand_res.new_val,
6207                                 targ_res.poison ? "failure" : "success", targ_res.new_val);
6208                         return -EINVAL;
6209                 }
6210
6211                 cands->cands[j++] = cands->cands[i];
6212         }
6213
6214         /*
6215          * For a BPF_FIELD_EXISTS relo, or when the BPF program has field
6216          * existence checks or kernel version/config checks, it's expected
6217          * that we might not find any candidates. In this case, if the field
6218          * wasn't found in any candidate, the list of candidates shouldn't
6219          * change at all; we'll just handle the relocation appropriately,
6220          * depending on the relo's kind.
6221          */
6222         if (j > 0)
6223                 cands->len = j;
6224
6225         /*
6226          * If no candidates were found, it might be either a programmer error
6227          * or an expected case, depending on whether the instruction with the
6228          * relocation is guarded in some way that makes it unreachable (dead
6229          * code) if the relocation can't be resolved. This is handled in
6230          * bpf_core_patch_insn() uniformly by replacing that instruction with
6231          * BPF helper call insn (using invalid helper ID). If that instruction
6232          * is indeed unreachable, then it will be ignored and eliminated by
6233          * verifier. If it was an error, then verifier will complain and point
6234          * to a specific instruction number in its log.
6235          */
6236         if (j == 0) {
6237                 pr_debug("prog '%s': relo #%d: no matching targets found\n",
6238                          prog->name, relo_idx);
6239
6240                 /* calculate single target relo result explicitly */
6241                 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
6242                 if (err)
6243                         return err;
6244         }
6245
6246 patch_insn:
6247         /* bpf_core_patch_insn() should know how to handle missing targ_spec */
6248         err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
6249         if (err) {
6250                 pr_warn("prog '%s': relo #%d: failed to patch insn #%zu: %d\n",
6251                         prog->name, relo_idx, relo->insn_off / BPF_INSN_SZ, err);
6252                 return -EINVAL;
6253         }
6254
6255         return 0;
6256 }
6257
6258 static int
6259 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
6260 {
6261         const struct btf_ext_info_sec *sec;
6262         const struct bpf_core_relo *rec;
6263         const struct btf_ext_info *seg;
6264         struct hashmap_entry *entry;
6265         struct hashmap *cand_cache = NULL;
6266         struct bpf_program *prog;
6267         const char *sec_name;
6268         int i, err = 0, insn_idx, sec_idx;
6269
6270         if (obj->btf_ext->core_relo_info.len == 0)
6271                 return 0;
6272
6273         if (targ_btf_path) {
6274                 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
6275                 if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) {
6276                         err = PTR_ERR(obj->btf_vmlinux_override);
6277                         pr_warn("failed to parse target BTF: %d\n", err);
6278                         return err;
6279                 }
6280         }
6281
6282         cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
6283         if (IS_ERR(cand_cache)) {
6284                 err = PTR_ERR(cand_cache);
6285                 goto out;
6286         }
6287
6288         seg = &obj->btf_ext->core_relo_info;
6289         for_each_btf_ext_sec(seg, sec) {
6290                 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6291                 if (str_is_empty(sec_name)) {
6292                         err = -EINVAL;
6293                         goto out;
6294                 }
6295                 /* bpf_object's ELF is gone by now so it's not easy to find
6296                  * section index by section name, but we can find *any*
6297                  * bpf_program within desired section name and use its
6298                  * prog->sec_idx to do a proper search by section index and
6299                  * instruction offset
6300                  */
6301                 prog = NULL;
6302                 for (i = 0; i < obj->nr_programs; i++) {
6303                         prog = &obj->programs[i];
6304                         if (strcmp(prog->sec_name, sec_name) == 0)
6305                                 break;
6306                 }
6307                 if (!prog) {
6308                         pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
6309                         return -ENOENT;
6310                 }
6311                 sec_idx = prog->sec_idx;
6312
6313                 pr_debug("sec '%s': found %d CO-RE relocations\n",
6314                          sec_name, sec->num_info);
6315
6316                 for_each_btf_ext_rec(seg, sec, i, rec) {
6317                         insn_idx = rec->insn_off / BPF_INSN_SZ;
6318                         prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
6319                         if (!prog) {
6320                                 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
6321                                         sec_name, insn_idx, i);
6322                                 err = -EINVAL;
6323                                 goto out;
6324                         }
6325                         /* no need to apply CO-RE relocation if the program is
6326                          * not going to be loaded
6327                          */
6328                         if (!prog->load)
6329                                 continue;
6330
6331                         err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
6332                         if (err) {
6333                                 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
6334                                         prog->name, i, err);
6335                                 goto out;
6336                         }
6337                 }
6338         }
6339
6340 out:
6341         /* obj->btf_vmlinux and module BTFs are freed after object load */
6342         btf__free(obj->btf_vmlinux_override);
6343         obj->btf_vmlinux_override = NULL;
6344
6345         if (!IS_ERR_OR_NULL(cand_cache)) {
6346                 hashmap__for_each_entry(cand_cache, entry, i) {
6347                         bpf_core_free_cands(entry->value);
6348                 }
6349                 hashmap__free(cand_cache);
6350         }
6351         return err;
6352 }
6353
6354 /* Relocate data references within program code:
6355  *  - map references;
6356  *  - global variable references;
6357  *  - extern references.
6358  */
6359 static int
6360 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
6361 {
6362         int i;
6363
6364         for (i = 0; i < prog->nr_reloc; i++) {
6365                 struct reloc_desc *relo = &prog->reloc_desc[i];
6366                 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6367                 struct extern_desc *ext;
6368
6369                 switch (relo->type) {
6370                 case RELO_LD64:
6371                         insn[0].src_reg = BPF_PSEUDO_MAP_FD;
6372                         insn[0].imm = obj->maps[relo->map_idx].fd;
6373                         break;
6374                 case RELO_DATA:
6375                         insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6376                         insn[1].imm = insn[0].imm + relo->sym_off;
6377                         insn[0].imm = obj->maps[relo->map_idx].fd;
6378                         break;
6379                 case RELO_EXTERN_VAR:
6380                         ext = &obj->externs[relo->sym_off];
6381                         if (ext->type == EXT_KCFG) {
6382                                 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6383                                 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6384                                 insn[1].imm = ext->kcfg.data_off;
6385                         } else /* EXT_KSYM */ {
6386                                 if (ext->ksym.type_id) { /* typed ksyms */
6387                                         insn[0].src_reg = BPF_PSEUDO_BTF_ID;
6388                                         insn[0].imm = ext->ksym.kernel_btf_id;
6389                                         insn[1].imm = ext->ksym.kernel_btf_obj_fd;
6390                                 } else { /* typeless ksyms */
6391                                         insn[0].imm = (__u32)ext->ksym.addr;
6392                                         insn[1].imm = ext->ksym.addr >> 32;
6393                                 }
6394                         }
6395                         break;
6396                 case RELO_EXTERN_FUNC:
6397                         ext = &obj->externs[relo->sym_off];
6398                         insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
6399                         insn[0].imm = ext->ksym.kernel_btf_id;
6400                         break;
6401                 case RELO_SUBPROG_ADDR:
6402                         insn[0].src_reg = BPF_PSEUDO_FUNC;
6403                         /* will be handled as a follow up pass */
6404                         break;
6405                 case RELO_CALL:
6406                         /* will be handled as a follow up pass */
6407                         break;
6408                 default:
6409                         pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6410                                 prog->name, i, relo->type);
6411                         return -EINVAL;
6412                 }
6413         }
6414
6415         return 0;
6416 }
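/* Illustrative example (not part of the original source) of the RELO_LD64
 * case above; the map name is made up. A BPF-side reference such as
 *
 *   bpf_map_lookup_elem(&my_map, &key);
 *
 * compiles into a two-instruction ldimm64 that loads the map's address.
 * Once the map is created, libbpf rewrites that insn pair to use
 * src_reg = BPF_PSEUDO_MAP_FD and imm = <map fd>, which is the form the
 * kernel expects for map references at program load time.
 */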
6417
6418 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6419                                     const struct bpf_program *prog,
6420                                     const struct btf_ext_info *ext_info,
6421                                     void **prog_info, __u32 *prog_rec_cnt,
6422                                     __u32 *prog_rec_sz)
6423 {
6424         void *copy_start = NULL, *copy_end = NULL;
6425         void *rec, *rec_end, *new_prog_info;
6426         const struct btf_ext_info_sec *sec;
6427         size_t old_sz, new_sz;
6428         const char *sec_name;
6429         int i, off_adj;
6430
6431         for_each_btf_ext_sec(ext_info, sec) {
6432                 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6433                 if (!sec_name)
6434                         return -EINVAL;
6435                 if (strcmp(sec_name, prog->sec_name) != 0)
6436                         continue;
6437
6438                 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6439                         __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6440
6441                         if (insn_off < prog->sec_insn_off)
6442                                 continue;
6443                         if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6444                                 break;
6445
6446                         if (!copy_start)
6447                                 copy_start = rec;
6448                         copy_end = rec + ext_info->rec_size;
6449                 }
6450
6451                 if (!copy_start)
6452                         return -ENOENT;
6453
6454                 /* append func/line info of a given (sub-)program to the main
6455                  * program func/line info
6456                  */
6457                 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6458                 new_sz = old_sz + (copy_end - copy_start);
6459                 new_prog_info = realloc(*prog_info, new_sz);
6460                 if (!new_prog_info)
6461                         return -ENOMEM;
6462                 *prog_info = new_prog_info;
6463                 *prog_rec_cnt = new_sz / ext_info->rec_size;
6464                 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6465
6466                 /* Kernel instruction offsets are in units of 8-byte
6467                  * instructions, while .BTF.ext instruction offsets generated
6468                  * by Clang are in units of bytes. So convert Clang offsets
6469                  * into kernel offsets and adjust offset according to program
6470                  * relocated position.
6471                  */
6472                 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6473                 rec = new_prog_info + old_sz;
6474                 rec_end = new_prog_info + new_sz;
6475                 for (; rec < rec_end; rec += ext_info->rec_size) {
6476                         __u32 *insn_off = rec;
6477
6478                         *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6479                 }
6480                 *prog_rec_sz = ext_info->rec_size;
6481                 return 0;
6482         }
6483
6484         return -ENOENT;
6485 }
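/* Illustrative example (not part of the original source) of the offset
 * adjustment above; numbers are made up. A subprog starts at byte offset
 * 0x80 within its ELF section (sec_insn_off = 16) and was appended at
 * instruction 42 of the main program (sub_insn_off = 42), so
 * off_adj = 42 - 16 = 26. A .BTF.ext record with byte offset 0x90
 * (instruction 18 within the section) is then rewritten to 18 + 26 = 44,
 * its position in the final, relocated main program.
 */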
6486
6487 static int
6488 reloc_prog_func_and_line_info(const struct bpf_object *obj,
6489                               struct bpf_program *main_prog,
6490                               const struct bpf_program *prog)
6491 {
6492         int err;
6493
6494         /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
6495          * support func/line info
6496          */
6497         if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
6498                 return 0;
6499
6500         /* only attempt func info relocation if main program's func_info
6501          * relocation was successful
6502          */
6503         if (main_prog != prog && !main_prog->func_info)
6504                 goto line_info;
6505
6506         err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6507                                        &main_prog->func_info,
6508                                        &main_prog->func_info_cnt,
6509                                        &main_prog->func_info_rec_size);
6510         if (err) {
6511                 if (err != -ENOENT) {
6512                         pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6513                                 prog->name, err);
6514                         return err;
6515                 }
6516                 if (main_prog->func_info) {
6517                         /*
6518                          * Some info has already been found, but the last
6519                          * btf_ext reloc failed, so we have to error out.
6520                          */
6521                         pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6522                         return err;
6523                 }
6524                 /* We had a problem loading the very first info. Ignore the rest. */
6525                 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6526                         prog->name);
6527         }
6528
6529 line_info:
6530         /* don't relocate line info if main program's relocation failed */
6531         if (main_prog != prog && !main_prog->line_info)
6532                 return 0;
6533
6534         err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6535                                        &main_prog->line_info,
6536                                        &main_prog->line_info_cnt,
6537                                        &main_prog->line_info_rec_size);
6538         if (err) {
6539                 if (err != -ENOENT) {
6540                         pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6541                                 prog->name, err);
6542                         return err;
6543                 }
6544                 if (main_prog->line_info) {
6545                         /*
6546                          * Some info has already been found, but the last
6547                          * btf_ext reloc failed, so we have to error out.
6548                          */
6549                         pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6550                         return err;
6551                 }
6552                 /* We had a problem loading the very first info. Ignore the rest. */
6553                 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6554                         prog->name);
6555         }
6556         return 0;
6557 }
6558
6559 static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6560 {
6561         size_t insn_idx = *(const size_t *)key;
6562         const struct reloc_desc *relo = elem;
6563
6564         if (insn_idx == relo->insn_idx)
6565                 return 0;
6566         return insn_idx < relo->insn_idx ? -1 : 1;
6567 }
6568
6569 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6570 {
6571         return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6572                        sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6573 }
6574
6575 static int
6576 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6577                        struct bpf_program *prog)
6578 {
6579         size_t sub_insn_idx, insn_idx, new_cnt;
6580         struct bpf_program *subprog;
6581         struct bpf_insn *insns, *insn;
6582         struct reloc_desc *relo;
6583         int err;
6584
6585         err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6586         if (err)
6587                 return err;
6588
6589         for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6590                 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6591                 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6592                         continue;
6593
6594                 relo = find_prog_insn_relo(prog, insn_idx);
6595                 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6596                         pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6597                                 prog->name, insn_idx, relo->type);
6598                         return -LIBBPF_ERRNO__RELOC;
6599                 }
6600                 if (relo) {
6601                         /* sub-program instruction index is a combination of
6602                          * an offset of a symbol pointed to by relocation and
6603                          * call instruction's imm field; for global functions,
6604                          * call always has imm = -1, but for static functions
6605                          * relocation is against STT_SECTION and insn->imm
6606                          * points to a start of a static function
6607                          *
6608                          * for subprog addr relocation, the relo->sym_off + insn->imm is
6609                          * the byte offset in the corresponding section.
6610                          */
6611                         if (relo->type == RELO_CALL)
6612                                 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6613                         else
6614                                 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6615                 } else if (insn_is_pseudo_func(insn)) {
6616                         /*
6617                          * RELO_SUBPROG_ADDR relo is always emitted even if both
6618                          * functions are in the same section, so it shouldn't reach here.
6619                          */
6620                         pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6621                                 prog->name, insn_idx);
6622                         return -LIBBPF_ERRNO__RELOC;
6623                 } else {
6624                         /* if subprogram call is to a static function within
6625                          * the same ELF section, there won't be any relocation
6626                          * emitted, but it also means there is no additional
6627                          * offset necessary, insns->imm is relative to
6628                          * instruction's original position within the section
6629                          */
6630                         sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6631                 }
6632
6633                 /* we enforce that sub-programs should be in .text section */
6634                 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6635                 if (!subprog) {
6636                         pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6637                                 prog->name);
6638                         return -LIBBPF_ERRNO__RELOC;
6639                 }
6640
6641                 /* if it's the first call instruction calling into this
6642                  * subprogram (meaning this subprog hasn't been processed
6643                  * yet) within the context of current main program:
6644                  *   - append it at the end of main program's instructions block;
6645                  *   - process it recursively, while current program is put on hold;
6646                  *   - if that subprogram calls some other not yet processed
6647                  *   subprogram, the same thing will happen recursively until
6648                  *   there are no more unprocessed subprograms left to append
6649                  *   and relocate.
6650                  */
6651                 if (subprog->sub_insn_off == 0) {
6652                         subprog->sub_insn_off = main_prog->insns_cnt;
6653
6654                         new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6655                         insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6656                         if (!insns) {
6657                                 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6658                                 return -ENOMEM;
6659                         }
6660                         main_prog->insns = insns;
6661                         main_prog->insns_cnt = new_cnt;
6662
6663                         memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6664                                subprog->insns_cnt * sizeof(*insns));
6665
6666                         pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6667                                  main_prog->name, subprog->insns_cnt, subprog->name);
6668
6669                         err = bpf_object__reloc_code(obj, main_prog, subprog);
6670                         if (err)
6671                                 return err;
6672                 }
6673
6674                 /* main_prog->insns memory could have been re-allocated, so
6675                  * calculate pointer again
6676                  */
6677                 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6678                 /* calculate correct instruction position within current main
6679                  * prog; each main prog can have a different set of
6680                  * subprograms appended (potentially in different order as
6681                  * well), so position of any subprog can be different for
6682                  * different main programs */
6683                 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6684
6685                 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6686                          prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6687         }
6688
6689         return 0;
6690 }
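/* Illustrative example (not part of the original source) of the call-insn
 * fixup above; numbers are made up. A main prog with 20 instructions
 * (sub_insn_off = 0) calls a subprog from insn #5, and the subprog gets
 * appended at sub_insn_off = 20. The call's imm is then set to
 * 20 - (0 + 5) - 1 = 14: the target is 14 instructions past the insn
 * following the call (5 + 14 + 1 = 20), which is how BPF-to-BPF call
 * offsets are encoded.
 */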
6691
6692 /*
6693  * Relocate sub-program calls.
6694  *
6695  * Algorithm operates as follows. Each entry-point BPF program (referred to as
6696  * main prog) is processed separately. Each subprog (a non-entry function
6697  * that can be called from either entry progs or other subprogs) gets its
6698  * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6699  * hasn't yet been appended and relocated within the current main prog. Once it's
6700  * relocated, sub_insn_off will point at the position within current main prog
6701  * where given subprog was appended. This will further be used to relocate all
6702  * the call instructions jumping into this subprog.
6703  *
6704  * We start with main program and process all call instructions. If the call
6705  * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6706  * is zero), subprog instructions are appended at the end of main program's
6707  * instruction array. Then the main program is "put on hold" while we recursively
6708  * process the newly appended subprogram. If that subprogram calls into another
6709  * subprogram that hasn't been appended yet, the new subprogram is likewise appended to
6710  * the *main* prog's instructions (subprog's instructions are always left
6711  * untouched, as they need to be in an unmodified state for subsequent main progs,
6712  * and subprog instructions are only ever sent to the kernel as part of a main prog) and
6713  * the process continues recursively. Once all the subprogs called from a main
6714  * prog or any of its subprogs are appended (and relocated), all their
6715  * positions within finalized instructions array are known, so it's easy to
6716  * rewrite call instructions with correct relative offsets, corresponding to
6717  * desired target subprog.
6718  *
6719  * It's important to realize that some subprogs might not be called from some
6720  * main prog or any of its called/used subprogs. Those will keep their
6721  * subprog->sub_insn_off as zero at all times and won't be appended to the current
6722  * main prog and won't be relocated within the context of current main prog.
6723  * They might still be used from other main progs later.
6724  *
6725  * Visually this process can be shown as below. Suppose we have two main
6726  * programs mainA and mainB and BPF object contains three subprogs: subA,
6727  * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6728  * subC both call subB:
6729  *
6730  *        +--------+ +-------+
6731  *        |        v v       |
6732  *     +--+---+ +--+-+-+ +---+--+
6733  *     | subA | | subB | | subC |
6734  *     +--+---+ +------+ +---+--+
6735  *        ^                  ^
6736  *        |                  |
6737  *    +---+-------+   +------+----+
6738  *    |   mainA   |   |   mainB   |
6739  *    +-----------+   +-----------+
6740  *
6741  * We'll start relocating mainA, find subA, append it, and start
6742  * processing subA recursively:
6743  *
6744  *    +-----------+------+
6745  *    |   mainA   | subA |
6746  *    +-----------+------+
6747  *
6748  * At this point we notice that subB is used from subA, so we append it and
6749  * relocate (there are no further subcalls from subB):
6750  *
6751  *    +-----------+------+------+
6752  *    |   mainA   | subA | subB |
6753  *    +-----------+------+------+
6754  *
6755  * At this point, we relocate subA calls, then go one level up and finish with
6756  * relocating mainA calls. mainA is done.
6757  *
6758  * For mainB the process is similar but results in a different order. We start with
6759  * mainB and skip subA and subB, as mainB never calls them (at least
6760  * directly), but we see subC is needed, so we append and start processing it:
6761  *
6762  *    +-----------+------+
6763  *    |   mainB   | subC |
6764  *    +-----------+------+
6765  * Now we see that subC needs subB, so we append and relocate it as well:
6766  *
6767  *    +-----------+------+------+
6768  *    |   mainB   | subC | subB |
6769  *    +-----------+------+------+
6770  *
6771  * At this point we unwind recursion, relocate calls in subC, then in mainB.
6772  */
6773 static int
6774 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6775 {
6776         struct bpf_program *subprog;
6777         int i, err;
6778
6779         /* mark all subprogs as not relocated (yet) within the context of
6780          * current main program
6781          */
6782         for (i = 0; i < obj->nr_programs; i++) {
6783                 subprog = &obj->programs[i];
6784                 if (!prog_is_subprog(obj, subprog))
6785                         continue;
6786
6787                 subprog->sub_insn_off = 0;
6788         }
6789
6790         err = bpf_object__reloc_code(obj, prog, prog);
6791         if (err)
6792                 return err;
6793
6794
6795         return 0;
6796 }
6797
6798 static int
6799 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6800 {
6801         struct bpf_program *prog;
6802         size_t i;
6803         int err;
6804
6805         if (obj->btf_ext) {
6806                 err = bpf_object__relocate_core(obj, targ_btf_path);
6807                 if (err) {
6808                         pr_warn("failed to perform CO-RE relocations: %d\n",
6809                                 err);
6810                         return err;
6811                 }
6812         }
6813         /* relocate data references first for all programs and sub-programs,
6814          * as they don't change relative to code locations, so subsequent
6815          * subprogram processing won't need to re-calculate any of them
6816          */
6817         for (i = 0; i < obj->nr_programs; i++) {
6818                 prog = &obj->programs[i];
6819                 err = bpf_object__relocate_data(obj, prog);
6820                 if (err) {
6821                         pr_warn("prog '%s': failed to relocate data references: %d\n",
6822                                 prog->name, err);
6823                         return err;
6824                 }
6825         }
6826         /* now relocate subprogram calls and append used subprograms to main
6827          * programs; each copy of subprogram code needs to be relocated
6828          * differently for each main program, because its code location might
6829          * have changed
6830          */
6831         for (i = 0; i < obj->nr_programs; i++) {
6832                 prog = &obj->programs[i];
6833                 /* sub-program's sub-calls are relocated within the context of
6834                  * its main program only
6835                  */
6836                 if (prog_is_subprog(obj, prog))
6837                         continue;
6838
6839                 err = bpf_object__relocate_calls(obj, prog);
6840                 if (err) {
6841                         pr_warn("prog '%s': failed to relocate calls: %d\n",
6842                                 prog->name, err);
6843                         return err;
6844                 }
6845         }
6846         /* free up relocation descriptors */
6847         for (i = 0; i < obj->nr_programs; i++) {
6848                 prog = &obj->programs[i];
6849                 zfree(&prog->reloc_desc);
6850                 prog->nr_reloc = 0;
6851         }
6852         return 0;
6853 }
6854
6855 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6856                                             GElf_Shdr *shdr, Elf_Data *data);
6857
6858 static int bpf_object__collect_map_relos(struct bpf_object *obj,
6859                                          GElf_Shdr *shdr, Elf_Data *data)
6860 {
6861         const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6862         int i, j, nrels, new_sz;
6863         const struct btf_var_secinfo *vi = NULL;
6864         const struct btf_type *sec, *var, *def;
6865         struct bpf_map *map = NULL, *targ_map;
6866         const struct btf_member *member;
6867         const char *name, *mname;
6868         Elf_Data *symbols;
6869         unsigned int moff;
6870         GElf_Sym sym;
6871         GElf_Rel rel;
6872         void *tmp;
6873
6874         if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6875                 return -EINVAL;
6876         sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6877         if (!sec)
6878                 return -EINVAL;
6879
6880         symbols = obj->efile.symbols;
6881         nrels = shdr->sh_size / shdr->sh_entsize;
6882         for (i = 0; i < nrels; i++) {
6883                 if (!gelf_getrel(data, i, &rel)) {
6884                         pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6885                         return -LIBBPF_ERRNO__FORMAT;
6886                 }
6887                 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
6888                         pr_warn(".maps relo #%d: symbol %zx not found\n",
6889                                 i, (size_t)GELF_R_SYM(rel.r_info));
6890                         return -LIBBPF_ERRNO__FORMAT;
6891                 }
6892                 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
6893                 if (sym.st_shndx != obj->efile.btf_maps_shndx) {
6894                         pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
6895                                 i, name);
6896                         return -LIBBPF_ERRNO__RELOC;
6897                 }
6898
6899                 pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
6900                          i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
6901                          (size_t)rel.r_offset, sym.st_name, name);
6902
6903                 for (j = 0; j < obj->nr_maps; j++) {
6904                         map = &obj->maps[j];
6905                         if (map->sec_idx != obj->efile.btf_maps_shndx)
6906                                 continue;
6907
6908                         vi = btf_var_secinfos(sec) + map->btf_var_idx;
6909                         if (vi->offset <= rel.r_offset &&
6910                             rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
6911                                 break;
6912                 }
6913                 if (j == obj->nr_maps) {
6914                         pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
6915                                 i, name, (size_t)rel.r_offset);
6916                         return -EINVAL;
6917                 }
6918
6919                 if (!bpf_map_type__is_map_in_map(map->def.type))
6920                         return -EINVAL;
6921                 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
6922                     map->def.key_size != sizeof(int)) {
6923                         pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
6924                                 i, map->name, sizeof(int));
6925                         return -EINVAL;
6926                 }
6927
6928                 targ_map = bpf_object__find_map_by_name(obj, name);
6929                 if (!targ_map)
6930                         return -ESRCH;
6931
6932                 var = btf__type_by_id(obj->btf, vi->type);
6933                 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6934                 if (btf_vlen(def) == 0)
6935                         return -EINVAL;
6936                 member = btf_members(def) + btf_vlen(def) - 1;
6937                 mname = btf__name_by_offset(obj->btf, member->name_off);
6938                 if (strcmp(mname, "values"))
6939                         return -EINVAL;
6940
6941                 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
6942                 if (rel.r_offset - vi->offset < moff)
6943                         return -EINVAL;
6944
6945                 moff = rel.r_offset - vi->offset - moff;
6946                 /* here we use BPF pointer size, which is always 64 bit, as we
6947                  * are parsing ELF that was built for BPF target
6948                  */
6949                 if (moff % bpf_ptr_sz)
6950                         return -EINVAL;
6951                 moff /= bpf_ptr_sz;
6952                 if (moff >= map->init_slots_sz) {
6953                         new_sz = moff + 1;
6954                         tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
6955                         if (!tmp)
6956                                 return -ENOMEM;
6957                         map->init_slots = tmp;
6958                         memset(map->init_slots + map->init_slots_sz, 0,
6959                                (new_sz - map->init_slots_sz) * host_ptr_sz);
6960                         map->init_slots_sz = new_sz;
6961                 }
6962                 map->init_slots[moff] = targ_map;
6963
6964                 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
6965                          i, map->name, moff, name);
6966         }
6967
6968         return 0;
6969 }
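/* Editor's sketch (not part of libbpf): the kind of BTF-defined map-in-map
 * declaration, written with the __uint()/__type()/__array() macros from
 * bpf_helpers.h, whose .values initializers produce the .maps relocations
 * handled above. All names are illustrative only.
 *
 *	struct inner_map {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, int);
 *	} inner_a SEC(".maps"), inner_b SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 2);
 *		__type(key, int);
 *		__array(values, struct inner_map);
 *	} outer SEC(".maps") = {
 *		.values = { [0] = &inner_a, [1] = &inner_b },
 *	};
 *
 * Each &inner_X initializer becomes an ELF relocation against the .maps
 * section; the code above resolves it into map->init_slots[slot] = targ_map.
 */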
6970
6971 static int cmp_relocs(const void *_a, const void *_b)
6972 {
6973         const struct reloc_desc *a = _a;
6974         const struct reloc_desc *b = _b;
6975
6976         if (a->insn_idx != b->insn_idx)
6977                 return a->insn_idx < b->insn_idx ? -1 : 1;
6978
6979         /* no two relocations should have the same insn_idx, but ... */
6980         if (a->type != b->type)
6981                 return a->type < b->type ? -1 : 1;
6982
6983         return 0;
6984 }
6985
6986 static int bpf_object__collect_relos(struct bpf_object *obj)
6987 {
6988         int i, err;
6989
6990         for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
6991                 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
6992                 Elf_Data *data = obj->efile.reloc_sects[i].data;
6993                 int idx = shdr->sh_info;
6994
6995                 if (shdr->sh_type != SHT_REL) {
6996                         pr_warn("internal error at %d\n", __LINE__);
6997                         return -LIBBPF_ERRNO__INTERNAL;
6998                 }
6999
7000                 if (idx == obj->efile.st_ops_shndx)
7001                         err = bpf_object__collect_st_ops_relos(obj, shdr, data);
7002                 else if (idx == obj->efile.btf_maps_shndx)
7003                         err = bpf_object__collect_map_relos(obj, shdr, data);
7004                 else
7005                         err = bpf_object__collect_prog_relos(obj, shdr, data);
7006                 if (err)
7007                         return err;
7008         }
7009
7010         for (i = 0; i < obj->nr_programs; i++) {
7011                 struct bpf_program *p = &obj->programs[i];
7012
7013                 if (!p->nr_reloc)
7014                         continue;
7015
7016                 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
7017         }
7018         return 0;
7019 }
7020
7021 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
7022 {
7023         if (BPF_CLASS(insn->code) == BPF_JMP &&
7024             BPF_OP(insn->code) == BPF_CALL &&
7025             BPF_SRC(insn->code) == BPF_K &&
7026             insn->src_reg == 0 &&
7027             insn->dst_reg == 0) {
7028                 *func_id = insn->imm;
7029                 return true;
7030         }
7031         return false;
7032 }
7033
7034 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
7035 {
7036         struct bpf_insn *insn = prog->insns;
7037         enum bpf_func_id func_id;
7038         int i;
7039
7040         for (i = 0; i < prog->insns_cnt; i++, insn++) {
7041                 if (!insn_is_helper_call(insn, &func_id))
7042                         continue;
7043
7044                 /* on kernels that don't yet support
7045                  * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7046                  * to bpf_probe_read() which works well for old kernels
7047                  */
7048                 switch (func_id) {
7049                 case BPF_FUNC_probe_read_kernel:
7050                 case BPF_FUNC_probe_read_user:
7051                         if (!kernel_supports(FEAT_PROBE_READ_KERN))
7052                                 insn->imm = BPF_FUNC_probe_read;
7053                         break;
7054                 case BPF_FUNC_probe_read_kernel_str:
7055                 case BPF_FUNC_probe_read_user_str:
7056                         if (!kernel_supports(FEAT_PROBE_READ_KERN))
7057                                 insn->imm = BPF_FUNC_probe_read_str;
7058                         break;
7059                 default:
7060                         break;
7061                 }
7062         }
7063         return 0;
7064 }
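/* Editor's note: at the BPF C source level the rewrite above is transparent.
 * A program that does, for example,
 *
 *	bpf_probe_read_kernel(&val, sizeof(val), ptr);
 *
 * compiles to a BPF_CALL instruction with imm == BPF_FUNC_probe_read_kernel;
 * on kernels lacking FEAT_PROBE_READ_KERN that imm is patched to
 * BPF_FUNC_probe_read, so the same object keeps working on older kernels.
 */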
7065
7066 static int
7067 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
7068              char *license, __u32 kern_version, int *pfd)
7069 {
7070         struct bpf_prog_load_params load_attr = {};
7071         char *cp, errmsg[STRERR_BUFSIZE];
7072         size_t log_buf_size = 0;
7073         char *log_buf = NULL;
7074         int btf_fd, ret;
7075
7076         if (prog->type == BPF_PROG_TYPE_UNSPEC) {
7077                 /*
7078                  * The program type must be set.  Most likely we couldn't find a proper
7079                  * section definition at load time, and thus we didn't infer the type.
7080                  */
7081                 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
7082                         prog->name, prog->sec_name);
7083                 return -EINVAL;
7084         }
7085
7086         if (!insns || !insns_cnt)
7087                 return -EINVAL;
7088
7089         load_attr.prog_type = prog->type;
7090         /* old kernels might not support specifying expected_attach_type */
7091         if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
7092             prog->sec_def->is_exp_attach_type_optional)
7093                 load_attr.expected_attach_type = 0;
7094         else
7095                 load_attr.expected_attach_type = prog->expected_attach_type;
7096         if (kernel_supports(FEAT_PROG_NAME))
7097                 load_attr.name = prog->name;
7098         load_attr.insns = insns;
7099         load_attr.insn_cnt = insns_cnt;
7100         load_attr.license = license;
7101         load_attr.attach_btf_id = prog->attach_btf_id;
7102         if (prog->attach_prog_fd)
7103                 load_attr.attach_prog_fd = prog->attach_prog_fd;
7104         else
7105                 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
7106         load_attr.attach_btf_id = prog->attach_btf_id;
7107         load_attr.kern_version = kern_version;
7108         load_attr.prog_ifindex = prog->prog_ifindex;
7109
7110         /* specify func_info/line_info only if kernel supports them */
7111         btf_fd = bpf_object__btf_fd(prog->obj);
7112         if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
7113                 load_attr.prog_btf_fd = btf_fd;
7114                 load_attr.func_info = prog->func_info;
7115                 load_attr.func_info_rec_size = prog->func_info_rec_size;
7116                 load_attr.func_info_cnt = prog->func_info_cnt;
7117                 load_attr.line_info = prog->line_info;
7118                 load_attr.line_info_rec_size = prog->line_info_rec_size;
7119                 load_attr.line_info_cnt = prog->line_info_cnt;
7120         }
7121         load_attr.log_level = prog->log_level;
7122         load_attr.prog_flags = prog->prog_flags;
7123
7124 retry_load:
7125         if (log_buf_size) {
7126                 log_buf = malloc(log_buf_size);
7127                 if (!log_buf)
7128                         return -ENOMEM;
7129
7130                 *log_buf = 0;
7131         }
7132
7133         load_attr.log_buf = log_buf;
7134         load_attr.log_buf_sz = log_buf_size;
7135         ret = libbpf__bpf_prog_load(&load_attr);
7136
7137         if (ret >= 0) {
7138                 if (log_buf && load_attr.log_level)
7139                         pr_debug("verifier log:\n%s", log_buf);
7140
7141                 if (prog->obj->rodata_map_idx >= 0 &&
7142                     kernel_supports(FEAT_PROG_BIND_MAP)) {
7143                         struct bpf_map *rodata_map =
7144                                 &prog->obj->maps[prog->obj->rodata_map_idx];
7145
7146                         if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
7147                                 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7148                                 pr_warn("prog '%s': failed to bind .rodata map: %s\n",
7149                                         prog->name, cp);
7150                                 /* Don't fail hard if can't bind rodata. */
7151                         }
7152                 }
7153
7154                 *pfd = ret;
7155                 ret = 0;
7156                 goto out;
7157         }
7158
7159         if (!log_buf || errno == ENOSPC) {
7160                 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
7161                                    log_buf_size << 1);
7162
7163                 free(log_buf);
7164                 goto retry_load;
7165         }
7166         ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
7167         cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7168         pr_warn("load bpf program failed: %s\n", cp);
7169         pr_perm_msg(ret);
7170
7171         if (log_buf && log_buf[0] != '\0') {
7172                 ret = -LIBBPF_ERRNO__VERIFY;
7173                 pr_warn("-- BEGIN DUMP LOG ---\n");
7174                 pr_warn("\n%s\n", log_buf);
7175                 pr_warn("-- END LOG --\n");
7176         } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
7177                 pr_warn("Program too large (%zu insns), at most %d insns\n",
7178                         load_attr.insn_cnt, BPF_MAXINSNS);
7179                 ret = -LIBBPF_ERRNO__PROG2BIG;
7180         } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
7181                 /* Wrong program type? */
7182                 int fd;
7183
7184                 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
7185                 load_attr.expected_attach_type = 0;
7186                 load_attr.log_buf = NULL;
7187                 load_attr.log_buf_sz = 0;
7188                 fd = libbpf__bpf_prog_load(&load_attr);
7189                 if (fd >= 0) {
7190                         close(fd);
7191                         ret = -LIBBPF_ERRNO__PROGTYPE;
7192                         goto out;
7193                 }
7194         }
7195
7196 out:
7197         free(log_buf);
7198         return ret;
7199 }
7200
7201 static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
7202
7203 int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
7204 {
7205         int err = 0, fd, i;
7206
7207         if (prog->obj->loaded) {
7208                 pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
7209                 return -EINVAL;
7210         }
7211
7212         if ((prog->type == BPF_PROG_TYPE_TRACING ||
7213              prog->type == BPF_PROG_TYPE_LSM ||
7214              prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
7215                 int btf_obj_fd = 0, btf_type_id = 0;
7216
7217                 err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
7218                 if (err)
7219                         return err;
7220
7221                 prog->attach_btf_obj_fd = btf_obj_fd;
7222                 prog->attach_btf_id = btf_type_id;
7223         }
7224
7225         if (prog->instances.nr < 0 || !prog->instances.fds) {
7226                 if (prog->preprocessor) {
7227                         pr_warn("Internal error: can't load program '%s'\n",
7228                                 prog->name);
7229                         return -LIBBPF_ERRNO__INTERNAL;
7230                 }
7231
7232                 prog->instances.fds = malloc(sizeof(int));
7233                 if (!prog->instances.fds) {
7234                         pr_warn("Not enough memory for BPF fds\n");
7235                         return -ENOMEM;
7236                 }
7237                 prog->instances.nr = 1;
7238                 prog->instances.fds[0] = -1;
7239         }
7240
7241         if (!prog->preprocessor) {
7242                 if (prog->instances.nr != 1) {
7243                         pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
7244                                 prog->name, prog->instances.nr);
7245                 }
7246                 err = load_program(prog, prog->insns, prog->insns_cnt,
7247                                    license, kern_ver, &fd);
7248                 if (!err)
7249                         prog->instances.fds[0] = fd;
7250                 goto out;
7251         }
7252
7253         for (i = 0; i < prog->instances.nr; i++) {
7254                 struct bpf_prog_prep_result result;
7255                 bpf_program_prep_t preprocessor = prog->preprocessor;
7256
7257                 memset(&result, 0, sizeof(result));
7258                 err = preprocessor(prog, i, prog->insns,
7259                                    prog->insns_cnt, &result);
7260                 if (err) {
7261                         pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
7262                                 i, prog->name);
7263                         goto out;
7264                 }
7265
7266                 if (!result.new_insn_ptr || !result.new_insn_cnt) {
7267                         pr_debug("Skip loading the %dth instance of program '%s'\n",
7268                                  i, prog->name);
7269                         prog->instances.fds[i] = -1;
7270                         if (result.pfd)
7271                                 *result.pfd = -1;
7272                         continue;
7273                 }
7274
7275                 err = load_program(prog, result.new_insn_ptr,
7276                                    result.new_insn_cnt, license, kern_ver, &fd);
7277                 if (err) {
7278                         pr_warn("Loading the %dth instance of program '%s' failed\n",
7279                                 i, prog->name);
7280                         goto out;
7281                 }
7282
7283                 if (result.pfd)
7284                         *result.pfd = fd;
7285                 prog->instances.fds[i] = fd;
7286         }
7287 out:
7288         if (err)
7289                 pr_warn("failed to load program '%s'\n", prog->name);
7290         zfree(&prog->insns);
7291         prog->insns_cnt = 0;
7292         return err;
7293 }
7294
7295 static int
7296 bpf_object__load_progs(struct bpf_object *obj, int log_level)
7297 {
7298         struct bpf_program *prog;
7299         size_t i;
7300         int err;
7301
7302         for (i = 0; i < obj->nr_programs; i++) {
7303                 prog = &obj->programs[i];
7304                 err = bpf_object__sanitize_prog(obj, prog);
7305                 if (err)
7306                         return err;
7307         }
7308
7309         for (i = 0; i < obj->nr_programs; i++) {
7310                 prog = &obj->programs[i];
7311                 if (prog_is_subprog(obj, prog))
7312                         continue;
7313                 if (!prog->load) {
7314                         pr_debug("prog '%s': skipped loading\n", prog->name);
7315                         continue;
7316                 }
7317                 prog->log_level |= log_level;
7318                 err = bpf_program__load(prog, obj->license, obj->kern_version);
7319                 if (err)
7320                         return err;
7321         }
7322         return 0;
7323 }
7324
7325 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7326
7327 static struct bpf_object *
7328 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7329                    const struct bpf_object_open_opts *opts)
7330 {
7331         const char *obj_name, *kconfig;
7332         struct bpf_program *prog;
7333         struct bpf_object *obj;
7334         char tmp_name[64];
7335         int err;
7336
7337         if (elf_version(EV_CURRENT) == EV_NONE) {
7338                 pr_warn("failed to init libelf for %s\n",
7339                         path ? : "(mem buf)");
7340                 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7341         }
7342
7343         if (!OPTS_VALID(opts, bpf_object_open_opts))
7344                 return ERR_PTR(-EINVAL);
7345
7346         obj_name = OPTS_GET(opts, object_name, NULL);
7347         if (obj_buf) {
7348                 if (!obj_name) {
7349                         snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7350                                  (unsigned long)obj_buf,
7351                                  (unsigned long)obj_buf_sz);
7352                         obj_name = tmp_name;
7353                 }
7354                 path = obj_name;
7355                 pr_debug("loading object '%s' from buffer\n", obj_name);
7356         }
7357
7358         obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7359         if (IS_ERR(obj))
7360                 return obj;
7361
7362         kconfig = OPTS_GET(opts, kconfig, NULL);
7363         if (kconfig) {
7364                 obj->kconfig = strdup(kconfig);
7365                 if (!obj->kconfig)
7366                         return ERR_PTR(-ENOMEM);
7367         }
7368
7369         err = bpf_object__elf_init(obj);
7370         err = err ? : bpf_object__check_endianness(obj);
7371         err = err ? : bpf_object__elf_collect(obj);
7372         err = err ? : bpf_object__collect_externs(obj);
7373         err = err ? : bpf_object__finalize_btf(obj);
7374         err = err ? : bpf_object__init_maps(obj, opts);
7375         err = err ? : bpf_object__collect_relos(obj);
7376         if (err)
7377                 goto out;
7378         bpf_object__elf_finish(obj);
7379
7380         bpf_object__for_each_program(prog, obj) {
7381                 prog->sec_def = find_sec_def(prog->sec_name);
7382                 if (!prog->sec_def) {
7383                         /* couldn't guess, but user might manually specify */
7384                         pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7385                                 prog->name, prog->sec_name);
7386                         continue;
7387                 }
7388
7389                 if (prog->sec_def->is_sleepable)
7390                         prog->prog_flags |= BPF_F_SLEEPABLE;
7391                 bpf_program__set_type(prog, prog->sec_def->prog_type);
7392                 bpf_program__set_expected_attach_type(prog,
7393                                 prog->sec_def->expected_attach_type);
7394
7395                 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
7396                     prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
7397                         prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
7398         }
7399
7400         return obj;
7401 out:
7402         bpf_object__close(obj);
7403         return ERR_PTR(err);
7404 }
7405
7406 static struct bpf_object *
7407 __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
7408 {
7409         DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7410                 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
7411         );
7412
7413         /* param validation */
7414         if (!attr->file)
7415                 return NULL;
7416
7417         pr_debug("loading %s\n", attr->file);
7418         return __bpf_object__open(attr->file, NULL, 0, &opts);
7419 }
7420
7421 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
7422 {
7423         return __bpf_object__open_xattr(attr, 0);
7424 }
7425
7426 struct bpf_object *bpf_object__open(const char *path)
7427 {
7428         struct bpf_object_open_attr attr = {
7429                 .file           = path,
7430                 .prog_type      = BPF_PROG_TYPE_UNSPEC,
7431         };
7432
7433         return bpf_object__open_xattr(&attr);
7434 }
7435
7436 struct bpf_object *
7437 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7438 {
7439         if (!path)
7440                 return ERR_PTR(-EINVAL);
7441
7442         pr_debug("loading %s\n", path);
7443
7444         return __bpf_object__open(path, NULL, 0, opts);
7445 }
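/* Editor's sketch (not part of libbpf): minimal open/load usage of the API
 * implemented above; "prog.bpf.o" is a placeholder object file name and
 * error handling is abbreviated.
 *
 *	struct bpf_object *obj;
 *	long err;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	err = libbpf_get_error(obj);
 *	if (err)
 *		return err;
 *
 *	err = bpf_object__load(obj);
 *	if (err) {
 *		bpf_object__close(obj);
 *		return err;
 *	}
 *	...
 *	bpf_object__close(obj);
 */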
7446
7447 struct bpf_object *
7448 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7449                      const struct bpf_object_open_opts *opts)
7450 {
7451         if (!obj_buf || obj_buf_sz == 0)
7452                 return ERR_PTR(-EINVAL);
7453
7454         return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
7455 }
7456
7457 struct bpf_object *
7458 bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7459                         const char *name)
7460 {
7461         DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7462                 .object_name = name,
7463                 /* wrong default, but backwards-compatible */
7464                 .relaxed_maps = true,
7465         );
7466
7467         /* returning NULL is wrong, but backwards-compatible */
7468         if (!obj_buf || obj_buf_sz == 0)
7469                 return NULL;
7470
7471         return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
7472 }
7473
7474 int bpf_object__unload(struct bpf_object *obj)
7475 {
7476         size_t i;
7477
7478         if (!obj)
7479                 return -EINVAL;
7480
7481         for (i = 0; i < obj->nr_maps; i++) {
7482                 zclose(obj->maps[i].fd);
7483                 if (obj->maps[i].st_ops)
7484                         zfree(&obj->maps[i].st_ops->kern_vdata);
7485         }
7486
7487         for (i = 0; i < obj->nr_programs; i++)
7488                 bpf_program__unload(&obj->programs[i]);
7489
7490         return 0;
7491 }
7492
7493 static int bpf_object__sanitize_maps(struct bpf_object *obj)
7494 {
7495         struct bpf_map *m;
7496
7497         bpf_object__for_each_map(m, obj) {
7498                 if (!bpf_map__is_internal(m))
7499                         continue;
7500                 if (!kernel_supports(FEAT_GLOBAL_DATA)) {
7501                         pr_warn("kernel doesn't support global data\n");
7502                         return -ENOTSUP;
7503                 }
7504                 if (!kernel_supports(FEAT_ARRAY_MMAP))
7505                         m->def.map_flags ^= BPF_F_MMAPABLE;
7506         }
7507
7508         return 0;
7509 }
7510
7511 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7512 {
7513         char sym_type, sym_name[500];
7514         unsigned long long sym_addr;
7515         const struct btf_type *t;
7516         struct extern_desc *ext;
7517         int ret, err = 0;
7518         FILE *f;
7519
7520         f = fopen("/proc/kallsyms", "r");
7521         if (!f) {
7522                 err = -errno;
7523                 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7524                 return err;
7525         }
7526
7527         while (true) {
7528                 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7529                              &sym_addr, &sym_type, sym_name);
7530                 if (ret == EOF && feof(f))
7531                         break;
7532                 if (ret != 3) {
7533                         pr_warn("failed to read kallsyms entry: %d\n", ret);
7534                         err = -EINVAL;
7535                         goto out;
7536                 }
7537
7538                 ext = find_extern_by_name(obj, sym_name);
7539                 if (!ext || ext->type != EXT_KSYM)
7540                         continue;
7541
7542                 t = btf__type_by_id(obj->btf, ext->btf_id);
7543                 if (!btf_is_var(t))
7544                         continue;
7545
7546                 if (ext->is_set && ext->ksym.addr != sym_addr) {
7547                         pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
7548                                 sym_name, ext->ksym.addr, sym_addr);
7549                         err = -EINVAL;
7550                         goto out;
7551                 }
7552                 if (!ext->is_set) {
7553                         ext->is_set = true;
7554                         ext->ksym.addr = sym_addr;
7555                         pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
7556                 }
7557         }
7558
7559 out:
7560         fclose(f);
7561         return err;
7562 }
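/* Editor's note: each /proc/kallsyms line parsed above has the form
 * "<hex address> <type char> <symbol name>[ <module>]", e.g. (the address is
 * illustrative):
 *
 *	ffffffff81234560 D vm_node_stat
 *
 * Only symbols matching an EXT_KSYM extern are recorded; two matches with
 * different addresses are treated as an ambiguous resolution and rejected.
 */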
7563
7564 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
7565                             __u16 kind, struct btf **res_btf,
7566                             int *res_btf_fd)
7567 {
7568         int i, id, btf_fd, err;
7569         struct btf *btf;
7570
7571         btf = obj->btf_vmlinux;
7572         btf_fd = 0;
7573         id = btf__find_by_name_kind(btf, ksym_name, kind);
7574
7575         if (id == -ENOENT) {
7576                 err = load_module_btfs(obj);
7577                 if (err)
7578                         return err;
7579
7580                 for (i = 0; i < obj->btf_module_cnt; i++) {
7581                         btf = obj->btf_modules[i].btf;
7582                         /* we assume module BTF FD is always >0 */
7583                         btf_fd = obj->btf_modules[i].fd;
7584                         id = btf__find_by_name_kind(btf, ksym_name, kind);
7585                         if (id != -ENOENT)
7586                                 break;
7587                 }
7588         }
7589         if (id <= 0) {
7590                 pr_warn("extern (%s ksym) '%s': failed to find BTF ID in kernel BTF(s).\n",
7591                         __btf_kind_str(kind), ksym_name);
7592                 return -ESRCH;
7593         }
7594
7595         *res_btf = btf;
7596         *res_btf_fd = btf_fd;
7597         return id;
7598 }
7599
7600 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
7601                                                struct extern_desc *ext)
7602 {
7603         const struct btf_type *targ_var, *targ_type;
7604         __u32 targ_type_id, local_type_id;
7605         const char *targ_var_name;
7606         int id, btf_fd = 0, err;
7607         struct btf *btf = NULL;
7608
7609         id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &btf_fd);
7610         if (id < 0)
7611                 return id;
7612
7613         /* find local type_id */
7614         local_type_id = ext->ksym.type_id;
7615
7616         /* find target type_id */
7617         targ_var = btf__type_by_id(btf, id);
7618         targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
7619         targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
7620
7621         err = bpf_core_types_are_compat(obj->btf, local_type_id,
7622                                         btf, targ_type_id);
7623         if (err <= 0) {
7624                 const struct btf_type *local_type;
7625                 const char *targ_name, *local_name;
7626
7627                 local_type = btf__type_by_id(obj->btf, local_type_id);
7628                 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
7629                 targ_name = btf__name_by_offset(btf, targ_type->name_off);
7630
7631                 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7632                         ext->name, local_type_id,
7633                         btf_kind_str(local_type), local_name, targ_type_id,
7634                         btf_kind_str(targ_type), targ_name);
7635                 return -EINVAL;
7636         }
7637
7638         ext->is_set = true;
7639         ext->ksym.kernel_btf_obj_fd = btf_fd;
7640         ext->ksym.kernel_btf_id = id;
7641         pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
7642                  ext->name, id, btf_kind_str(targ_var), targ_var_name);
7643
7644         return 0;
7645 }
7646
7647 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
7648                                                 struct extern_desc *ext)
7649 {
7650         int local_func_proto_id, kfunc_proto_id, kfunc_id;
7651         const struct btf_type *kern_func;
7652         struct btf *kern_btf = NULL;
7653         int ret, kern_btf_fd = 0;
7654
7655         local_func_proto_id = ext->ksym.type_id;
7656
7657         kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC,
7658                                     &kern_btf, &kern_btf_fd);
7659         if (kfunc_id < 0) {
7660                 pr_warn("extern (func ksym) '%s': not found in kernel BTF\n",
7661                         ext->name);
7662                 return kfunc_id;
7663         }
7664
7665         if (kern_btf != obj->btf_vmlinux) {
7666                 pr_warn("extern (func ksym) '%s': function in kernel module is not supported\n",
7667                         ext->name);
7668                 return -ENOTSUP;
7669         }
7670
7671         kern_func = btf__type_by_id(kern_btf, kfunc_id);
7672         kfunc_proto_id = kern_func->type;
7673
7674         ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
7675                                         kern_btf, kfunc_proto_id);
7676         if (ret <= 0) {
7677                 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n",
7678                         ext->name, local_func_proto_id, kfunc_proto_id);
7679                 return -EINVAL;
7680         }
7681
7682         ext->is_set = true;
7683         ext->ksym.kernel_btf_obj_fd = kern_btf_fd;
7684         ext->ksym.kernel_btf_id = kfunc_id;
7685         pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
7686                  ext->name, kfunc_id);
7687
7688         return 0;
7689 }
7690
7691 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7692 {
7693         const struct btf_type *t;
7694         struct extern_desc *ext;
7695         int i, err;
7696
7697         for (i = 0; i < obj->nr_extern; i++) {
7698                 ext = &obj->externs[i];
7699                 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7700                         continue;
7701
7702                 t = btf__type_by_id(obj->btf, ext->btf_id);
7703                 if (btf_is_var(t))
7704                         err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
7705                 else
7706                         err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
7707                 if (err)
7708                         return err;
7709         }
7710         return 0;
7711 }
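/* Editor's sketch (not part of libbpf): BPF-side extern declarations served
 * by the ksym resolution above, using the __ksym attribute macro from
 * bpf_helpers.h. A typed variable gets its BTF ID resolved here, while an
 * untyped (const void) one only needs a /proc/kallsyms address.
 *
 *	extern const struct rq runqueues __ksym;	// typed: resolved via kernel BTF
 *	extern const void bpf_link_fops __ksym;		// untyped: address only
 */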
7712
7713 static int bpf_object__resolve_externs(struct bpf_object *obj,
7714                                        const char *extra_kconfig)
7715 {
7716         bool need_config = false, need_kallsyms = false;
7717         bool need_vmlinux_btf = false;
7718         struct extern_desc *ext;
7719         void *kcfg_data = NULL;
7720         int err, i;
7721
7722         if (obj->nr_extern == 0)
7723                 return 0;
7724
7725         if (obj->kconfig_map_idx >= 0)
7726                 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7727
7728         for (i = 0; i < obj->nr_extern; i++) {
7729                 ext = &obj->externs[i];
7730
7731                 if (ext->type == EXT_KCFG &&
7732                     strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7733                         void *ext_val = kcfg_data + ext->kcfg.data_off;
7734                         __u32 kver = get_kernel_version();
7735
7736                         if (!kver) {
7737                                 pr_warn("failed to get kernel version\n");
7738                                 return -EINVAL;
7739                         }
7740                         err = set_kcfg_value_num(ext, ext_val, kver);
7741                         if (err)
7742                                 return err;
7743                         pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
7744                 } else if (ext->type == EXT_KCFG &&
7745                            strncmp(ext->name, "CONFIG_", 7) == 0) {
7746                         need_config = true;
7747                 } else if (ext->type == EXT_KSYM) {
7748                         if (ext->ksym.type_id)
7749                                 need_vmlinux_btf = true;
7750                         else
7751                                 need_kallsyms = true;
7752                 } else {
7753                         pr_warn("unrecognized extern '%s'\n", ext->name);
7754                         return -EINVAL;
7755                 }
7756         }
7757         if (need_config && extra_kconfig) {
7758                 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7759                 if (err)
7760                         return -EINVAL;
7761                 need_config = false;
7762                 for (i = 0; i < obj->nr_extern; i++) {
7763                         ext = &obj->externs[i];
7764                         if (ext->type == EXT_KCFG && !ext->is_set) {
7765                                 need_config = true;
7766                                 break;
7767                         }
7768                 }
7769         }
7770         if (need_config) {
7771                 err = bpf_object__read_kconfig_file(obj, kcfg_data);
7772                 if (err)
7773                         return -EINVAL;
7774         }
7775         if (need_kallsyms) {
7776                 err = bpf_object__read_kallsyms_file(obj);
7777                 if (err)
7778                         return -EINVAL;
7779         }
7780         if (need_vmlinux_btf) {
7781                 err = bpf_object__resolve_ksyms_btf_id(obj);
7782                 if (err)
7783                         return -EINVAL;
7784         }
7785         for (i = 0; i < obj->nr_extern; i++) {
7786                 ext = &obj->externs[i];
7787
7788                 if (!ext->is_set && !ext->is_weak) {
7789                         pr_warn("extern %s (strong) not resolved\n", ext->name);
7790                         return -ESRCH;
7791                 } else if (!ext->is_set) {
7792                         pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
7793                                  ext->name);
7794                 }
7795         }
7796
7797         return 0;
7798 }
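/* Editor's sketch (not part of libbpf): the kconfig-style externs resolved
 * above, as they would be declared in BPF C with the __kconfig macro from
 * bpf_helpers.h; exact types are illustrative. __weak externs that stay
 * unresolved default to zero instead of failing the load.
 *
 *	extern unsigned int LINUX_KERNEL_VERSION __kconfig;
 *	extern unsigned long CONFIG_HZ __kconfig;
 *	extern _Bool CONFIG_BPF_SYSCALL __kconfig __weak;
 */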
7799
7800 int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
7801 {
7802         struct bpf_object *obj;
7803         int err, i;
7804
7805         if (!attr)
7806                 return -EINVAL;
7807         obj = attr->obj;
7808         if (!obj)
7809                 return -EINVAL;
7810
7811         if (obj->loaded) {
7812                 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7813                 return -EINVAL;
7814         }
7815
7816         err = bpf_object__probe_loading(obj);
7817         err = err ? : bpf_object__load_vmlinux_btf(obj, false);
7818         err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7819         err = err ? : bpf_object__sanitize_and_load_btf(obj);
7820         err = err ? : bpf_object__sanitize_maps(obj);
7821         err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7822         err = err ? : bpf_object__create_maps(obj);
7823         err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
7824         err = err ? : bpf_object__load_progs(obj, attr->log_level);
7825
7826         /* clean up module BTFs */
7827         for (i = 0; i < obj->btf_module_cnt; i++) {
7828                 close(obj->btf_modules[i].fd);
7829                 btf__free(obj->btf_modules[i].btf);
7830                 free(obj->btf_modules[i].name);
7831         }
7832         free(obj->btf_modules);
7833
7834         /* clean up vmlinux BTF */
7835         btf__free(obj->btf_vmlinux);
7836         obj->btf_vmlinux = NULL;
7837
7838         obj->loaded = true; /* doesn't matter if successful or not */
7839
7840         if (err)
7841                 goto out;
7842
7843         return 0;
7844 out:
7845         /* unpin any maps that were auto-pinned during load */
7846         for (i = 0; i < obj->nr_maps; i++)
7847                 if (obj->maps[i].pinned && !obj->maps[i].reused)
7848                         bpf_map__unpin(&obj->maps[i], NULL);
7849
7850         bpf_object__unload(obj);
7851         pr_warn("failed to load object '%s'\n", obj->path);
7852         return err;
7853 }
7854
7855 int bpf_object__load(struct bpf_object *obj)
7856 {
7857         struct bpf_object_load_attr attr = {
7858                 .obj = obj,
7859         };
7860
7861         return bpf_object__load_xattr(&attr);
7862 }
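/* Editor's sketch (not part of libbpf): requesting verifier logs at load time
 * through the xattr variant above; the log is then emitted via pr_debug().
 *
 *	struct bpf_object_load_attr load_attr = {
 *		.obj = obj,
 *		.log_level = 1,
 *	};
 *
 *	err = bpf_object__load_xattr(&load_attr);
 */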
7863
7864 static int make_parent_dir(const char *path)
7865 {
7866         char *cp, errmsg[STRERR_BUFSIZE];
7867         char *dname, *dir;
7868         int err = 0;
7869
7870         dname = strdup(path);
7871         if (dname == NULL)
7872                 return -ENOMEM;
7873
7874         dir = dirname(dname);
7875         if (mkdir(dir, 0700) && errno != EEXIST)
7876                 err = -errno;
7877
7878         free(dname);
7879         if (err) {
7880                 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7881                 pr_warn("failed to mkdir %s: %s\n", path, cp);
7882         }
7883         return err;
7884 }
7885
7886 static int check_path(const char *path)
7887 {
7888         char *cp, errmsg[STRERR_BUFSIZE];
7889         struct statfs st_fs;
7890         char *dname, *dir;
7891         int err = 0;
7892
7893         if (path == NULL)
7894                 return -EINVAL;
7895
7896         dname = strdup(path);
7897         if (dname == NULL)
7898                 return -ENOMEM;
7899
7900         dir = dirname(dname);
7901         if (statfs(dir, &st_fs)) {
7902                 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7903                 pr_warn("failed to statfs %s: %s\n", dir, cp);
7904                 err = -errno;
7905         }
7906         free(dname);
7907
7908         if (!err && st_fs.f_type != BPF_FS_MAGIC) {
7909                 pr_warn("specified path %s is not on BPF FS\n", path);
7910                 err = -EINVAL;
7911         }
7912
7913         return err;
7914 }
7915
7916 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
7917                               int instance)
7918 {
7919         char *cp, errmsg[STRERR_BUFSIZE];
7920         int err;
7921
7922         err = make_parent_dir(path);
7923         if (err)
7924                 return err;
7925
7926         err = check_path(path);
7927         if (err)
7928                 return err;
7929
7930         if (prog == NULL) {
7931                 pr_warn("invalid program pointer\n");
7932                 return -EINVAL;
7933         }
7934
7935         if (instance < 0 || instance >= prog->instances.nr) {
7936                 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7937                         instance, prog->name, prog->instances.nr);
7938                 return -EINVAL;
7939         }
7940
7941         if (bpf_obj_pin(prog->instances.fds[instance], path)) {
7942                 err = -errno;
7943                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
7944                 pr_warn("failed to pin program: %s\n", cp);
7945                 return err;
7946         }
7947         pr_debug("pinned program '%s'\n", path);
7948
7949         return 0;
7950 }
7951
7952 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
7953                                 int instance)
7954 {
7955         int err;
7956
7957         err = check_path(path);
7958         if (err)
7959                 return err;
7960
7961         if (prog == NULL) {
7962                 pr_warn("invalid program pointer\n");
7963                 return -EINVAL;
7964         }
7965
7966         if (instance < 0 || instance >= prog->instances.nr) {
7967                 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7968                         instance, prog->name, prog->instances.nr);
7969                 return -EINVAL;
7970         }
7971
7972         err = unlink(path);
7973         if (err != 0)
7974                 return -errno;
7975         pr_debug("unpinned program '%s'\n", path);
7976
7977         return 0;
7978 }
7979
7980 int bpf_program__pin(struct bpf_program *prog, const char *path)
7981 {
7982         int i, err;
7983
7984         err = make_parent_dir(path);
7985         if (err)
7986                 return err;
7987
7988         err = check_path(path);
7989         if (err)
7990                 return err;
7991
7992         if (prog == NULL) {
7993                 pr_warn("invalid program pointer\n");
7994                 return -EINVAL;
7995         }
7996
7997         if (prog->instances.nr <= 0) {
7998                 pr_warn("no instances of prog %s to pin\n", prog->name);
7999                 return -EINVAL;
8000         }
8001
8002         if (prog->instances.nr == 1) {
8003                 /* don't create subdirs when pinning single instance */
8004                 return bpf_program__pin_instance(prog, path, 0);
8005         }
8006
8007         for (i = 0; i < prog->instances.nr; i++) {
8008                 char buf[PATH_MAX];
8009                 int len;
8010
8011                 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
8012                 if (len < 0) {
8013                         err = -EINVAL;
8014                         goto err_unpin;
8015                 } else if (len >= PATH_MAX) {
8016                         err = -ENAMETOOLONG;
8017                         goto err_unpin;
8018                 }
8019
8020                 err = bpf_program__pin_instance(prog, buf, i);
8021                 if (err)
8022                         goto err_unpin;
8023         }
8024
8025         return 0;
8026
8027 err_unpin:
8028         for (i = i - 1; i >= 0; i--) {
8029                 char buf[PATH_MAX];
8030                 int len;
8031
8032                 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
8033                 if (len < 0)
8034                         continue;
8035                 else if (len >= PATH_MAX)
8036                         continue;
8037
8038                 bpf_program__unpin_instance(prog, buf, i);
8039         }
8040
8041         rmdir(path);
8042
8043         return err;
8044 }
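/* Editor's sketch (not part of libbpf): a typical pin/unpin round trip using
 * the helpers above; the path is a placeholder, and its parent directory must
 * be on a BPF filesystem mount (see check_path()).
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *	if (err)
 *		return err;
 *	...
 *	err = bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
 */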
8045
8046 int bpf_program__unpin(struct bpf_program *prog, const char *path)
8047 {
8048         int i, err;
8049
8050         err = check_path(path);
8051         if (err)
8052                 return err;
8053
8054         if (prog == NULL) {
8055                 pr_warn("invalid program pointer\n");
8056                 return -EINVAL;
8057         }
8058
8059         if (prog->instances.nr <= 0) {
8060                 pr_warn("no instances of prog %s to unpin\n", prog->name);
8061                 return -EINVAL;
8062         }
8063
8064         if (prog->instances.nr == 1) {
8065                 /* no subdirs were created when pinning a single instance */
8066                 return bpf_program__unpin_instance(prog, path, 0);
8067         }
8068
8069         for (i = 0; i < prog->instances.nr; i++) {
8070                 char buf[PATH_MAX];
8071                 int len;
8072
8073                 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
8074                 if (len < 0)
8075                         return -EINVAL;
8076                 else if (len >= PATH_MAX)
8077                         return -ENAMETOOLONG;
8078
8079                 err = bpf_program__unpin_instance(prog, buf, i);
8080                 if (err)
8081                         return err;
8082         }
8083
8084         err = rmdir(path);
8085         if (err)
8086                 return -errno;
8087
8088         return 0;
8089 }
8090
8091 int bpf_map__pin(struct bpf_map *map, const char *path)
8092 {
8093         char *cp, errmsg[STRERR_BUFSIZE];
8094         int err;
8095
8096         if (map == NULL) {
8097                 pr_warn("invalid map pointer\n");
8098                 return -EINVAL;
8099         }
8100
8101         if (map->pin_path) {
8102                 if (path && strcmp(path, map->pin_path)) {
8103                         pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8104                                 bpf_map__name(map), map->pin_path, path);
8105                         return -EINVAL;
8106                 } else if (map->pinned) {
8107                         pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
8108                                  bpf_map__name(map), map->pin_path);
8109                         return 0;
8110                 }
8111         } else {
8112                 if (!path) {
8113                         pr_warn("missing a path to pin map '%s' at\n",
8114                                 bpf_map__name(map));
8115                         return -EINVAL;
8116                 } else if (map->pinned) {
8117                         pr_warn("map '%s' already pinned\n", bpf_map__name(map));
8118                         return -EEXIST;
8119                 }
8120
8121                 map->pin_path = strdup(path);
8122                 if (!map->pin_path) {
8123                         err = -errno;
8124                         goto out_err;
8125                 }
8126         }
8127
8128         err = make_parent_dir(map->pin_path);
8129         if (err)
8130                 return err;
8131
8132         err = check_path(map->pin_path);
8133         if (err)
8134                 return err;
8135
8136         if (bpf_obj_pin(map->fd, map->pin_path)) {
8137                 err = -errno;
8138                 goto out_err;
8139         }
8140
8141         map->pinned = true;
8142         pr_debug("pinned map '%s'\n", map->pin_path);
8143
8144         return 0;
8145
8146 out_err:
8147         cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8148         pr_warn("failed to pin map: %s\n", cp);
8149         return err;
8150 }
8151
8152 int bpf_map__unpin(struct bpf_map *map, const char *path)
8153 {
8154         int err;
8155
8156         if (map == NULL) {
8157                 pr_warn("invalid map pointer\n");
8158                 return -EINVAL;
8159         }
8160
8161         if (map->pin_path) {
8162                 if (path && strcmp(path, map->pin_path)) {
8163                         pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8164                                 bpf_map__name(map), map->pin_path, path);
8165                         return -EINVAL;
8166                 }
8167                 path = map->pin_path;
8168         } else if (!path) {
8169                 pr_warn("no path to unpin map '%s' from\n",
8170                         bpf_map__name(map));
8171                 return -EINVAL;
8172         }
8173
8174         err = check_path(path);
8175         if (err)
8176                 return err;
8177
8178         err = unlink(path);
8179         if (err != 0)
8180                 return -errno;
8181
8182         map->pinned = false;
8183         pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
8184
8185         return 0;
8186 }
8187
8188 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
8189 {
8190         char *new = NULL;
8191
8192         if (path) {
8193                 new = strdup(path);
8194                 if (!new)
8195                         return -errno;
8196         }
8197
8198         free(map->pin_path);
8199         map->pin_path = new;
8200         return 0;
8201 }
8202
8203 const char *bpf_map__get_pin_path(const struct bpf_map *map)
8204 {
8205         return map->pin_path;
8206 }
8207
8208 bool bpf_map__is_pinned(const struct bpf_map *map)
8209 {
8210         return map->pinned;
8211 }
8212
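/* Illustrative usage sketch (not part of libbpf): a pin path can either be
 * set before bpf_object__load(), in which case the map is pinned (or an
 * existing pin reused) automatically during load, or the map can be pinned
 * explicitly afterwards. Map name and path below are hypothetical.
 *
 *        struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *        if (map)
 *                bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *        ...
 *        if (map && !bpf_map__is_pinned(map))
 *                bpf_map__pin(map, NULL);
 */
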
8213 static void sanitize_pin_path(char *s)
8214 {
8215         /* bpffs disallows periods in path names */
8216         while (*s) {
8217                 if (*s == '.')
8218                         *s = '_';
8219                 s++;
8220         }
8221 }
8222
8223 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
8224 {
8225         struct bpf_map *map;
8226         int err;
8227
8228         if (!obj)
8229                 return -ENOENT;
8230
8231         if (!obj->loaded) {
8232                 pr_warn("object not yet loaded; load it first\n");
8233                 return -ENOENT;
8234         }
8235
8236         bpf_object__for_each_map(map, obj) {
8237                 char *pin_path = NULL;
8238                 char buf[PATH_MAX];
8239
8240                 if (path) {
8241                         int len;
8242
8243                         len = snprintf(buf, PATH_MAX, "%s/%s", path,
8244                                        bpf_map__name(map));
8245                         if (len < 0) {
8246                                 err = -EINVAL;
8247                                 goto err_unpin_maps;
8248                         } else if (len >= PATH_MAX) {
8249                                 err = -ENAMETOOLONG;
8250                                 goto err_unpin_maps;
8251                         }
8252                         sanitize_pin_path(buf);
8253                         pin_path = buf;
8254                 } else if (!map->pin_path) {
8255                         continue;
8256                 }
8257
8258                 err = bpf_map__pin(map, pin_path);
8259                 if (err)
8260                         goto err_unpin_maps;
8261         }
8262
8263         return 0;
8264
8265 err_unpin_maps:
8266         while ((map = bpf_map__prev(map, obj))) {
8267                 if (!map->pin_path)
8268                         continue;
8269
8270                 bpf_map__unpin(map, NULL);
8271         }
8272
8273         return err;
8274 }
8275
8276 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8277 {
8278         struct bpf_map *map;
8279         int err;
8280
8281         if (!obj)
8282                 return -ENOENT;
8283
8284         bpf_object__for_each_map(map, obj) {
8285                 char *pin_path = NULL;
8286                 char buf[PATH_MAX];
8287
8288                 if (path) {
8289                         int len;
8290
8291                         len = snprintf(buf, PATH_MAX, "%s/%s", path,
8292                                        bpf_map__name(map));
8293                         if (len < 0)
8294                                 return -EINVAL;
8295                         else if (len >= PATH_MAX)
8296                                 return -ENAMETOOLONG;
8297                         sanitize_pin_path(buf);
8298                         pin_path = buf;
8299                 } else if (!map->pin_path) {
8300                         continue;
8301                 }
8302
8303                 err = bpf_map__unpin(map, pin_path);
8304                 if (err)
8305                         return err;
8306         }
8307
8308         return 0;
8309 }
8310
8311 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8312 {
8313         struct bpf_program *prog;
8314         int err;
8315
8316         if (!obj)
8317                 return -ENOENT;
8318
8319         if (!obj->loaded) {
8320                 pr_warn("object not yet loaded; load it first\n");
8321                 return -ENOENT;
8322         }
8323
8324         bpf_object__for_each_program(prog, obj) {
8325                 char buf[PATH_MAX];
8326                 int len;
8327
8328                 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8329                                prog->pin_name);
8330                 if (len < 0) {
8331                         err = -EINVAL;
8332                         goto err_unpin_programs;
8333                 } else if (len >= PATH_MAX) {
8334                         err = -ENAMETOOLONG;
8335                         goto err_unpin_programs;
8336                 }
8337
8338                 err = bpf_program__pin(prog, buf);
8339                 if (err)
8340                         goto err_unpin_programs;
8341         }
8342
8343         return 0;
8344
8345 err_unpin_programs:
8346         while ((prog = bpf_program__prev(prog, obj))) {
8347                 char buf[PATH_MAX];
8348                 int len;
8349
8350                 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8351                                prog->pin_name);
8352                 if (len < 0)
8353                         continue;
8354                 else if (len >= PATH_MAX)
8355                         continue;
8356
8357                 bpf_program__unpin(prog, buf);
8358         }
8359
8360         return err;
8361 }
8362
8363 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8364 {
8365         struct bpf_program *prog;
8366         int err;
8367
8368         if (!obj)
8369                 return -ENOENT;
8370
8371         bpf_object__for_each_program(prog, obj) {
8372                 char buf[PATH_MAX];
8373                 int len;
8374
8375                 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8376                                prog->pin_name);
8377                 if (len < 0)
8378                         return -EINVAL;
8379                 else if (len >= PATH_MAX)
8380                         return -ENAMETOOLONG;
8381
8382                 err = bpf_program__unpin(prog, buf);
8383                 if (err)
8384                         return err;
8385         }
8386
8387         return 0;
8388 }
8389
8390 int bpf_object__pin(struct bpf_object *obj, const char *path)
8391 {
8392         int err;
8393
8394         err = bpf_object__pin_maps(obj, path);
8395         if (err)
8396                 return err;
8397
8398         err = bpf_object__pin_programs(obj, path);
8399         if (err) {
8400                 bpf_object__unpin_maps(obj, path);
8401                 return err;
8402         }
8403
8404         return 0;
8405 }
8406
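/* Illustrative usage sketch: pin all maps and programs of a loaded object
 * under a single bpffs directory. The directory name is hypothetical; if
 * pinning the programs fails, bpf_object__pin() unpins the maps again, as
 * implemented above.
 *
 *        err = bpf_object__pin(obj, "/sys/fs/bpf/my_obj");
 *        if (err)
 *                fprintf(stderr, "failed to pin object: %d\n", err);
 */
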
8407 static void bpf_map__destroy(struct bpf_map *map)
8408 {
8409         if (map->clear_priv)
8410                 map->clear_priv(map, map->priv);
8411         map->priv = NULL;
8412         map->clear_priv = NULL;
8413
8414         if (map->inner_map) {
8415                 bpf_map__destroy(map->inner_map);
8416                 zfree(&map->inner_map);
8417         }
8418
8419         zfree(&map->init_slots);
8420         map->init_slots_sz = 0;
8421
8422         if (map->mmaped) {
8423                 munmap(map->mmaped, bpf_map_mmap_sz(map));
8424                 map->mmaped = NULL;
8425         }
8426
8427         if (map->st_ops) {
8428                 zfree(&map->st_ops->data);
8429                 zfree(&map->st_ops->progs);
8430                 zfree(&map->st_ops->kern_func_off);
8431                 zfree(&map->st_ops);
8432         }
8433
8434         zfree(&map->name);
8435         zfree(&map->pin_path);
8436
8437         if (map->fd >= 0)
8438                 zclose(map->fd);
8439 }
8440
8441 void bpf_object__close(struct bpf_object *obj)
8442 {
8443         size_t i;
8444
8445         if (IS_ERR_OR_NULL(obj))
8446                 return;
8447
8448         if (obj->clear_priv)
8449                 obj->clear_priv(obj, obj->priv);
8450
8451         bpf_object__elf_finish(obj);
8452         bpf_object__unload(obj);
8453         btf__free(obj->btf);
8454         btf_ext__free(obj->btf_ext);
8455
8456         for (i = 0; i < obj->nr_maps; i++)
8457                 bpf_map__destroy(&obj->maps[i]);
8458
8459         zfree(&obj->kconfig);
8460         zfree(&obj->externs);
8461         obj->nr_extern = 0;
8462
8463         zfree(&obj->maps);
8464         obj->nr_maps = 0;
8465
8466         if (obj->programs && obj->nr_programs) {
8467                 for (i = 0; i < obj->nr_programs; i++)
8468                         bpf_program__exit(&obj->programs[i]);
8469         }
8470         zfree(&obj->programs);
8471
8472         list_del(&obj->list);
8473         free(obj);
8474 }
8475
8476 struct bpf_object *
8477 bpf_object__next(struct bpf_object *prev)
8478 {
8479         struct bpf_object *next;
8480
8481         if (!prev)
8482                 next = list_first_entry(&bpf_objects_list,
8483                                         struct bpf_object,
8484                                         list);
8485         else
8486                 next = list_next_entry(prev, list);
8487
8488         /* An empty list is detected here, so no check is needed on entry. */
8489         if (&next->list == &bpf_objects_list)
8490                 return NULL;
8491
8492         return next;
8493 }
8494
8495 const char *bpf_object__name(const struct bpf_object *obj)
8496 {
8497         return obj ? obj->name : ERR_PTR(-EINVAL);
8498 }
8499
8500 unsigned int bpf_object__kversion(const struct bpf_object *obj)
8501 {
8502         return obj ? obj->kern_version : 0;
8503 }
8504
8505 struct btf *bpf_object__btf(const struct bpf_object *obj)
8506 {
8507         return obj ? obj->btf : NULL;
8508 }
8509
8510 int bpf_object__btf_fd(const struct bpf_object *obj)
8511 {
8512         return obj->btf ? btf__fd(obj->btf) : -1;
8513 }
8514
8515 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8516 {
8517         if (obj->loaded)
8518                 return -EINVAL;
8519
8520         obj->kern_version = kern_version;
8521
8522         return 0;
8523 }
8524
8525 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
8526                          bpf_object_clear_priv_t clear_priv)
8527 {
8528         if (obj->priv && obj->clear_priv)
8529                 obj->clear_priv(obj, obj->priv);
8530
8531         obj->priv = priv;
8532         obj->clear_priv = clear_priv;
8533         return 0;
8534 }
8535
8536 void *bpf_object__priv(const struct bpf_object *obj)
8537 {
8538         return obj ? obj->priv : ERR_PTR(-EINVAL);
8539 }
8540
8541 static struct bpf_program *
8542 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8543                     bool forward)
8544 {
8545         size_t nr_programs = obj->nr_programs;
8546         ssize_t idx;
8547
8548         if (!nr_programs)
8549                 return NULL;
8550
8551         if (!p)
8552                 /* Iter from the beginning */
8553                 return forward ? &obj->programs[0] :
8554                         &obj->programs[nr_programs - 1];
8555
8556         if (p->obj != obj) {
8557                 pr_warn("error: program handle doesn't match object\n");
8558                 return NULL;
8559         }
8560
8561         idx = (p - obj->programs) + (forward ? 1 : -1);
8562         if (idx >= obj->nr_programs || idx < 0)
8563                 return NULL;
8564         return &obj->programs[idx];
8565 }
8566
8567 struct bpf_program *
8568 bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8569 {
8570         struct bpf_program *prog = prev;
8571
8572         do {
8573                 prog = __bpf_program__iter(prog, obj, true);
8574         } while (prog && prog_is_subprog(obj, prog));
8575
8576         return prog;
8577 }
8578
8579 struct bpf_program *
8580 bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8581 {
8582         struct bpf_program *prog = next;
8583
8584         do {
8585                 prog = __bpf_program__iter(prog, obj, false);
8586         } while (prog && prog_is_subprog(obj, prog));
8587
8588         return prog;
8589 }
8590
8591 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
8592                           bpf_program_clear_priv_t clear_priv)
8593 {
8594         if (prog->priv && prog->clear_priv)
8595                 prog->clear_priv(prog, prog->priv);
8596
8597         prog->priv = priv;
8598         prog->clear_priv = clear_priv;
8599         return 0;
8600 }
8601
8602 void *bpf_program__priv(const struct bpf_program *prog)
8603 {
8604         return prog ? prog->priv : ERR_PTR(-EINVAL);
8605 }
8606
8607 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8608 {
8609         prog->prog_ifindex = ifindex;
8610 }
8611
8612 const char *bpf_program__name(const struct bpf_program *prog)
8613 {
8614         return prog->name;
8615 }
8616
8617 const char *bpf_program__section_name(const struct bpf_program *prog)
8618 {
8619         return prog->sec_name;
8620 }
8621
8622 const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8623 {
8624         const char *title;
8625
8626         title = prog->sec_name;
8627         if (needs_copy) {
8628                 title = strdup(title);
8629                 if (!title) {
8630                         pr_warn("failed to strdup program title\n");
8631                         return ERR_PTR(-ENOMEM);
8632                 }
8633         }
8634
8635         return title;
8636 }
8637
8638 bool bpf_program__autoload(const struct bpf_program *prog)
8639 {
8640         return prog->load;
8641 }
8642
8643 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8644 {
8645         if (prog->obj->loaded)
8646                 return -EINVAL;
8647
8648         prog->load = autoload;
8649         return 0;
8650 }
8651
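/* Illustrative usage sketch: autoload can be cleared per program before
 * bpf_object__load() so that only selected programs are loaded into the
 * kernel. The section name checked below is hypothetical.
 *
 *        struct bpf_program *prog;
 *
 *        bpf_object__for_each_program(prog, obj) {
 *                if (strcmp(bpf_program__section_name(prog), "xdp") != 0)
 *                        bpf_program__set_autoload(prog, false);
 *        }
 */
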
8652 int bpf_program__fd(const struct bpf_program *prog)
8653 {
8654         return bpf_program__nth_fd(prog, 0);
8655 }
8656
8657 size_t bpf_program__size(const struct bpf_program *prog)
8658 {
8659         return prog->insns_cnt * BPF_INSN_SZ;
8660 }
8661
8662 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8663                           bpf_program_prep_t prep)
8664 {
8665         int *instances_fds;
8666
8667         if (nr_instances <= 0 || !prep)
8668                 return -EINVAL;
8669
8670         if (prog->instances.nr > 0 || prog->instances.fds) {
8671                 pr_warn("Can't set pre-processor after loading\n");
8672                 return -EINVAL;
8673         }
8674
8675         instances_fds = malloc(sizeof(int) * nr_instances);
8676         if (!instances_fds) {
8677                 pr_warn("failed to allocate memory for fds\n");
8678                 return -ENOMEM;
8679         }
8680
8681         /* fill all fds with -1 */
8682         memset(instances_fds, -1, sizeof(int) * nr_instances);
8683
8684         prog->instances.nr = nr_instances;
8685         prog->instances.fds = instances_fds;
8686         prog->preprocessor = prep;
8687         return 0;
8688 }
8689
8690 int bpf_program__nth_fd(const struct bpf_program *prog, int n)
8691 {
8692         int fd;
8693
8694         if (!prog)
8695                 return -EINVAL;
8696
8697         if (n >= prog->instances.nr || n < 0) {
8698                 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
8699                         n, prog->name, prog->instances.nr);
8700                 return -EINVAL;
8701         }
8702
8703         fd = prog->instances.fds[n];
8704         if (fd < 0) {
8705                 pr_warn("%dth instance of program '%s' is invalid\n",
8706                         n, prog->name);
8707                 return -ENOENT;
8708         }
8709
8710         return fd;
8711 }
8712
8713 enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog)
8714 {
8715         return prog->type;
8716 }
8717
8718 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8719 {
8720         prog->type = type;
8721 }
8722
8723 static bool bpf_program__is_type(const struct bpf_program *prog,
8724                                  enum bpf_prog_type type)
8725 {
8726         return prog ? (prog->type == type) : false;
8727 }
8728
8729 #define BPF_PROG_TYPE_FNS(NAME, TYPE)                           \
8730 int bpf_program__set_##NAME(struct bpf_program *prog)           \
8731 {                                                               \
8732         if (!prog)                                              \
8733                 return -EINVAL;                                 \
8734         bpf_program__set_type(prog, TYPE);                      \
8735         return 0;                                               \
8736 }                                                               \
8737                                                                 \
8738 bool bpf_program__is_##NAME(const struct bpf_program *prog)     \
8739 {                                                               \
8740         return bpf_program__is_type(prog, TYPE);                \
8741 }                                                               \
8742
8743 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
8744 BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
8745 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
8746 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
8747 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
8748 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
8749 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
8750 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
8751 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
8752 BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
8753 BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
8754 BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
8755 BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
8756
8757 enum bpf_attach_type
8758 bpf_program__get_expected_attach_type(const struct bpf_program *prog)
8759 {
8760         return prog->expected_attach_type;
8761 }
8762
8763 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
8764                                            enum bpf_attach_type type)
8765 {
8766         prog->expected_attach_type = type;
8767 }
8768
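/* Illustrative usage sketch: when the ELF section name alone does not
 * determine them, program type and expected attach type can be forced
 * before load. The concrete types below are only an example.
 *
 *        bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
 *        bpf_program__set_expected_attach_type(prog, BPF_CGROUP_INET4_CONNECT);
 */
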
8769 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional,           \
8770                           attachable, attach_btf)                           \
8771         {                                                                   \
8772                 .sec = string,                                              \
8773                 .len = sizeof(string) - 1,                                  \
8774                 .prog_type = ptype,                                         \
8775                 .expected_attach_type = eatype,                             \
8776                 .is_exp_attach_type_optional = eatype_optional,             \
8777                 .is_attachable = attachable,                                \
8778                 .is_attach_btf = attach_btf,                                \
8779         }
8780
8781 /* Programs that can NOT be attached. */
8782 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
8783
8784 /* Programs that can be attached. */
8785 #define BPF_APROG_SEC(string, ptype, atype) \
8786         BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
8787
8788 /* Programs that must specify expected attach type at load time. */
8789 #define BPF_EAPROG_SEC(string, ptype, eatype) \
8790         BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
8791
8792 /* Programs that use BTF to identify attach point */
8793 #define BPF_PROG_BTF(string, ptype, eatype) \
8794         BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
8795
8796 /* Programs that can be attached but attach type can't be identified by section
8797  * name. Kept for backward compatibility.
8798  */
8799 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
8800
8801 #define SEC_DEF(sec_pfx, ptype, ...) {                                      \
8802         .sec = sec_pfx,                                                     \
8803         .len = sizeof(sec_pfx) - 1,                                         \
8804         .prog_type = BPF_PROG_TYPE_##ptype,                                 \
8805         __VA_ARGS__                                                         \
8806 }
8807
8808 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
8809                                       struct bpf_program *prog);
8810 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
8811                                   struct bpf_program *prog);
8812 static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
8813                                       struct bpf_program *prog);
8814 static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
8815                                      struct bpf_program *prog);
8816 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
8817                                    struct bpf_program *prog);
8818 static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
8819                                     struct bpf_program *prog);
8820
8821 static const struct bpf_sec_def section_defs[] = {
8822         BPF_PROG_SEC("socket",                  BPF_PROG_TYPE_SOCKET_FILTER),
8823         BPF_PROG_SEC("sk_reuseport",            BPF_PROG_TYPE_SK_REUSEPORT),
8824         SEC_DEF("kprobe/", KPROBE,
8825                 .attach_fn = attach_kprobe),
8826         BPF_PROG_SEC("uprobe/",                 BPF_PROG_TYPE_KPROBE),
8827         SEC_DEF("kretprobe/", KPROBE,
8828                 .attach_fn = attach_kprobe),
8829         BPF_PROG_SEC("uretprobe/",              BPF_PROG_TYPE_KPROBE),
8830         BPF_PROG_SEC("classifier",              BPF_PROG_TYPE_SCHED_CLS),
8831         BPF_PROG_SEC("action",                  BPF_PROG_TYPE_SCHED_ACT),
8832         SEC_DEF("tracepoint/", TRACEPOINT,
8833                 .attach_fn = attach_tp),
8834         SEC_DEF("tp/", TRACEPOINT,
8835                 .attach_fn = attach_tp),
8836         SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
8837                 .attach_fn = attach_raw_tp),
8838         SEC_DEF("raw_tp/", RAW_TRACEPOINT,
8839                 .attach_fn = attach_raw_tp),
8840         SEC_DEF("tp_btf/", TRACING,
8841                 .expected_attach_type = BPF_TRACE_RAW_TP,
8842                 .is_attach_btf = true,
8843                 .attach_fn = attach_trace),
8844         SEC_DEF("fentry/", TRACING,
8845                 .expected_attach_type = BPF_TRACE_FENTRY,
8846                 .is_attach_btf = true,
8847                 .attach_fn = attach_trace),
8848         SEC_DEF("fmod_ret/", TRACING,
8849                 .expected_attach_type = BPF_MODIFY_RETURN,
8850                 .is_attach_btf = true,
8851                 .attach_fn = attach_trace),
8852         SEC_DEF("fexit/", TRACING,
8853                 .expected_attach_type = BPF_TRACE_FEXIT,
8854                 .is_attach_btf = true,
8855                 .attach_fn = attach_trace),
8856         SEC_DEF("fentry.s/", TRACING,
8857                 .expected_attach_type = BPF_TRACE_FENTRY,
8858                 .is_attach_btf = true,
8859                 .is_sleepable = true,
8860                 .attach_fn = attach_trace),
8861         SEC_DEF("fmod_ret.s/", TRACING,
8862                 .expected_attach_type = BPF_MODIFY_RETURN,
8863                 .is_attach_btf = true,
8864                 .is_sleepable = true,
8865                 .attach_fn = attach_trace),
8866         SEC_DEF("fexit.s/", TRACING,
8867                 .expected_attach_type = BPF_TRACE_FEXIT,
8868                 .is_attach_btf = true,
8869                 .is_sleepable = true,
8870                 .attach_fn = attach_trace),
8871         SEC_DEF("freplace/", EXT,
8872                 .is_attach_btf = true,
8873                 .attach_fn = attach_trace),
8874         SEC_DEF("lsm/", LSM,
8875                 .is_attach_btf = true,
8876                 .expected_attach_type = BPF_LSM_MAC,
8877                 .attach_fn = attach_lsm),
8878         SEC_DEF("lsm.s/", LSM,
8879                 .is_attach_btf = true,
8880                 .is_sleepable = true,
8881                 .expected_attach_type = BPF_LSM_MAC,
8882                 .attach_fn = attach_lsm),
8883         SEC_DEF("iter/", TRACING,
8884                 .expected_attach_type = BPF_TRACE_ITER,
8885                 .is_attach_btf = true,
8886                 .attach_fn = attach_iter),
8887         BPF_EAPROG_SEC("xdp_devmap/",           BPF_PROG_TYPE_XDP,
8888                                                 BPF_XDP_DEVMAP),
8889         BPF_EAPROG_SEC("xdp_cpumap/",           BPF_PROG_TYPE_XDP,
8890                                                 BPF_XDP_CPUMAP),
8891         BPF_APROG_SEC("xdp",                    BPF_PROG_TYPE_XDP,
8892                                                 BPF_XDP),
8893         BPF_PROG_SEC("perf_event",              BPF_PROG_TYPE_PERF_EVENT),
8894         BPF_PROG_SEC("lwt_in",                  BPF_PROG_TYPE_LWT_IN),
8895         BPF_PROG_SEC("lwt_out",                 BPF_PROG_TYPE_LWT_OUT),
8896         BPF_PROG_SEC("lwt_xmit",                BPF_PROG_TYPE_LWT_XMIT),
8897         BPF_PROG_SEC("lwt_seg6local",           BPF_PROG_TYPE_LWT_SEG6LOCAL),
8898         BPF_APROG_SEC("cgroup_skb/ingress",     BPF_PROG_TYPE_CGROUP_SKB,
8899                                                 BPF_CGROUP_INET_INGRESS),
8900         BPF_APROG_SEC("cgroup_skb/egress",      BPF_PROG_TYPE_CGROUP_SKB,
8901                                                 BPF_CGROUP_INET_EGRESS),
8902         BPF_APROG_COMPAT("cgroup/skb",          BPF_PROG_TYPE_CGROUP_SKB),
8903         BPF_EAPROG_SEC("cgroup/sock_create",    BPF_PROG_TYPE_CGROUP_SOCK,
8904                                                 BPF_CGROUP_INET_SOCK_CREATE),
8905         BPF_EAPROG_SEC("cgroup/sock_release",   BPF_PROG_TYPE_CGROUP_SOCK,
8906                                                 BPF_CGROUP_INET_SOCK_RELEASE),
8907         BPF_APROG_SEC("cgroup/sock",            BPF_PROG_TYPE_CGROUP_SOCK,
8908                                                 BPF_CGROUP_INET_SOCK_CREATE),
8909         BPF_EAPROG_SEC("cgroup/post_bind4",     BPF_PROG_TYPE_CGROUP_SOCK,
8910                                                 BPF_CGROUP_INET4_POST_BIND),
8911         BPF_EAPROG_SEC("cgroup/post_bind6",     BPF_PROG_TYPE_CGROUP_SOCK,
8912                                                 BPF_CGROUP_INET6_POST_BIND),
8913         BPF_APROG_SEC("cgroup/dev",             BPF_PROG_TYPE_CGROUP_DEVICE,
8914                                                 BPF_CGROUP_DEVICE),
8915         BPF_APROG_SEC("sockops",                BPF_PROG_TYPE_SOCK_OPS,
8916                                                 BPF_CGROUP_SOCK_OPS),
8917         BPF_APROG_SEC("sk_skb/stream_parser",   BPF_PROG_TYPE_SK_SKB,
8918                                                 BPF_SK_SKB_STREAM_PARSER),
8919         BPF_APROG_SEC("sk_skb/stream_verdict",  BPF_PROG_TYPE_SK_SKB,
8920                                                 BPF_SK_SKB_STREAM_VERDICT),
8921         BPF_APROG_COMPAT("sk_skb",              BPF_PROG_TYPE_SK_SKB),
8922         BPF_APROG_SEC("sk_msg",                 BPF_PROG_TYPE_SK_MSG,
8923                                                 BPF_SK_MSG_VERDICT),
8924         BPF_APROG_SEC("lirc_mode2",             BPF_PROG_TYPE_LIRC_MODE2,
8925                                                 BPF_LIRC_MODE2),
8926         BPF_APROG_SEC("flow_dissector",         BPF_PROG_TYPE_FLOW_DISSECTOR,
8927                                                 BPF_FLOW_DISSECTOR),
8928         BPF_EAPROG_SEC("cgroup/bind4",          BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8929                                                 BPF_CGROUP_INET4_BIND),
8930         BPF_EAPROG_SEC("cgroup/bind6",          BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8931                                                 BPF_CGROUP_INET6_BIND),
8932         BPF_EAPROG_SEC("cgroup/connect4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8933                                                 BPF_CGROUP_INET4_CONNECT),
8934         BPF_EAPROG_SEC("cgroup/connect6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8935                                                 BPF_CGROUP_INET6_CONNECT),
8936         BPF_EAPROG_SEC("cgroup/sendmsg4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8937                                                 BPF_CGROUP_UDP4_SENDMSG),
8938         BPF_EAPROG_SEC("cgroup/sendmsg6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8939                                                 BPF_CGROUP_UDP6_SENDMSG),
8940         BPF_EAPROG_SEC("cgroup/recvmsg4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8941                                                 BPF_CGROUP_UDP4_RECVMSG),
8942         BPF_EAPROG_SEC("cgroup/recvmsg6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8943                                                 BPF_CGROUP_UDP6_RECVMSG),
8944         BPF_EAPROG_SEC("cgroup/getpeername4",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8945                                                 BPF_CGROUP_INET4_GETPEERNAME),
8946         BPF_EAPROG_SEC("cgroup/getpeername6",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8947                                                 BPF_CGROUP_INET6_GETPEERNAME),
8948         BPF_EAPROG_SEC("cgroup/getsockname4",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8949                                                 BPF_CGROUP_INET4_GETSOCKNAME),
8950         BPF_EAPROG_SEC("cgroup/getsockname6",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8951                                                 BPF_CGROUP_INET6_GETSOCKNAME),
8952         BPF_EAPROG_SEC("cgroup/sysctl",         BPF_PROG_TYPE_CGROUP_SYSCTL,
8953                                                 BPF_CGROUP_SYSCTL),
8954         BPF_EAPROG_SEC("cgroup/getsockopt",     BPF_PROG_TYPE_CGROUP_SOCKOPT,
8955                                                 BPF_CGROUP_GETSOCKOPT),
8956         BPF_EAPROG_SEC("cgroup/setsockopt",     BPF_PROG_TYPE_CGROUP_SOCKOPT,
8957                                                 BPF_CGROUP_SETSOCKOPT),
8958         BPF_PROG_SEC("struct_ops",              BPF_PROG_TYPE_STRUCT_OPS),
8959         BPF_EAPROG_SEC("sk_lookup/",            BPF_PROG_TYPE_SK_LOOKUP,
8960                                                 BPF_SK_LOOKUP),
8961 };
8962
8963 #undef BPF_PROG_SEC_IMPL
8964 #undef BPF_PROG_SEC
8965 #undef BPF_APROG_SEC
8966 #undef BPF_EAPROG_SEC
8967 #undef BPF_APROG_COMPAT
8968 #undef SEC_DEF
8969
8970 #define MAX_TYPE_NAME_SIZE 32
8971
8972 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
8973 {
8974         int i, n = ARRAY_SIZE(section_defs);
8975
8976         for (i = 0; i < n; i++) {
8977                 if (strncmp(sec_name,
8978                             section_defs[i].sec, section_defs[i].len))
8979                         continue;
8980                 return &section_defs[i];
8981         }
8982         return NULL;
8983 }
8984
8985 static char *libbpf_get_type_names(bool attach_type)
8986 {
8987         int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
8988         char *buf;
8989
8990         buf = malloc(len);
8991         if (!buf)
8992                 return NULL;
8993
8994         buf[0] = '\0';
8995         /* Build a string buffer with all available names */
8996         for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8997                 if (attach_type && !section_defs[i].is_attachable)
8998                         continue;
8999
9000                 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
9001                         free(buf);
9002                         return NULL;
9003                 }
9004                 strcat(buf, " ");
9005                 strcat(buf, section_defs[i].sec);
9006         }
9007
9008         return buf;
9009 }
9010
9011 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
9012                              enum bpf_attach_type *expected_attach_type)
9013 {
9014         const struct bpf_sec_def *sec_def;
9015         char *type_names;
9016
9017         if (!name)
9018                 return -EINVAL;
9019
9020         sec_def = find_sec_def(name);
9021         if (sec_def) {
9022                 *prog_type = sec_def->prog_type;
9023                 *expected_attach_type = sec_def->expected_attach_type;
9024                 return 0;
9025         }
9026
9027         pr_debug("failed to guess program type from ELF section '%s'\n", name);
9028         type_names = libbpf_get_type_names(false);
9029         if (type_names != NULL) {
9030                 pr_debug("supported section(type) names are:%s\n", type_names);
9031                 free(type_names);
9032         }
9033
9034         return -ESRCH;
9035 }
9036
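/* Illustrative usage sketch: resolve program and expected attach type from a
 * section name via the table above. The section string is just an example.
 *
 *        enum bpf_prog_type prog_type;
 *        enum bpf_attach_type attach_type;
 *
 *        if (!libbpf_prog_type_by_name("cgroup/connect4", &prog_type,
 *                                      &attach_type))
 *                printf("prog type %d, attach type %d\n", prog_type, attach_type);
 */
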
9037 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
9038                                                      size_t offset)
9039 {
9040         struct bpf_map *map;
9041         size_t i;
9042
9043         for (i = 0; i < obj->nr_maps; i++) {
9044                 map = &obj->maps[i];
9045                 if (!bpf_map__is_struct_ops(map))
9046                         continue;
9047                 if (map->sec_offset <= offset &&
9048                     offset - map->sec_offset < map->def.value_size)
9049                         return map;
9050         }
9051
9052         return NULL;
9053 }
9054
9055 /* Collect the reloc from ELF and populate the st_ops->progs[] */
9056 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
9057                                             GElf_Shdr *shdr, Elf_Data *data)
9058 {
9059         const struct btf_member *member;
9060         struct bpf_struct_ops *st_ops;
9061         struct bpf_program *prog;
9062         unsigned int shdr_idx;
9063         const struct btf *btf;
9064         struct bpf_map *map;
9065         Elf_Data *symbols;
9066         unsigned int moff, insn_idx;
9067         const char *name;
9068         __u32 member_idx;
9069         GElf_Sym sym;
9070         GElf_Rel rel;
9071         int i, nrels;
9072
9073         symbols = obj->efile.symbols;
9074         btf = obj->btf;
9075         nrels = shdr->sh_size / shdr->sh_entsize;
9076         for (i = 0; i < nrels; i++) {
9077                 if (!gelf_getrel(data, i, &rel)) {
9078                         pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
9079                         return -LIBBPF_ERRNO__FORMAT;
9080                 }
9081
9082                 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
9083                         pr_warn("struct_ops reloc: symbol %zx not found\n",
9084                                 (size_t)GELF_R_SYM(rel.r_info));
9085                         return -LIBBPF_ERRNO__FORMAT;
9086                 }
9087
9088                 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
9089                 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
9090                 if (!map) {
9091                         pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
9092                                 (size_t)rel.r_offset);
9093                         return -EINVAL;
9094                 }
9095
9096                 moff = rel.r_offset - map->sec_offset;
9097                 shdr_idx = sym.st_shndx;
9098                 st_ops = map->st_ops;
9099                 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
9100                          map->name,
9101                          (long long)(rel.r_info >> 32),
9102                          (long long)sym.st_value,
9103                          shdr_idx, (size_t)rel.r_offset,
9104                          map->sec_offset, sym.st_name, name);
9105
9106                 if (shdr_idx >= SHN_LORESERVE) {
9107                         pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
9108                                 map->name, (size_t)rel.r_offset, shdr_idx);
9109                         return -LIBBPF_ERRNO__RELOC;
9110                 }
9111                 if (sym.st_value % BPF_INSN_SZ) {
9112                         pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
9113                                 map->name, (unsigned long long)sym.st_value);
9114                         return -LIBBPF_ERRNO__FORMAT;
9115                 }
9116                 insn_idx = sym.st_value / BPF_INSN_SZ;
9117
9118                 member = find_member_by_offset(st_ops->type, moff * 8);
9119                 if (!member) {
9120                         pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
9121                                 map->name, moff);
9122                         return -EINVAL;
9123                 }
9124                 member_idx = member - btf_members(st_ops->type);
9125                 name = btf__name_by_offset(btf, member->name_off);
9126
9127                 if (!resolve_func_ptr(btf, member->type, NULL)) {
9128                         pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
9129                                 map->name, name);
9130                         return -EINVAL;
9131                 }
9132
9133                 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
9134                 if (!prog) {
9135                         pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
9136                                 map->name, shdr_idx, name);
9137                         return -EINVAL;
9138                 }
9139
9140                 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
9141                         const struct bpf_sec_def *sec_def;
9142
9143                         sec_def = find_sec_def(prog->sec_name);
9144                         if (sec_def &&
9145                             sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
9146                                 /* for pr_warn */
9147                                 prog->type = sec_def->prog_type;
9148                                 goto invalid_prog;
9149                         }
9150
9151                         prog->type = BPF_PROG_TYPE_STRUCT_OPS;
9152                         prog->attach_btf_id = st_ops->type_id;
9153                         prog->expected_attach_type = member_idx;
9154                 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
9155                            prog->attach_btf_id != st_ops->type_id ||
9156                            prog->expected_attach_type != member_idx) {
9157                         goto invalid_prog;
9158                 }
9159                 st_ops->progs[member_idx] = prog;
9160         }
9161
9162         return 0;
9163
9164 invalid_prog:
9165         pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
9166                 map->name, prog->name, prog->sec_name, prog->type,
9167                 prog->attach_btf_id, prog->expected_attach_type, name);
9168         return -EINVAL;
9169 }
9170
9171 #define BTF_TRACE_PREFIX "btf_trace_"
9172 #define BTF_LSM_PREFIX "bpf_lsm_"
9173 #define BTF_ITER_PREFIX "bpf_iter_"
9174 #define BTF_MAX_NAME_SIZE 128
9175
9176 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
9177                                    const char *name, __u32 kind)
9178 {
9179         char btf_type_name[BTF_MAX_NAME_SIZE];
9180         int ret;
9181
9182         ret = snprintf(btf_type_name, sizeof(btf_type_name),
9183                        "%s%s", prefix, name);
9184         /* snprintf returns the number of characters written excluding the
9185          * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
9186          * indicates truncation.
9187          */
9188         if (ret < 0 || ret >= sizeof(btf_type_name))
9189                 return -ENAMETOOLONG;
9190         return btf__find_by_name_kind(btf, btf_type_name, kind);
9191 }
9192
9193 static inline int find_attach_btf_id(struct btf *btf, const char *name,
9194                                      enum bpf_attach_type attach_type)
9195 {
9196         int err;
9197
9198         if (attach_type == BPF_TRACE_RAW_TP)
9199                 err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
9200                                               BTF_KIND_TYPEDEF);
9201         else if (attach_type == BPF_LSM_MAC)
9202                 err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
9203                                               BTF_KIND_FUNC);
9204         else if (attach_type == BPF_TRACE_ITER)
9205                 err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
9206                                               BTF_KIND_FUNC);
9207         else
9208                 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9209
9210         return err;
9211 }
9212
9213 int libbpf_find_vmlinux_btf_id(const char *name,
9214                                enum bpf_attach_type attach_type)
9215 {
9216         struct btf *btf;
9217         int err;
9218
9219         btf = libbpf_find_kernel_btf();
9220         if (IS_ERR(btf)) {
9221                 pr_warn("vmlinux BTF is not found\n");
9222                 return -EINVAL;
9223         }
9224
9225         err = find_attach_btf_id(btf, name, attach_type);
9226         if (err <= 0)
9227                 pr_warn("%s is not found in vmlinux BTF\n", name);
9228
9229         btf__free(btf);
9230         return err;
9231 }
9232
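/* Illustrative usage sketch: look up the BTF ID of an fentry/fexit attach
 * target in vmlinux BTF. The kernel function name is only an example and
 * must exist in the running kernel's BTF.
 *
 *        int btf_id = libbpf_find_vmlinux_btf_id("tcp_sendmsg", BPF_TRACE_FENTRY);
 *
 *        if (btf_id > 0)
 *                printf("attach target BTF id: %d\n", btf_id);
 */
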
9233 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
9234 {
9235         struct bpf_prog_info_linear *info_linear;
9236         struct bpf_prog_info *info;
9237         struct btf *btf = NULL;
9238         int err = -EINVAL;
9239
9240         info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
9241         if (IS_ERR_OR_NULL(info_linear)) {
9242                 pr_warn("failed get_prog_info_linear for FD %d\n",
9243                         attach_prog_fd);
9244                 return -EINVAL;
9245         }
9246         info = &info_linear->info;
9247         if (!info->btf_id) {
9248                 pr_warn("The target program doesn't have BTF\n");
9249                 goto out;
9250         }
9251         if (btf__get_from_id(info->btf_id, &btf)) {
9252                 pr_warn("Failed to get BTF of the program\n");
9253                 goto out;
9254         }
9255         err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9256         btf__free(btf);
9257         if (err <= 0) {
9258                 pr_warn("%s is not found in prog's BTF\n", name);
9259                 goto out;
9260         }
9261 out:
9262         free(info_linear);
9263         return err;
9264 }
9265
9266 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9267                               enum bpf_attach_type attach_type,
9268                               int *btf_obj_fd, int *btf_type_id)
9269 {
9270         int ret, i;
9271
9272         ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9273         if (ret > 0) {
9274                 *btf_obj_fd = 0; /* vmlinux BTF */
9275                 *btf_type_id = ret;
9276                 return 0;
9277         }
9278         if (ret != -ENOENT)
9279                 return ret;
9280
9281         ret = load_module_btfs(obj);
9282         if (ret)
9283                 return ret;
9284
9285         for (i = 0; i < obj->btf_module_cnt; i++) {
9286                 const struct module_btf *mod = &obj->btf_modules[i];
9287
9288                 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
9289                 if (ret > 0) {
9290                         *btf_obj_fd = mod->fd;
9291                         *btf_type_id = ret;
9292                         return 0;
9293                 }
9294                 if (ret == -ENOENT)
9295                         continue;
9296
9297                 return ret;
9298         }
9299
9300         return -ESRCH;
9301 }
9302
9303 static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
9304 {
9305         enum bpf_attach_type attach_type = prog->expected_attach_type;
9306         __u32 attach_prog_fd = prog->attach_prog_fd;
9307         const char *name = prog->sec_name, *attach_name;
9308         const struct bpf_sec_def *sec = NULL;
9309         int i, err;
9310
9311         if (!name)
9312                 return -EINVAL;
9313
9314         for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9315                 if (!section_defs[i].is_attach_btf)
9316                         continue;
9317                 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
9318                         continue;
9319
9320                 sec = &section_defs[i];
9321                 break;
9322         }
9323
9324         if (!sec) {
9325                 pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
9326                 return -ESRCH;
9327         }
9328         attach_name = name + sec->len;
9329
9330         /* BPF program's BTF ID */
9331         if (attach_prog_fd) {
9332                 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9333                 if (err < 0) {
9334                         pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9335                                  attach_prog_fd, attach_name, err);
9336                         return err;
9337                 }
9338                 *btf_obj_fd = 0;
9339                 *btf_type_id = err;
9340                 return 0;
9341         }
9342
9343         /* kernel/module BTF ID */
9344         err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9345         if (err) {
9346                 pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
9347                 return err;
9348         }
9349         return 0;
9350 }
9351
9352 int libbpf_attach_type_by_name(const char *name,
9353                                enum bpf_attach_type *attach_type)
9354 {
9355         char *type_names;
9356         int i;
9357
9358         if (!name)
9359                 return -EINVAL;
9360
9361         for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9362                 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
9363                         continue;
9364                 if (!section_defs[i].is_attachable)
9365                         return -EINVAL;
9366                 *attach_type = section_defs[i].expected_attach_type;
9367                 return 0;
9368         }
9369         pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9370         type_names = libbpf_get_type_names(true);
9371         if (type_names != NULL) {
9372                 pr_debug("attachable section(type) names are:%s\n", type_names);
9373                 free(type_names);
9374         }
9375
9376         return -EINVAL;
9377 }
9378
9379 int bpf_map__fd(const struct bpf_map *map)
9380 {
9381         return map ? map->fd : -EINVAL;
9382 }
9383
9384 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
9385 {
9386         return map ? &map->def : ERR_PTR(-EINVAL);
9387 }
9388
9389 const char *bpf_map__name(const struct bpf_map *map)
9390 {
9391         return map ? map->name : NULL;
9392 }
9393
9394 enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9395 {
9396         return map->def.type;
9397 }
9398
9399 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9400 {
9401         if (map->fd >= 0)
9402                 return -EBUSY;
9403         map->def.type = type;
9404         return 0;
9405 }
9406
9407 __u32 bpf_map__map_flags(const struct bpf_map *map)
9408 {
9409         return map->def.map_flags;
9410 }
9411
9412 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9413 {
9414         if (map->fd >= 0)
9415                 return -EBUSY;
9416         map->def.map_flags = flags;
9417         return 0;
9418 }
9419
9420 __u32 bpf_map__numa_node(const struct bpf_map *map)
9421 {
9422         return map->numa_node;
9423 }
9424
9425 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9426 {
9427         if (map->fd >= 0)
9428                 return -EBUSY;
9429         map->numa_node = numa_node;
9430         return 0;
9431 }
9432
9433 __u32 bpf_map__key_size(const struct bpf_map *map)
9434 {
9435         return map->def.key_size;
9436 }
9437
9438 int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9439 {
9440         if (map->fd >= 0)
9441                 return -EBUSY;
9442         map->def.key_size = size;
9443         return 0;
9444 }
9445
9446 __u32 bpf_map__value_size(const struct bpf_map *map)
9447 {
9448         return map->def.value_size;
9449 }
9450
9451 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
9452 {
9453         if (map->fd >= 0)
9454                 return -EBUSY;
9455         map->def.value_size = size;
9456         return 0;
9457 }
9458
9459 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
9460 {
9461         return map ? map->btf_key_type_id : 0;
9462 }
9463
9464 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
9465 {
9466         return map ? map->btf_value_type_id : 0;
9467 }
9468
9469 int bpf_map__set_priv(struct bpf_map *map, void *priv,
9470                      bpf_map_clear_priv_t clear_priv)
9471 {
9472         if (!map)
9473                 return -EINVAL;
9474
9475         if (map->priv) {
9476                 if (map->clear_priv)
9477                         map->clear_priv(map, map->priv);
9478         }
9479
9480         map->priv = priv;
9481         map->clear_priv = clear_priv;
9482         return 0;
9483 }
9484
9485 void *bpf_map__priv(const struct bpf_map *map)
9486 {
9487         return map ? map->priv : ERR_PTR(-EINVAL);
9488 }
9489
9490 int bpf_map__set_initial_value(struct bpf_map *map,
9491                                const void *data, size_t size)
9492 {
9493         if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
9494             size != map->def.value_size || map->fd >= 0)
9495                 return -EINVAL;
9496
9497         memcpy(map->mmaped, data, size);
9498         return 0;
9499 }
9500
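/* Illustrative usage sketch: override the initial contents of an internal
 * (.data/.rodata/.bss) map before load. The lookup name and config struct
 * are hypothetical; the struct must match the map's value size exactly.
 *
 *        struct my_config cfg = { .verbose = 1 };
 *        struct bpf_map *rodata;
 *
 *        rodata = bpf_object__find_map_by_name(obj, "my_obj.rodata");
 *        if (rodata)
 *                bpf_map__set_initial_value(rodata, &cfg, sizeof(cfg));
 */
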
9501 bool bpf_map__is_offload_neutral(const struct bpf_map *map)
9502 {
9503         return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
9504 }
9505
9506 bool bpf_map__is_internal(const struct bpf_map *map)
9507 {
9508         return map->libbpf_type != LIBBPF_MAP_UNSPEC;
9509 }
9510
9511 __u32 bpf_map__ifindex(const struct bpf_map *map)
9512 {
9513         return map->map_ifindex;
9514 }
9515
9516 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9517 {
9518         if (map->fd >= 0)
9519                 return -EBUSY;
9520         map->map_ifindex = ifindex;
9521         return 0;
9522 }
9523
9524 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
9525 {
9526         if (!bpf_map_type__is_map_in_map(map->def.type)) {
9527                 pr_warn("error: unsupported map type\n");
9528                 return -EINVAL;
9529         }
9530         if (map->inner_map_fd != -1) {
9531                 pr_warn("error: inner_map_fd already specified\n");
9532                 return -EINVAL;
9533         }
9534         zfree(&map->inner_map);
9535         map->inner_map_fd = fd;
9536         return 0;
9537 }
9538
9539 static struct bpf_map *
9540 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9541 {
9542         ssize_t idx;
9543         struct bpf_map *s, *e;
9544
9545         if (!obj || !obj->maps)
9546                 return NULL;
9547
9548         s = obj->maps;
9549         e = obj->maps + obj->nr_maps;
9550
9551         if ((m < s) || (m >= e)) {
9552                 pr_warn("error in %s: map handle doesn't belong to object\n",
9553                          __func__);
9554                 return NULL;
9555         }
9556
9557         idx = (m - obj->maps) + i;
9558         if (idx >= obj->nr_maps || idx < 0)
9559                 return NULL;
9560         return &obj->maps[idx];
9561 }
9562
9563 struct bpf_map *
9564 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
9565 {
9566         if (prev == NULL)
9567                 return obj->maps;
9568
9569         return __bpf_map__iter(prev, obj, 1);
9570 }
9571
9572 struct bpf_map *
9573 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
9574 {
9575         if (next == NULL) {
9576                 if (!obj->nr_maps)
9577                         return NULL;
9578                 return obj->maps + obj->nr_maps - 1;
9579         }
9580
9581         return __bpf_map__iter(next, obj, -1);
9582 }
9583
9584 struct bpf_map *
9585 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
9586 {
9587         struct bpf_map *pos;
9588
9589         bpf_object__for_each_map(pos, obj) {
9590                 if (pos->name && !strcmp(pos->name, name))
9591                         return pos;
9592         }
9593         return NULL;
9594 }
9595
9596 int
9597 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
9598 {
9599         return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
9600 }
9601
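/* Illustrative usage sketch: look up a map by name and use its FD with the
 * low-level helpers from bpf.h. Map name, key and value layout are
 * hypothetical.
 *
 *        __u32 key = 0, value = 42;
 *        int fd = bpf_object__find_map_fd_by_name(obj, "my_map");
 *
 *        if (fd >= 0)
 *                bpf_map_update_elem(fd, &key, &value, BPF_ANY);
 */
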
9602 struct bpf_map *
9603 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
9604 {
9605         return ERR_PTR(-ENOTSUP);
9606 }
9607
9608 long libbpf_get_error(const void *ptr)
9609 {
9610         return PTR_ERR_OR_ZERO(ptr);
9611 }
9612
9613 int bpf_prog_load(const char *file, enum bpf_prog_type type,
9614                   struct bpf_object **pobj, int *prog_fd)
9615 {
9616         struct bpf_prog_load_attr attr;
9617
9618         memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
9619         attr.file = file;
9620         attr.prog_type = type;
9621         attr.expected_attach_type = 0;
9622
9623         return bpf_prog_load_xattr(&attr, pobj, prog_fd);
9624 }
9625
9626 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
9627                         struct bpf_object **pobj, int *prog_fd)
9628 {
9629         struct bpf_object_open_attr open_attr = {};
9630         struct bpf_program *prog, *first_prog = NULL;
9631         struct bpf_object *obj;
9632         struct bpf_map *map;
9633         int err;
9634
9635         if (!attr)
9636                 return -EINVAL;
9637         if (!attr->file)
9638                 return -EINVAL;
9639
9640         open_attr.file = attr->file;
9641         open_attr.prog_type = attr->prog_type;
9642
9643         obj = bpf_object__open_xattr(&open_attr);
9644         if (IS_ERR_OR_NULL(obj))
9645                 return -ENOENT;
9646
9647         bpf_object__for_each_program(prog, obj) {
9648                 enum bpf_attach_type attach_type = attr->expected_attach_type;
9649                 /*
9650                  * to preserve backwards compatibility, bpf_prog_load treats
9651                  * attr->prog_type, if specified, as an override to whatever
9652                  * bpf_object__open guessed
9653                  */
9654                 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
9655                         bpf_program__set_type(prog, attr->prog_type);
9656                         bpf_program__set_expected_attach_type(prog,
9657                                                               attach_type);
9658                 }
9659                 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
9660                         /*
9661                          * we couldn't guess the type from the section name
9662                          * and the user didn't provide a fallback type
9663                          */
9664                         bpf_object__close(obj);
9665                         return -EINVAL;
9666                 }
9667
9668                 prog->prog_ifindex = attr->ifindex;
9669                 prog->log_level = attr->log_level;
9670                 prog->prog_flags |= attr->prog_flags;
9671                 if (!first_prog)
9672                         first_prog = prog;
9673         }
9674
9675         bpf_object__for_each_map(map, obj) {
9676                 if (!bpf_map__is_offload_neutral(map))
9677                         map->map_ifindex = attr->ifindex;
9678         }
9679
9680         if (!first_prog) {
9681                 pr_warn("object file doesn't contain a BPF program\n");
9682                 bpf_object__close(obj);
9683                 return -ENOENT;
9684         }
9685
9686         err = bpf_object__load(obj);
9687         if (err) {
9688                 bpf_object__close(obj);
9689                 return err;
9690         }
9691
9692         *pobj = obj;
9693         *prog_fd = bpf_program__fd(first_prog);
9694         return 0;
9695 }
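
/* Illustrative usage sketch (not part of libbpf): load every program from an
 * object file in one call and get back the FD of the first program; the
 * object path and program type below are assumptions for the example.
 */
static __attribute__((unused)) int example_prog_load(void)
{
	struct bpf_object *obj;
	int prog_fd, err;

	err = bpf_prog_load("./xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err)
		return err;
	/* all programs and maps in the object are loaded at this point;
	 * prog_fd refers to the first program found in the object
	 */
	bpf_object__close(obj);
	return 0;
}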
9696
9697 struct bpf_link {
9698         int (*detach)(struct bpf_link *link);
9699         int (*destroy)(struct bpf_link *link);
9700         char *pin_path;         /* NULL, if not pinned */
9701         int fd;                 /* hook FD, -1 if not applicable */
9702         bool disconnected;
9703 };
9704
9705 /* Replace link's underlying BPF program with the new one */
9706 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
9707 {
9708         return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
9709 }
9710
9711 /* Release "ownership" of the underlying BPF resource (typically, a BPF
9712  * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
9713  * disconnected link, when destructed through a bpf_link__destroy() call,
9714  * won't attempt to detach/unregister that BPF resource. This is useful in
9715  * situations where, say, an attached BPF program has to outlive the
9716  * userspace program that attached it. Depending on the type of BPF program,
9717  * though, additional steps (like pinning the BPF program in BPF FS) might be
9718  * necessary to ensure that exit of the userspace program doesn't trigger
9719  * automatic detachment and clean-up inside the kernel.
9720  */
9721 void bpf_link__disconnect(struct bpf_link *link)
9722 {
9723         link->disconnected = true;
9724 }
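
/* Illustrative sketch (not part of libbpf): keep the attached BPF resource
 * alive past process exit by pinning the link in BPF FS and disconnecting
 * it before destruction; the pin path below is an assumption.
 */
static __attribute__((unused)) int
example_pin_and_disconnect(struct bpf_link *link)
{
	int err;

	err = bpf_link__pin(link, "/sys/fs/bpf/my_link");
	if (err)
		return err;
	/* once disconnected, bpf_link__destroy() only frees memory and won't
	 * detach the underlying BPF resource; the pinned link keeps it alive
	 */
	bpf_link__disconnect(link);
	return bpf_link__destroy(link);
}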
9725
9726 int bpf_link__destroy(struct bpf_link *link)
9727 {
9728         int err = 0;
9729
9730         if (IS_ERR_OR_NULL(link))
9731                 return 0;
9732
9733         if (!link->disconnected && link->detach)
9734                 err = link->detach(link);
9735         if (link->destroy)
9736                 link->destroy(link);
9737         if (link->pin_path)
9738                 free(link->pin_path);
9739         free(link);
9740
9741         return err;
9742 }
9743
9744 int bpf_link__fd(const struct bpf_link *link)
9745 {
9746         return link->fd;
9747 }
9748
9749 const char *bpf_link__pin_path(const struct bpf_link *link)
9750 {
9751         return link->pin_path;
9752 }
9753
9754 static int bpf_link__detach_fd(struct bpf_link *link)
9755 {
9756         return close(link->fd);
9757 }
9758
9759 struct bpf_link *bpf_link__open(const char *path)
9760 {
9761         struct bpf_link *link;
9762         int fd;
9763
9764         fd = bpf_obj_get(path);
9765         if (fd < 0) {
9766                 fd = -errno;
9767                 pr_warn("failed to open link at %s: %d\n", path, fd);
9768                 return ERR_PTR(fd);
9769         }
9770
9771         link = calloc(1, sizeof(*link));
9772         if (!link) {
9773                 close(fd);
9774                 return ERR_PTR(-ENOMEM);
9775         }
9776         link->detach = &bpf_link__detach_fd;
9777         link->fd = fd;
9778
9779         link->pin_path = strdup(path);
9780         if (!link->pin_path) {
9781                 bpf_link__destroy(link);
9782                 return ERR_PTR(-ENOMEM);
9783         }
9784
9785         return link;
9786 }
9787
9788 int bpf_link__detach(struct bpf_link *link)
9789 {
9790         return bpf_link_detach(link->fd) ? -errno : 0;
9791 }
9792
9793 int bpf_link__pin(struct bpf_link *link, const char *path)
9794 {
9795         int err;
9796
9797         if (link->pin_path)
9798                 return -EBUSY;
9799         err = make_parent_dir(path);
9800         if (err)
9801                 return err;
9802         err = check_path(path);
9803         if (err)
9804                 return err;
9805
9806         link->pin_path = strdup(path);
9807         if (!link->pin_path)
9808                 return -ENOMEM;
9809
9810         if (bpf_obj_pin(link->fd, link->pin_path)) {
9811                 err = -errno;
9812                 zfree(&link->pin_path);
9813                 return err;
9814         }
9815
9816         pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
9817         return 0;
9818 }
9819
9820 int bpf_link__unpin(struct bpf_link *link)
9821 {
9822         int err;
9823
9824         if (!link->pin_path)
9825                 return -EINVAL;
9826
9827         err = unlink(link->pin_path);
9828         if (err != 0)
9829                 return -errno;
9830
9831         pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
9832         zfree(&link->pin_path);
9833         return 0;
9834 }
9835
9836 static int bpf_link__detach_perf_event(struct bpf_link *link)
9837 {
9838         int err;
9839
9840         err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
9841         if (err)
9842                 err = -errno;
9843
9844         close(link->fd);
9845         return err;
9846 }
9847
9848 struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
9849                                                 int pfd)
9850 {
9851         char errmsg[STRERR_BUFSIZE];
9852         struct bpf_link *link;
9853         int prog_fd, err;
9854
9855         if (pfd < 0) {
9856                 pr_warn("prog '%s': invalid perf event FD %d\n",
9857                         prog->name, pfd);
9858                 return ERR_PTR(-EINVAL);
9859         }
9860         prog_fd = bpf_program__fd(prog);
9861         if (prog_fd < 0) {
9862                 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
9863                         prog->name);
9864                 return ERR_PTR(-EINVAL);
9865         }
9866
9867         link = calloc(1, sizeof(*link));
9868         if (!link)
9869                 return ERR_PTR(-ENOMEM);
9870         link->detach = &bpf_link__detach_perf_event;
9871         link->fd = pfd;
9872
9873         if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
9874                 err = -errno;
9875                 free(link);
9876                 pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
9877                         prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9878                 if (err == -EPROTO)
9879                         pr_warn("prog '%s': try adding PERF_SAMPLE_CALLCHAIN to or removing exclude_callchain_[kernel|user] from pfd %d\n",
9880                                 prog->name, pfd);
9881                 return ERR_PTR(err);
9882         }
9883         if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
9884                 err = -errno;
9885                 free(link);
9886                 pr_warn("prog '%s': failed to enable pfd %d: %s\n",
9887                         prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9888                 return ERR_PTR(err);
9889         }
9890         return link;
9891 }
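
/* Illustrative sketch (not part of libbpf): open a software CPU-clock perf
 * event on one CPU and attach a loaded BPF_PROG_TYPE_PERF_EVENT program to
 * it; the 99 Hz sampling frequency is an assumption for the example.
 */
static __attribute__((unused)) struct bpf_link *
example_attach_cpu_clock(struct bpf_program *prog, int cpu)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.size = sizeof(attr),
		.sample_freq = 99,
		.freq = 1, /* sample by frequency, not period */
	};
	struct bpf_link *link;
	int pfd;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, cpu,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0)
		return ERR_PTR(-errno);

	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link))
		close(pfd); /* on success, the link owns and later closes pfd */
	return link;
}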
9892
9893 /*
9894  * This function is expected to parse an integer in the range of [0, 2^31-1]
9895  * from the given file using scanf format string fmt. If the actual parsed
9896  * value is negative, the result might be indistinguishable from an error.
9897  */
9898 static int parse_uint_from_file(const char *file, const char *fmt)
9899 {
9900         char buf[STRERR_BUFSIZE];
9901         int err, ret;
9902         FILE *f;
9903
9904         f = fopen(file, "r");
9905         if (!f) {
9906                 err = -errno;
9907                 pr_debug("failed to open '%s': %s\n", file,
9908                          libbpf_strerror_r(err, buf, sizeof(buf)));
9909                 return err;
9910         }
9911         err = fscanf(f, fmt, &ret);
9912         if (err != 1) {
9913                 err = err == EOF ? -EIO : -errno;
9914                 pr_debug("failed to parse '%s': %s\n", file,
9915                         libbpf_strerror_r(err, buf, sizeof(buf)));
9916                 fclose(f);
9917                 return err;
9918         }
9919         fclose(f);
9920         return ret;
9921 }
9922
9923 static int determine_kprobe_perf_type(void)
9924 {
9925         const char *file = "/sys/bus/event_source/devices/kprobe/type";
9926
9927         return parse_uint_from_file(file, "%d\n");
9928 }
9929
9930 static int determine_uprobe_perf_type(void)
9931 {
9932         const char *file = "/sys/bus/event_source/devices/uprobe/type";
9933
9934         return parse_uint_from_file(file, "%d\n");
9935 }
9936
9937 static int determine_kprobe_retprobe_bit(void)
9938 {
9939         const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
9940
9941         return parse_uint_from_file(file, "config:%d\n");
9942 }
9943
9944 static int determine_uprobe_retprobe_bit(void)
9945 {
9946         const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
9947
9948         return parse_uint_from_file(file, "config:%d\n");
9949 }
9950
9951 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
9952                                  uint64_t offset, int pid)
9953 {
9954         struct perf_event_attr attr = {};
9955         char errmsg[STRERR_BUFSIZE];
9956         int type, pfd, err;
9957
9958         type = uprobe ? determine_uprobe_perf_type()
9959                       : determine_kprobe_perf_type();
9960         if (type < 0) {
9961                 pr_warn("failed to determine %s perf type: %s\n",
9962                         uprobe ? "uprobe" : "kprobe",
9963                         libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
9964                 return type;
9965         }
9966         if (retprobe) {
9967                 int bit = uprobe ? determine_uprobe_retprobe_bit()
9968                                  : determine_kprobe_retprobe_bit();
9969
9970                 if (bit < 0) {
9971                         pr_warn("failed to determine %s retprobe bit: %s\n",
9972                                 uprobe ? "uprobe" : "kprobe",
9973                                 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
9974                         return bit;
9975                 }
9976                 attr.config |= 1 << bit;
9977         }
9978         attr.size = sizeof(attr);
9979         attr.type = type;
9980         attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
9981         attr.config2 = offset;           /* kprobe_addr or probe_offset */
9982
9983         /* pid filter is meaningful only for uprobes */
9984         pfd = syscall(__NR_perf_event_open, &attr,
9985                       pid < 0 ? -1 : pid /* pid */,
9986                       pid == -1 ? 0 : -1 /* cpu */,
9987                       -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9988         if (pfd < 0) {
9989                 err = -errno;
9990                 pr_warn("%s perf_event_open() failed: %s\n",
9991                         uprobe ? "uprobe" : "kprobe",
9992                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9993                 return err;
9994         }
9995         return pfd;
9996 }
9997
9998 struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
9999                                             bool retprobe,
10000                                             const char *func_name)
10001 {
10002         char errmsg[STRERR_BUFSIZE];
10003         struct bpf_link *link;
10004         int pfd, err;
10005
10006         pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
10007                                     0 /* offset */, -1 /* pid */);
10008         if (pfd < 0) {
10009                 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
10010                         prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
10011                         libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10012                 return ERR_PTR(pfd);
10013         }
10014         link = bpf_program__attach_perf_event(prog, pfd);
10015         if (IS_ERR(link)) {
10016                 close(pfd);
10017                 err = PTR_ERR(link);
10018                 pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
10019                         prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
10020                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10021                 return link;
10022         }
10023         return link;
10024 }
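
/* Illustrative sketch (not part of libbpf): attach a loaded program to a
 * kprobe; the kernel function name below is an assumption for the example.
 */
static __attribute__((unused)) int
example_attach_kprobe(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_kprobe(prog, false /* !retprobe */,
					  "do_sys_openat2");
	if (IS_ERR(link))
		return PTR_ERR(link);
	/* ... run the workload of interest ... */
	return bpf_link__destroy(link); /* detaches the kprobe */
}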
10025
10026 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
10027                                       struct bpf_program *prog)
10028 {
10029         const char *func_name;
10030         bool retprobe;
10031
10032         func_name = prog->sec_name + sec->len;
10033         retprobe = strcmp(sec->sec, "kretprobe/") == 0;
10034
10035         return bpf_program__attach_kprobe(prog, retprobe, func_name);
10036 }
10037
10038 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
10039                                             bool retprobe, pid_t pid,
10040                                             const char *binary_path,
10041                                             size_t func_offset)
10042 {
10043         char errmsg[STRERR_BUFSIZE];
10044         struct bpf_link *link;
10045         int pfd, err;
10046
10047         pfd = perf_event_open_probe(true /* uprobe */, retprobe,
10048                                     binary_path, func_offset, pid);
10049         if (pfd < 0) {
10050                 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
10051                         prog->name, retprobe ? "uretprobe" : "uprobe",
10052                         binary_path, func_offset,
10053                         libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10054                 return ERR_PTR(pfd);
10055         }
10056         link = bpf_program__attach_perf_event(prog, pfd);
10057         if (IS_ERR(link)) {
10058                 close(pfd);
10059                 err = PTR_ERR(link);
10060                 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
10061                         prog->name, retprobe ? "uretprobe" : "uprobe",
10062                         binary_path, func_offset,
10063                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10064                 return link;
10065         }
10066         return link;
10067 }
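
/* Illustrative sketch (not part of libbpf): attach a uretprobe to a function
 * inside a binary for all processes; the binary path and function offset
 * (normally resolved from the ELF symbol table) are assumptions.
 */
static __attribute__((unused)) int
example_attach_uretprobe(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_uprobe(prog, true /* retprobe */,
					  -1 /* any pid */,
					  "/usr/lib/libc.so.6",
					  0x8a90 /* func_offset */);
	if (IS_ERR(link))
		return PTR_ERR(link);
	return bpf_link__destroy(link);
}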
10068
10069 static int determine_tracepoint_id(const char *tp_category,
10070                                    const char *tp_name)
10071 {
10072         char file[PATH_MAX];
10073         int ret;
10074
10075         ret = snprintf(file, sizeof(file),
10076                        "/sys/kernel/debug/tracing/events/%s/%s/id",
10077                        tp_category, tp_name);
10078         if (ret < 0)
10079                 return -errno;
10080         if (ret >= sizeof(file)) {
10081                 pr_debug("tracepoint %s/%s path is too long\n",
10082                          tp_category, tp_name);
10083                 return -E2BIG;
10084         }
10085         return parse_uint_from_file(file, "%d\n");
10086 }
10087
10088 static int perf_event_open_tracepoint(const char *tp_category,
10089                                       const char *tp_name)
10090 {
10091         struct perf_event_attr attr = {};
10092         char errmsg[STRERR_BUFSIZE];
10093         int tp_id, pfd, err;
10094
10095         tp_id = determine_tracepoint_id(tp_category, tp_name);
10096         if (tp_id < 0) {
10097                 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
10098                         tp_category, tp_name,
10099                         libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
10100                 return tp_id;
10101         }
10102
10103         attr.type = PERF_TYPE_TRACEPOINT;
10104         attr.size = sizeof(attr);
10105         attr.config = tp_id;
10106
10107         pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
10108                       -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10109         if (pfd < 0) {
10110                 err = -errno;
10111                 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
10112                         tp_category, tp_name,
10113                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10114                 return err;
10115         }
10116         return pfd;
10117 }
10118
10119 struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
10120                                                 const char *tp_category,
10121                                                 const char *tp_name)
10122 {
10123         char errmsg[STRERR_BUFSIZE];
10124         struct bpf_link *link;
10125         int pfd, err;
10126
10127         pfd = perf_event_open_tracepoint(tp_category, tp_name);
10128         if (pfd < 0) {
10129                 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
10130                         prog->name, tp_category, tp_name,
10131                         libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10132                 return ERR_PTR(pfd);
10133         }
10134         link = bpf_program__attach_perf_event(prog, pfd);
10135         if (IS_ERR(link)) {
10136                 close(pfd);
10137                 err = PTR_ERR(link);
10138                 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
10139                         prog->name, tp_category, tp_name,
10140                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10141                 return link;
10142         }
10143         return link;
10144 }
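
/* Illustrative sketch (not part of libbpf): attach to a kernel tracepoint by
 * the category/name pair listed under /sys/kernel/debug/tracing/events/.
 */
static __attribute__((unused)) int
example_attach_tp(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_tracepoint(prog, "syscalls",
					      "sys_enter_openat");
	if (IS_ERR(link))
		return PTR_ERR(link);
	return bpf_link__destroy(link);
}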
10145
10146 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
10147                                   struct bpf_program *prog)
10148 {
10149         char *sec_name, *tp_cat, *tp_name;
10150         struct bpf_link *link;
10151
10152         sec_name = strdup(prog->sec_name);
10153         if (!sec_name)
10154                 return ERR_PTR(-ENOMEM);
10155
10156         /* extract "tp/<category>/<name>" */
10157         tp_cat = sec_name + sec->len;
10158         tp_name = strchr(tp_cat, '/');
10159         if (!tp_name) {
10160                 link = ERR_PTR(-EINVAL);
10161                 goto out;
10162         }
10163         *tp_name = '\0';
10164         tp_name++;
10165
10166         link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
10167 out:
10168         free(sec_name);
10169         return link;
10170 }
10171
10172 struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
10173                                                     const char *tp_name)
10174 {
10175         char errmsg[STRERR_BUFSIZE];
10176         struct bpf_link *link;
10177         int prog_fd, pfd;
10178
10179         prog_fd = bpf_program__fd(prog);
10180         if (prog_fd < 0) {
10181                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10182                 return ERR_PTR(-EINVAL);
10183         }
10184
10185         link = calloc(1, sizeof(*link));
10186         if (!link)
10187                 return ERR_PTR(-ENOMEM);
10188         link->detach = &bpf_link__detach_fd;
10189
10190         pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
10191         if (pfd < 0) {
10192                 pfd = -errno;
10193                 free(link);
10194                 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
10195                         prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10196                 return ERR_PTR(pfd);
10197         }
10198         link->fd = pfd;
10199         return link;
10200 }
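
/* Illustrative sketch (not part of libbpf): raw tracepoints bypass the
 * stable tracepoint ABI and are attached by name only, with no category.
 */
static __attribute__((unused)) int
example_attach_raw_tp(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
	if (IS_ERR(link))
		return PTR_ERR(link);
	return bpf_link__destroy(link);
}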
10201
10202 static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
10203                                       struct bpf_program *prog)
10204 {
10205         const char *tp_name = prog->sec_name + sec->len;
10206
10207         return bpf_program__attach_raw_tracepoint(prog, tp_name);
10208 }
10209
10210 /* Common logic for all BPF program types that attach to a btf_id */
10211 static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
10212 {
10213         char errmsg[STRERR_BUFSIZE];
10214         struct bpf_link *link;
10215         int prog_fd, pfd;
10216
10217         prog_fd = bpf_program__fd(prog);
10218         if (prog_fd < 0) {
10219                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10220                 return ERR_PTR(-EINVAL);
10221         }
10222
10223         link = calloc(1, sizeof(*link));
10224         if (!link)
10225                 return ERR_PTR(-ENOMEM);
10226         link->detach = &bpf_link__detach_fd;
10227
10228         pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
10229         if (pfd < 0) {
10230                 pfd = -errno;
10231                 free(link);
10232                 pr_warn("prog '%s': failed to attach: %s\n",
10233                         prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10234                 return ERR_PTR(pfd);
10235         }
10236         link->fd = pfd;
10237         return link;
10238 }
10239
10240 struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
10241 {
10242         return bpf_program__attach_btf_id(prog);
10243 }
10244
10245 struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
10246 {
10247         return bpf_program__attach_btf_id(prog);
10248 }
10249
10250 static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
10251                                      struct bpf_program *prog)
10252 {
10253         return bpf_program__attach_trace(prog);
10254 }
10255
10256 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
10257                                    struct bpf_program *prog)
10258 {
10259         return bpf_program__attach_lsm(prog);
10260 }
10261
10262 static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
10263                                     struct bpf_program *prog)
10264 {
10265         return bpf_program__attach_iter(prog, NULL);
10266 }
10267
10268 static struct bpf_link *
10269 bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
10270                        const char *target_name)
10271 {
10272         DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
10273                             .target_btf_id = btf_id);
10274         enum bpf_attach_type attach_type;
10275         char errmsg[STRERR_BUFSIZE];
10276         struct bpf_link *link;
10277         int prog_fd, link_fd;
10278
10279         prog_fd = bpf_program__fd(prog);
10280         if (prog_fd < 0) {
10281                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10282                 return ERR_PTR(-EINVAL);
10283         }
10284
10285         link = calloc(1, sizeof(*link));
10286         if (!link)
10287                 return ERR_PTR(-ENOMEM);
10288         link->detach = &bpf_link__detach_fd;
10289
10290         attach_type = bpf_program__get_expected_attach_type(prog);
10291         link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
10292         if (link_fd < 0) {
10293                 link_fd = -errno;
10294                 free(link);
10295                 pr_warn("prog '%s': failed to attach to %s: %s\n",
10296                         prog->name, target_name,
10297                         libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10298                 return ERR_PTR(link_fd);
10299         }
10300         link->fd = link_fd;
10301         return link;
10302 }
10303
10304 struct bpf_link *
10305 bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
10306 {
10307         return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
10308 }
10309
10310 struct bpf_link *
10311 bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
10312 {
10313         return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
10314 }
10315
10316 struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
10317 {
10318         /* target_fd/target_ifindex use the same field in LINK_CREATE */
10319         return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
10320 }
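
/* Illustrative sketch (not part of libbpf): the FD-based helpers above all
 * create a kernel bpf_link; e.g., attaching to a cgroup referenced by an
 * open directory FD. The cgroup path is an assumption for the example.
 */
static __attribute__((unused)) struct bpf_link *
example_attach_cgroup(struct bpf_program *prog)
{
	struct bpf_link *link;
	int cgroup_fd;

	cgroup_fd = open("/sys/fs/cgroup/my_cgroup", O_RDONLY);
	if (cgroup_fd < 0)
		return ERR_PTR(-errno);
	link = bpf_program__attach_cgroup(prog, cgroup_fd);
	close(cgroup_fd); /* the FD is only needed for the attach call */
	return link;
}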
10321
10322 struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
10323                                               int target_fd,
10324                                               const char *attach_func_name)
10325 {
10326         int btf_id;
10327
10328         if (!!target_fd != !!attach_func_name) {
10329                 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
10330                         prog->name);
10331                 return ERR_PTR(-EINVAL);
10332         }
10333
10334         if (prog->type != BPF_PROG_TYPE_EXT) {
10335                 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
10336                         prog->name);
10337                 return ERR_PTR(-EINVAL);
10338         }
10339
10340         if (target_fd) {
10341                 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
10342                 if (btf_id < 0)
10343                         return ERR_PTR(btf_id);
10344
10345                 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
10346         } else {
10347                 /* no target, so use raw_tracepoint_open for compatibility
10348                  * with old kernels
10349                  */
10350                 return bpf_program__attach_trace(prog);
10351         }
10352 }
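
/* Illustrative sketch (not part of libbpf): replace a function inside an
 * already-loaded target program with a BPF_PROG_TYPE_EXT program; target_fd
 * and the function name below are assumptions for the example.
 */
static __attribute__((unused)) int
example_attach_freplace(struct bpf_program *ext_prog, int target_fd)
{
	struct bpf_link *link;

	link = bpf_program__attach_freplace(ext_prog, target_fd,
					    "xdp_pass_func");
	if (IS_ERR(link))
		return PTR_ERR(link);
	return bpf_link__destroy(link);
}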
10353
10354 struct bpf_link *
10355 bpf_program__attach_iter(struct bpf_program *prog,
10356                          const struct bpf_iter_attach_opts *opts)
10357 {
10358         DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
10359         char errmsg[STRERR_BUFSIZE];
10360         struct bpf_link *link;
10361         int prog_fd, link_fd;
10362         __u32 target_fd = 0;
10363
10364         if (!OPTS_VALID(opts, bpf_iter_attach_opts))
10365                 return ERR_PTR(-EINVAL);
10366
10367         link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
10368         link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
10369
10370         prog_fd = bpf_program__fd(prog);
10371         if (prog_fd < 0) {
10372                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10373                 return ERR_PTR(-EINVAL);
10374         }
10375
10376         link = calloc(1, sizeof(*link));
10377         if (!link)
10378                 return ERR_PTR(-ENOMEM);
10379         link->detach = &bpf_link__detach_fd;
10380
10381         link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
10382                                   &link_create_opts);
10383         if (link_fd < 0) {
10384                 link_fd = -errno;
10385                 free(link);
10386                 pr_warn("prog '%s': failed to attach to iterator: %s\n",
10387                         prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10388                 return ERR_PTR(link_fd);
10389         }
10390         link->fd = link_fd;
10391         return link;
10392 }
10393
10394 struct bpf_link *bpf_program__attach(struct bpf_program *prog)
10395 {
10396         const struct bpf_sec_def *sec_def;
10397
10398         sec_def = find_sec_def(prog->sec_name);
10399         if (!sec_def || !sec_def->attach_fn)
10400                 return ERR_PTR(-ESRCH);
10401
10402         return sec_def->attach_fn(sec_def, prog);
10403 }
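
/* Illustrative sketch (not part of libbpf): auto-attach every program in a
 * loaded object based on its section name, the same mechanism generated BPF
 * skeletons rely on. Links are not stored here for brevity.
 */
static __attribute__((unused)) int
example_attach_all(struct bpf_object *obj)
{
	struct bpf_program *prog;
	struct bpf_link *link;

	bpf_object__for_each_program(prog, obj) {
		link = bpf_program__attach(prog);
		if (IS_ERR(link))
			return PTR_ERR(link);
	}
	return 0;
}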
10404
10405 static int bpf_link__detach_struct_ops(struct bpf_link *link)
10406 {
10407         __u32 zero = 0;
10408
10409         if (bpf_map_delete_elem(link->fd, &zero))
10410                 return -errno;
10411
10412         return 0;
10413 }
10414
10415 struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
10416 {
10417         struct bpf_struct_ops *st_ops;
10418         struct bpf_link *link;
10419         __u32 i, zero = 0;
10420         int err;
10421
10422         if (!bpf_map__is_struct_ops(map) || map->fd == -1)
10423                 return ERR_PTR(-EINVAL);
10424
10425         link = calloc(1, sizeof(*link));
10426         if (!link)
10427                 return ERR_PTR(-ENOMEM);
10428
10429         st_ops = map->st_ops;
10430         for (i = 0; i < btf_vlen(st_ops->type); i++) {
10431                 struct bpf_program *prog = st_ops->progs[i];
10432                 void *kern_data;
10433                 int prog_fd;
10434
10435                 if (!prog)
10436                         continue;
10437
10438                 prog_fd = bpf_program__fd(prog);
10439                 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
10440                 *(unsigned long *)kern_data = prog_fd;
10441         }
10442
10443         err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
10444         if (err) {
10445                 err = -errno;
10446                 free(link);
10447                 return ERR_PTR(err);
10448         }
10449
10450         link->detach = bpf_link__detach_struct_ops;
10451         link->fd = map->fd;
10452
10453         return link;
10454 }
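
/* Illustrative sketch (not part of libbpf): register a struct_ops map (e.g.,
 * a tcp_congestion_ops implementation) from a loaded object; the map name is
 * an assumption for the example.
 */
static __attribute__((unused)) int
example_register_struct_ops(struct bpf_object *obj)
{
	struct bpf_map *map;
	struct bpf_link *link;

	map = bpf_object__find_map_by_name(obj, "dctcp");
	if (!map)
		return -ENOENT;
	link = bpf_map__attach_struct_ops(map);
	if (IS_ERR(link))
		return PTR_ERR(link);
	/* destroying the link deletes the map value, unregistering the
	 * struct_ops from the kernel
	 */
	return bpf_link__destroy(link);
}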
10455
10456 enum bpf_perf_event_ret
10457 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
10458                            void **copy_mem, size_t *copy_size,
10459                            bpf_perf_event_print_t fn, void *private_data)
10460 {
10461         struct perf_event_mmap_page *header = mmap_mem;
10462         __u64 data_head = ring_buffer_read_head(header);
10463         __u64 data_tail = header->data_tail;
10464         void *base = ((__u8 *)header) + page_size;
10465         int ret = LIBBPF_PERF_EVENT_CONT;
10466         struct perf_event_header *ehdr;
10467         size_t ehdr_size;
10468
10469         while (data_head != data_tail) {
10470                 ehdr = base + (data_tail & (mmap_size - 1));
10471                 ehdr_size = ehdr->size;
10472
10473                 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
10474                         void *copy_start = ehdr;
10475                         size_t len_first = base + mmap_size - copy_start;
10476                         size_t len_secnd = ehdr_size - len_first;
10477
10478                         if (*copy_size < ehdr_size) {
10479                                 free(*copy_mem);
10480                                 *copy_mem = malloc(ehdr_size);
10481                                 if (!*copy_mem) {
10482                                         *copy_size = 0;
10483                                         ret = LIBBPF_PERF_EVENT_ERROR;
10484                                         break;
10485                                 }
10486                                 *copy_size = ehdr_size;
10487                         }
10488
10489                         memcpy(*copy_mem, copy_start, len_first);
10490                         memcpy(*copy_mem + len_first, base, len_secnd);
10491                         ehdr = *copy_mem;
10492                 }
10493
10494                 ret = fn(ehdr, private_data);
10495                 data_tail += ehdr_size;
10496                 if (ret != LIBBPF_PERF_EVENT_CONT)
10497                         break;
10498         }
10499
10500         ring_buffer_write_tail(header, data_tail);
10501         return ret;
10502 }
10503
10504 struct perf_buffer;
10505
10506 struct perf_buffer_params {
10507         struct perf_event_attr *attr;
10508         /* if event_cb is specified, it takes precedence */
10509         perf_buffer_event_fn event_cb;
10510         /* sample_cb and lost_cb are higher-level common-case callbacks */
10511         perf_buffer_sample_fn sample_cb;
10512         perf_buffer_lost_fn lost_cb;
10513         void *ctx;
10514         int cpu_cnt;
10515         int *cpus;
10516         int *map_keys;
10517 };
10518
10519 struct perf_cpu_buf {
10520         struct perf_buffer *pb;
10521         void *base; /* mmap()'ed memory */
10522         void *buf; /* for reconstructing segmented data */
10523         size_t buf_size;
10524         int fd;
10525         int cpu;
10526         int map_key;
10527 };
10528
10529 struct perf_buffer {
10530         perf_buffer_event_fn event_cb;
10531         perf_buffer_sample_fn sample_cb;
10532         perf_buffer_lost_fn lost_cb;
10533         void *ctx; /* passed into callbacks */
10534
10535         size_t page_size;
10536         size_t mmap_size;
10537         struct perf_cpu_buf **cpu_bufs;
10538         struct epoll_event *events;
10539         int cpu_cnt; /* number of allocated CPU buffers */
10540         int epoll_fd; /* epoll instance FD */
10541         int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
10542 };
10543
10544 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
10545                                       struct perf_cpu_buf *cpu_buf)
10546 {
10547         if (!cpu_buf)
10548                 return;
10549         if (cpu_buf->base &&
10550             munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
10551                 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
10552         if (cpu_buf->fd >= 0) {
10553                 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
10554                 close(cpu_buf->fd);
10555         }
10556         free(cpu_buf->buf);
10557         free(cpu_buf);
10558 }
10559
10560 void perf_buffer__free(struct perf_buffer *pb)
10561 {
10562         int i;
10563
10564         if (IS_ERR_OR_NULL(pb))
10565                 return;
10566         if (pb->cpu_bufs) {
10567                 for (i = 0; i < pb->cpu_cnt; i++) {
10568                         struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10569
10570                         if (!cpu_buf)
10571                                 continue;
10572
10573                         bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
10574                         perf_buffer__free_cpu_buf(pb, cpu_buf);
10575                 }
10576                 free(pb->cpu_bufs);
10577         }
10578         if (pb->epoll_fd >= 0)
10579                 close(pb->epoll_fd);
10580         free(pb->events);
10581         free(pb);
10582 }
10583
10584 static struct perf_cpu_buf *
10585 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
10586                           int cpu, int map_key)
10587 {
10588         struct perf_cpu_buf *cpu_buf;
10589         char msg[STRERR_BUFSIZE];
10590         int err;
10591
10592         cpu_buf = calloc(1, sizeof(*cpu_buf));
10593         if (!cpu_buf)
10594                 return ERR_PTR(-ENOMEM);
10595
10596         cpu_buf->pb = pb;
10597         cpu_buf->cpu = cpu;
10598         cpu_buf->map_key = map_key;
10599
10600         cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
10601                               -1, PERF_FLAG_FD_CLOEXEC);
10602         if (cpu_buf->fd < 0) {
10603                 err = -errno;
10604                 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
10605                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10606                 goto error;
10607         }
10608
10609         cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
10610                              PROT_READ | PROT_WRITE, MAP_SHARED,
10611                              cpu_buf->fd, 0);
10612         if (cpu_buf->base == MAP_FAILED) {
10613                 cpu_buf->base = NULL;
10614                 err = -errno;
10615                 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
10616                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10617                 goto error;
10618         }
10619
10620         if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10621                 err = -errno;
10622                 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
10623                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10624                 goto error;
10625         }
10626
10627         return cpu_buf;
10628
10629 error:
10630         perf_buffer__free_cpu_buf(pb, cpu_buf);
10631         return (struct perf_cpu_buf *)ERR_PTR(err);
10632 }
10633
10634 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10635                                               struct perf_buffer_params *p);
10636
10637 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
10638                                      const struct perf_buffer_opts *opts)
10639 {
10640         struct perf_buffer_params p = {};
10641         struct perf_event_attr attr = { 0, };
10642
10643         attr.config = PERF_COUNT_SW_BPF_OUTPUT;
10644         attr.type = PERF_TYPE_SOFTWARE;
10645         attr.sample_type = PERF_SAMPLE_RAW;
10646         attr.sample_period = 1;
10647         attr.wakeup_events = 1;
10648
10649         p.attr = &attr;
10650         p.sample_cb = opts ? opts->sample_cb : NULL;
10651         p.lost_cb = opts ? opts->lost_cb : NULL;
10652         p.ctx = opts ? opts->ctx : NULL;
10653
10654         return __perf_buffer__new(map_fd, page_cnt, &p);
10655 }
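
/* Illustrative sketch (not part of libbpf): consume samples that BPF
 * programs submit through a BPF_MAP_TYPE_PERF_EVENT_ARRAY map; the map name
 * "events" and the 8-page per-CPU buffer size are assumptions.
 */
static __attribute__((unused)) void
example_handle_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* data points at the raw bytes the BPF program submitted */
}

static __attribute__((unused)) int
example_perf_buffer(struct bpf_object *obj)
{
	struct perf_buffer_opts pb_opts = {};
	struct perf_buffer *pb;
	int err;

	pb_opts.sample_cb = example_handle_sample;
	pb = perf_buffer__new(bpf_object__find_map_fd_by_name(obj, "events"),
			      8 /* pages per CPU */, &pb_opts);
	err = libbpf_get_error(pb);
	if (err)
		return err;

	/* samples are delivered through example_handle_sample() */
	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		;
	perf_buffer__free(pb);
	return err;
}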
10656
10657 struct perf_buffer *
10658 perf_buffer__new_raw(int map_fd, size_t page_cnt,
10659                      const struct perf_buffer_raw_opts *opts)
10660 {
10661         struct perf_buffer_params p = {};
10662
10663         p.attr = opts->attr;
10664         p.event_cb = opts->event_cb;
10665         p.ctx = opts->ctx;
10666         p.cpu_cnt = opts->cpu_cnt;
10667         p.cpus = opts->cpus;
10668         p.map_keys = opts->map_keys;
10669
10670         return __perf_buffer__new(map_fd, page_cnt, &p);
10671 }
10672
10673 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10674                                               struct perf_buffer_params *p)
10675 {
10676         const char *online_cpus_file = "/sys/devices/system/cpu/online";
10677         struct bpf_map_info map;
10678         char msg[STRERR_BUFSIZE];
10679         struct perf_buffer *pb;
10680         bool *online = NULL;
10681         __u32 map_info_len;
10682         int err, i, j, n;
10683
10684         if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
10685                 pr_warn("page count should be a non-zero power of two, but is %zu\n",
10686                         page_cnt);
10687                 return ERR_PTR(-EINVAL);
10688         }
10689
10690         /* best-effort sanity checks */
10691         memset(&map, 0, sizeof(map));
10692         map_info_len = sizeof(map);
10693         err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
10694         if (err) {
10695                 err = -errno;
10696                 /* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
10697                  * -EBADFD, -EFAULT, or -E2BIG on a real error
10698                  */
10699                 if (err != -EINVAL) {
10700                         pr_warn("failed to get map info for map FD %d: %s\n",
10701                                 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
10702                         return ERR_PTR(err);
10703                 }
10704                 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
10705                          map_fd);
10706         } else {
10707                 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
10708                         pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
10709                                 map.name);
10710                         return ERR_PTR(-EINVAL);
10711                 }
10712         }
10713
10714         pb = calloc(1, sizeof(*pb));
10715         if (!pb)
10716                 return ERR_PTR(-ENOMEM);
10717
10718         pb->event_cb = p->event_cb;
10719         pb->sample_cb = p->sample_cb;
10720         pb->lost_cb = p->lost_cb;
10721         pb->ctx = p->ctx;
10722
10723         pb->page_size = getpagesize();
10724         pb->mmap_size = pb->page_size * page_cnt;
10725         pb->map_fd = map_fd;
10726
10727         pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
10728         if (pb->epoll_fd < 0) {
10729                 err = -errno;
10730                 pr_warn("failed to create epoll instance: %s\n",
10731                         libbpf_strerror_r(err, msg, sizeof(msg)));
10732                 goto error;
10733         }
10734
10735         if (p->cpu_cnt > 0) {
10736                 pb->cpu_cnt = p->cpu_cnt;
10737         } else {
10738                 pb->cpu_cnt = libbpf_num_possible_cpus();
10739                 if (pb->cpu_cnt < 0) {
10740                         err = pb->cpu_cnt;
10741                         goto error;
10742                 }
10743                 if (map.max_entries && map.max_entries < pb->cpu_cnt)
10744                         pb->cpu_cnt = map.max_entries;
10745         }
10746
10747         pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
10748         if (!pb->events) {
10749                 err = -ENOMEM;
10750                 pr_warn("failed to allocate events: out of memory\n");
10751                 goto error;
10752         }
10753         pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
10754         if (!pb->cpu_bufs) {
10755                 err = -ENOMEM;
10756                 pr_warn("failed to allocate buffers: out of memory\n");
10757                 goto error;
10758         }
10759
10760         err = parse_cpu_mask_file(online_cpus_file, &online, &n);
10761         if (err) {
10762                 pr_warn("failed to get online CPU mask: %d\n", err);
10763                 goto error;
10764         }
10765
10766         for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
10767                 struct perf_cpu_buf *cpu_buf;
10768                 int cpu, map_key;
10769
10770                 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
10771                 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
10772
10773                 /* in case the user didn't explicitly request particular CPUs
10774                  * to be attached to, skip offline/not-present CPUs
10775                  */
10776                 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
10777                         continue;
10778
10779                 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
10780                 if (IS_ERR(cpu_buf)) {
10781                         err = PTR_ERR(cpu_buf);
10782                         goto error;
10783                 }
10784
10785                 pb->cpu_bufs[j] = cpu_buf;
10786
10787                 err = bpf_map_update_elem(pb->map_fd, &map_key,
10788                                           &cpu_buf->fd, 0);
10789                 if (err) {
10790                         err = -errno;
10791                         pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
10792                                 cpu, map_key, cpu_buf->fd,
10793                                 libbpf_strerror_r(err, msg, sizeof(msg)));
10794                         goto error;
10795                 }
10796
10797                 pb->events[j].events = EPOLLIN;
10798                 pb->events[j].data.ptr = cpu_buf;
10799                 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
10800                               &pb->events[j]) < 0) {
10801                         err = -errno;
10802                         pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
10803                                 cpu, cpu_buf->fd,
10804                                 libbpf_strerror_r(err, msg, sizeof(msg)));
10805                         goto error;
10806                 }
10807                 j++;
10808         }
10809         pb->cpu_cnt = j;
10810         free(online);
10811
10812         return pb;
10813
10814 error:
10815         free(online);
10816         if (pb)
10817                 perf_buffer__free(pb);
10818         return ERR_PTR(err);
10819 }
10820
10821 struct perf_sample_raw {
10822         struct perf_event_header header;
10823         uint32_t size;
10824         char data[];
10825 };
10826
10827 struct perf_sample_lost {
10828         struct perf_event_header header;
10829         uint64_t id;
10830         uint64_t lost;
10831         uint64_t sample_id;
10832 };
10833
10834 static enum bpf_perf_event_ret
10835 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
10836 {
10837         struct perf_cpu_buf *cpu_buf = ctx;
10838         struct perf_buffer *pb = cpu_buf->pb;
10839         void *data = e;
10840
10841         /* user wants full control over parsing perf event */
10842         if (pb->event_cb)
10843                 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
10844
10845         switch (e->type) {
10846         case PERF_RECORD_SAMPLE: {
10847                 struct perf_sample_raw *s = data;
10848
10849                 if (pb->sample_cb)
10850                         pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
10851                 break;
10852         }
10853         case PERF_RECORD_LOST: {
10854                 struct perf_sample_lost *s = data;
10855
10856                 if (pb->lost_cb)
10857                         pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
10858                 break;
10859         }
10860         default:
10861                 pr_warn("unknown perf sample type %d\n", e->type);
10862                 return LIBBPF_PERF_EVENT_ERROR;
10863         }
10864         return LIBBPF_PERF_EVENT_CONT;
10865 }
10866
10867 static int perf_buffer__process_records(struct perf_buffer *pb,
10868                                         struct perf_cpu_buf *cpu_buf)
10869 {
10870         enum bpf_perf_event_ret ret;
10871
10872         ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
10873                                          pb->page_size, &cpu_buf->buf,
10874                                          &cpu_buf->buf_size,
10875                                          perf_buffer__process_record, cpu_buf);
10876         if (ret != LIBBPF_PERF_EVENT_CONT)
10877                 return ret;
10878         return 0;
10879 }
10880
10881 int perf_buffer__epoll_fd(const struct perf_buffer *pb)
10882 {
10883         return pb->epoll_fd;
10884 }
10885
10886 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
10887 {
10888         int i, cnt, err;
10889
10890         cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
10891         for (i = 0; i < cnt; i++) {
10892                 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
10893
10894                 err = perf_buffer__process_records(pb, cpu_buf);
10895                 if (err) {
10896                         pr_warn("error while processing records: %d\n", err);
10897                         return err;
10898                 }
10899         }
10900         return cnt < 0 ? -errno : cnt;
10901 }
10902
10903 /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
10904  * manager.
10905  */
10906 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
10907 {
10908         return pb->cpu_cnt;
10909 }
10910
10911 /*
10912  * Return the perf_event FD of the ring buffer in the *buf_idx* slot of the
10913  * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using the
10914  * select()/poll()/epoll_wait() Linux syscalls.
10915  */
10916 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
10917 {
10918         struct perf_cpu_buf *cpu_buf;
10919
10920         if (buf_idx >= pb->cpu_cnt)
10921                 return -EINVAL;
10922
10923         cpu_buf = pb->cpu_bufs[buf_idx];
10924         if (!cpu_buf)
10925                 return -ENOENT;
10926
10927         return cpu_buf->fd;
10928 }
10929
10930 /*
10931  * Consume data from perf ring buffer corresponding to slot *buf_idx* in
10932  * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
10933  * consume, do nothing and return success.
10934  * Returns:
10935  *   - 0 on success;
10936  *   - <0 on failure.
10937  */
10938 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
10939 {
10940         struct perf_cpu_buf *cpu_buf;
10941
10942         if (buf_idx >= pb->cpu_cnt)
10943                 return -EINVAL;
10944
10945         cpu_buf = pb->cpu_bufs[buf_idx];
10946         if (!cpu_buf)
10947                 return -ENOENT;
10948
10949         return perf_buffer__process_records(pb, cpu_buf);
10950 }
10951
10952 int perf_buffer__consume(struct perf_buffer *pb)
10953 {
10954         int i, err;
10955
10956         for (i = 0; i < pb->cpu_cnt; i++) {
10957                 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10958
10959                 if (!cpu_buf)
10960                         continue;
10961
10962                 err = perf_buffer__process_records(pb, cpu_buf);
10963                 if (err) {
10964                         pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
10965                         return err;
10966                 }
10967         }
10968         return 0;
10969 }
10970
10971 struct bpf_prog_info_array_desc {
10972         int     array_offset;   /* e.g. offset of jited_prog_insns */
10973         int     count_offset;   /* e.g. offset of jited_prog_len */
10974         int     size_offset;    /* > 0: offset of rec size,
10975                                  * < 0: fix size of -size_offset
10976                                  */
10977 };
10978
10979 static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
10980         [BPF_PROG_INFO_JITED_INSNS] = {
10981                 offsetof(struct bpf_prog_info, jited_prog_insns),
10982                 offsetof(struct bpf_prog_info, jited_prog_len),
10983                 -1,
10984         },
10985         [BPF_PROG_INFO_XLATED_INSNS] = {
10986                 offsetof(struct bpf_prog_info, xlated_prog_insns),
10987                 offsetof(struct bpf_prog_info, xlated_prog_len),
10988                 -1,
10989         },
10990         [BPF_PROG_INFO_MAP_IDS] = {
10991                 offsetof(struct bpf_prog_info, map_ids),
10992                 offsetof(struct bpf_prog_info, nr_map_ids),
10993                 -(int)sizeof(__u32),
10994         },
10995         [BPF_PROG_INFO_JITED_KSYMS] = {
10996                 offsetof(struct bpf_prog_info, jited_ksyms),
10997                 offsetof(struct bpf_prog_info, nr_jited_ksyms),
10998                 -(int)sizeof(__u64),
10999         },
11000         [BPF_PROG_INFO_JITED_FUNC_LENS] = {
11001                 offsetof(struct bpf_prog_info, jited_func_lens),
11002                 offsetof(struct bpf_prog_info, nr_jited_func_lens),
11003                 -(int)sizeof(__u32),
11004         },
11005         [BPF_PROG_INFO_FUNC_INFO] = {
11006                 offsetof(struct bpf_prog_info, func_info),
11007                 offsetof(struct bpf_prog_info, nr_func_info),
11008                 offsetof(struct bpf_prog_info, func_info_rec_size),
11009         },
11010         [BPF_PROG_INFO_LINE_INFO] = {
11011                 offsetof(struct bpf_prog_info, line_info),
11012                 offsetof(struct bpf_prog_info, nr_line_info),
11013                 offsetof(struct bpf_prog_info, line_info_rec_size),
11014         },
11015         [BPF_PROG_INFO_JITED_LINE_INFO] = {
11016                 offsetof(struct bpf_prog_info, jited_line_info),
11017                 offsetof(struct bpf_prog_info, nr_jited_line_info),
11018                 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
11019         },
11020         [BPF_PROG_INFO_PROG_TAGS] = {
11021                 offsetof(struct bpf_prog_info, prog_tags),
11022                 offsetof(struct bpf_prog_info, nr_prog_tags),
11023                 -(int)sizeof(__u8) * BPF_TAG_SIZE,
11024         },
11025
11026 };
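
/* For example, BPF_PROG_INFO_MAP_IDS above describes an array at
 * info->map_ids with info->nr_map_ids elements of a fixed sizeof(__u32)
 * record size (encoded as a negative size_offset), while
 * BPF_PROG_INFO_FUNC_INFO records carry a variable record size read from
 * info->func_info_rec_size.
 */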
11027
11028 static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
11029                                            int offset)
11030 {
11031         __u32 *array = (__u32 *)info;
11032
11033         if (offset >= 0)
11034                 return array[offset / sizeof(__u32)];
11035         return -(int)offset;
11036 }
11037
11038 static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
11039                                            int offset)
11040 {
11041         __u64 *array = (__u64 *)info;
11042
11043         if (offset >= 0)
11044                 return array[offset / sizeof(__u64)];
11045         return -(int)offset;
11046 }
11047
11048 static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
11049                                          __u32 val)
11050 {
11051         __u32 *array = (__u32 *)info;
11052
11053         if (offset >= 0)
11054                 array[offset / sizeof(__u32)] = val;
11055 }
11056
11057 static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
11058                                          __u64 val)
11059 {
11060         __u64 *array = (__u64 *)info;
11061
11062         if (offset >= 0)
11063                 array[offset / sizeof(__u64)] = val;
11064 }
11065
11066 struct bpf_prog_info_linear *
11067 bpf_program__get_prog_info_linear(int fd, __u64 arrays)
11068 {
11069         struct bpf_prog_info_linear *info_linear;
11070         struct bpf_prog_info info = {};
11071         __u32 info_len = sizeof(info);
11072         __u32 data_len = 0;
11073         int i, err;
11074         void *ptr;
11075
11076         if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
11077                 return ERR_PTR(-EINVAL);
11078
11079         /* step 1: get array dimensions */
11080         err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
11081         if (err) {
11082                 pr_debug("can't get prog info: %s\n", strerror(errno));
11083                 return ERR_PTR(-EFAULT);
11084         }
11085
11086         /* step 2: calculate total size of all arrays */
11087         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11088                 bool include_array = (arrays & (1UL << i)) > 0;
11089                 struct bpf_prog_info_array_desc *desc;
11090                 __u32 count, size;
11091
11092                 desc = bpf_prog_info_array_desc + i;
11093
11094                 /* kernel is too old to support this field */
11095                 if (info_len < desc->array_offset + sizeof(__u32) ||
11096                     info_len < desc->count_offset + sizeof(__u32) ||
11097                     (desc->size_offset > 0 && info_len < desc->size_offset))
11098                         include_array = false;
11099
11100                 if (!include_array) {
11101                         arrays &= ~(1UL << i);  /* clear the bit */
11102                         continue;
11103                 }
11104
11105                 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11106                 size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11107
11108                 data_len += count * size;
11109         }
11110
11111         /* step 3: allocate contiguous memory */
11112         data_len = roundup(data_len, sizeof(__u64));
11113         info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
11114         if (!info_linear)
11115                 return ERR_PTR(-ENOMEM);
11116
11117         /* step 4: fill data to info_linear->info */
11118         info_linear->arrays = arrays;
11119         memset(&info_linear->info, 0, sizeof(info));
11120         ptr = info_linear->data;
11121
11122         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11123                 struct bpf_prog_info_array_desc *desc;
11124                 __u32 count, size;
11125
11126                 if ((arrays & (1UL << i)) == 0)
11127                         continue;
11128
11129                 desc  = bpf_prog_info_array_desc + i;
11130                 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11131                 size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11132                 bpf_prog_info_set_offset_u32(&info_linear->info,
11133                                              desc->count_offset, count);
11134                 bpf_prog_info_set_offset_u32(&info_linear->info,
11135                                              desc->size_offset, size);
11136                 bpf_prog_info_set_offset_u64(&info_linear->info,
11137                                              desc->array_offset,
11138                                              ptr_to_u64(ptr));
11139                 ptr += count * size;
11140         }
11141
11142         /* step 5: call syscall again to get required arrays */
11143         err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
11144         if (err) {
11145                 pr_debug("can't get prog info: %s\n", strerror(errno));
11146                 free(info_linear);
11147                 return ERR_PTR(-EFAULT);
11148         }
11149
11150         /* step 6: verify the data */
11151         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11152                 struct bpf_prog_info_array_desc *desc;
11153                 __u32 v1, v2;
11154
11155                 if ((arrays & (1UL << i)) == 0)
11156                         continue;
11157
11158                 desc = bpf_prog_info_array_desc + i;
11159                 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11160                 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
11161                                                    desc->count_offset);
11162                 if (v1 != v2)
11163                         pr_warn("%s: mismatch in element count\n", __func__);
11164
11165                 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11166                 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
11167                                                    desc->size_offset);
11168                 if (v1 != v2)
11169                         pr_warn("%s: mismatch in rec size\n", __func__);
11170         }
11171
11172         /* step 7: update info_len and data_len */
11173         info_linear->info_len = sizeof(struct bpf_prog_info);
11174         info_linear->data_len = data_len;
11175
11176         return info_linear;
11177 }
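
/* A hedged usage sketch for bpf_program__get_prog_info_linear() (the prog_fd
 * variable and the chosen array set below are assumptions, not mandated by
 * libbpf): request a couple of variable-length arrays and remember that the
 * result is a single malloc()-ed buffer owned by the caller.
 *
 *	__u64 arrays = (1UL << BPF_PROG_INFO_JITED_INSNS) |
 *		       (1UL << BPF_PROG_INFO_JITED_KSYMS);
 *	struct bpf_prog_info_linear *info_linear;
 *
 *	info_linear = bpf_program__get_prog_info_linear(prog_fd, arrays);
 *	if (libbpf_get_error(info_linear))
 *		return -1;	// kernel too old or prog_fd invalid
 *	// info_linear->info.jited_prog_insns now points into info_linear->data
 *	free(info_linear);	// one flat allocation, freed with plain free()
 */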
11178
11179 void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
11180 {
11181         int i;
11182
11183         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11184                 struct bpf_prog_info_array_desc *desc;
11185                 __u64 addr, offs;
11186
11187                 if ((info_linear->arrays & (1UL << i)) == 0)
11188                         continue;
11189
11190                 desc = bpf_prog_info_array_desc + i;
11191                 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
11192                                                      desc->array_offset);
11193                 offs = addr - ptr_to_u64(info_linear->data);
11194                 bpf_prog_info_set_offset_u64(&info_linear->info,
11195                                              desc->array_offset, offs);
11196         }
11197 }
11198
11199 void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
11200 {
11201         int i;
11202
11203         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11204                 struct bpf_prog_info_array_desc *desc;
11205                 __u64 addr, offs;
11206
11207                 if ((info_linear->arrays & (1UL << i)) == 0)
11208                         continue;
11209
11210                 desc = bpf_prog_info_array_desc + i;
11211                 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
11212                                                      desc->array_offset);
11213                 addr = offs + ptr_to_u64(info_linear->data);
11214                 bpf_prog_info_set_offset_u64(&info_linear->info,
11215                                              desc->array_offset, addr);
11216         }
11217 }
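
/* The two converters above are intended to be used as a pair when an
 * info_linear blob leaves the producing process (perf, for example, stores
 * such blobs in its data file). A minimal sketch, where save_blob() and
 * load_blob() are hypothetical stand-ins for the caller's own persistence:
 *
 *	// producer: make the blob position-independent before writing it out
 *	bpf_program__bpil_addr_to_offs(info_linear);
 *	save_blob(info_linear, sizeof(*info_linear) + info_linear->data_len);
 *
 *	// consumer: turn the stored offsets back into usable pointers
 *	info_linear = load_blob();
 *	bpf_program__bpil_offs_to_addr(info_linear);
 */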
11218
11219 int bpf_program__set_attach_target(struct bpf_program *prog,
11220                                    int attach_prog_fd,
11221                                    const char *attach_func_name)
11222 {
11223         int btf_obj_fd = 0, btf_id = 0, err;
11224
11225         if (!prog || attach_prog_fd < 0 || !attach_func_name)
11226                 return -EINVAL;
11227
11228         if (prog->obj->loaded)
11229                 return -EINVAL;
11230
11231         if (attach_prog_fd) {
11232                 btf_id = libbpf_find_prog_btf_id(attach_func_name,
11233                                                  attach_prog_fd);
11234                 if (btf_id < 0)
11235                         return btf_id;
11236         } else {
11237                 /* load btf_vmlinux, if it is not loaded yet */
11238                 err = bpf_object__load_vmlinux_btf(prog->obj, true);
11239                 if (err)
11240                         return err;
11241                 err = find_kernel_btf_id(prog->obj, attach_func_name,
11242                                          prog->expected_attach_type,
11243                                          &btf_obj_fd, &btf_id);
11244                 if (err)
11245                         return err;
11246         }
11247
11248         prog->attach_btf_id = btf_id;
11249         prog->attach_btf_obj_fd = btf_obj_fd;
11250         prog->attach_prog_fd = attach_prog_fd;
11251         return 0;
11252 }
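
/* A minimal usage sketch for bpf_program__set_attach_target(), assuming a
 * hypothetical object file "fentry.bpf.o" with an fentry program named
 * "handle_fentry". With attach_prog_fd == 0 the kernel BTF ID for the named
 * function is resolved by this very call, which is why it must happen after
 * open but before bpf_object__load():
 *
 *	struct bpf_object *obj = bpf_object__open_file("fentry.bpf.o", NULL);
 *	struct bpf_program *prog;
 *	int err;
 *
 *	if (libbpf_get_error(obj))
 *		return -1;
 *	prog = bpf_object__find_program_by_name(obj, "handle_fentry");
 *	if (!prog)
 *		return -1;
 *	// retarget the program to a different kernel function at runtime
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 *	if (!err)
 *		err = bpf_object__load(obj);
 */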
11253
11254 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
11255 {
11256         int err = 0, n, len, start, end = -1;
11257         bool *tmp;
11258
11259         *mask = NULL;
11260         *mask_sz = 0;
11261
11262         /* Each substring separated by ',' has the format \d+-\d+ or \d+ */
11263         while (*s) {
11264                 if (*s == ',' || *s == '\n') {
11265                         s++;
11266                         continue;
11267                 }
11268                 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
11269                 if (n <= 0 || n > 2) {
11270                         pr_warn("Failed to get CPU range %s: %d\n", s, n);
11271                         err = -EINVAL;
11272                         goto cleanup;
11273                 } else if (n == 1) {
11274                         end = start;
11275                 }
11276                 if (start < 0 || start > end) {
11277                         pr_warn("Invalid CPU range [%d,%d] in %s\n",
11278                                 start, end, s);
11279                         err = -EINVAL;
11280                         goto cleanup;
11281                 }
11282                 tmp = realloc(*mask, end + 1);
11283                 if (!tmp) {
11284                         err = -ENOMEM;
11285                         goto cleanup;
11286                 }
11287                 *mask = tmp;
11288                 memset(tmp + *mask_sz, 0, start - *mask_sz);
11289                 memset(tmp + start, 1, end - start + 1);
11290                 *mask_sz = end + 1;
11291                 s += len;
11292         }
11293         if (!*mask_sz) {
11294                 pr_warn("Empty CPU range\n");
11295                 return -EINVAL;
11296         }
11297         return 0;
11298 cleanup:
11299         free(*mask);
11300         *mask = NULL;
11301         return err;
11302 }
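
/* A short walk-through of parse_cpu_mask_str() (internal helper declared in
 * libbpf_internal.h): for the string "0-2,5\n" it allocates a 6-entry bool
 * array with entries 0, 1, 2 and 5 set to true. The caller owns the array:
 *
 *	bool *mask = NULL;
 *	int i, n = 0, cnt = 0;
 *
 *	if (parse_cpu_mask_str("0-2,5\n", &mask, &n))
 *		return -1;
 *	for (i = 0; i < n; i++)	// n == 6 for this input
 *		cnt += mask[i];	// cnt ends up as 4
 *	free(mask);
 */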
11303
11304 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
11305 {
11306         int fd, err = 0, len;
11307         char buf[128];
11308
11309         fd = open(fcpu, O_RDONLY);
11310         if (fd < 0) {
11311                 err = -errno;
11312                 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
11313                 return err;
11314         }
11315         len = read(fd, buf, sizeof(buf));
11316         close(fd);
11317         if (len <= 0) {
11318                 err = len ? -errno : -EINVAL;
11319                 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
11320                 return err;
11321         }
11322         if (len >= sizeof(buf)) {
11323                 pr_warn("CPU mask is too big in file %s\n", fcpu);
11324                 return -E2BIG;
11325         }
11326         buf[len] = '\0';
11327
11328         return parse_cpu_mask_str(buf, mask, mask_sz);
11329 }
11330
11331 int libbpf_num_possible_cpus(void)
11332 {
11333         static const char *fcpu = "/sys/devices/system/cpu/possible";
11334         static int cpus;
11335         int err, n, i, tmp_cpus;
11336         bool *mask;
11337
11338         tmp_cpus = READ_ONCE(cpus);
11339         if (tmp_cpus > 0)
11340                 return tmp_cpus;
11341
11342         err = parse_cpu_mask_file(fcpu, &mask, &n);
11343         if (err)
11344                 return err;
11345
11346         tmp_cpus = 0;
11347         for (i = 0; i < n; i++) {
11348                 if (mask[i])
11349                         tmp_cpus++;
11350         }
11351         free(mask);
11352
11353         WRITE_ONCE(cpus, tmp_cpus);
11354         return tmp_cpus;
11355 }
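
/* A hedged usage sketch for libbpf_num_possible_cpus(): per-CPU map lookups
 * need one value slot per possible CPU, so a hypothetical caller reading a
 * BPF_MAP_TYPE_PERCPU_ARRAY (map_fd is assumed to refer to one) might do:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u32 key = 0;
 *	__u64 *vals;
 *
 *	if (ncpus < 0)
 *		return ncpus;
 *	vals = calloc(ncpus, sizeof(*vals));	// one slot per possible CPU
 *	if (!vals)
 *		return -ENOMEM;
 *	if (bpf_map_lookup_elem(map_fd, &key, vals))
 *		err = -errno;	// err: caller's own error variable
 *	free(vals);
 */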
11356
11357 int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
11358                               const struct bpf_object_open_opts *opts)
11359 {
11360         DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
11361                 .object_name = s->name,
11362         );
11363         struct bpf_object *obj;
11364         int i;
11365
11366         /* Attempt to preserve opts->object_name, unless it is explicitly
11367          * overridden by the user. Overwriting the object name for skeletons
11368          * is discouraged, as it breaks global data maps: their map names use
11369          * the object name as a prefix, and bpftool assumes that this name
11370          * stays the same when the skeleton is generated.
11371          */
11372         if (opts) {
11373                 memcpy(&skel_opts, opts, sizeof(*opts));
11374                 if (!opts->object_name)
11375                         skel_opts.object_name = s->name;
11376         }
11377
11378         obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
11379         if (IS_ERR(obj)) {
11380                 pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
11381                         s->name, PTR_ERR(obj));
11382                 return PTR_ERR(obj);
11383         }
11384
11385         *s->obj = obj;
11386
11387         for (i = 0; i < s->map_cnt; i++) {
11388                 struct bpf_map **map = s->maps[i].map;
11389                 const char *name = s->maps[i].name;
11390                 void **mmaped = s->maps[i].mmaped;
11391
11392                 *map = bpf_object__find_map_by_name(obj, name);
11393                 if (!*map) {
11394                         pr_warn("failed to find skeleton map '%s'\n", name);
11395                         return -ESRCH;
11396                 }
11397
11398                 /* externs shouldn't be pre-setup from user code */
11399                 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
11400                         *mmaped = (*map)->mmaped;
11401         }
11402
11403         for (i = 0; i < s->prog_cnt; i++) {
11404                 struct bpf_program **prog = s->progs[i].prog;
11405                 const char *name = s->progs[i].name;
11406
11407                 *prog = bpf_object__find_program_by_name(obj, name);
11408                 if (!*prog) {
11409                         pr_warn("failed to find skeleton program '%s'\n", name);
11410                         return -ESRCH;
11411                 }
11412         }
11413
11414         return 0;
11415 }
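
/* After a successful bpf_object__open_skeleton() each requested "mmaped"
 * pointer refers to the map's initialization image, which is what lets
 * bpftool-generated skeletons offer pre-load access to global data. A hedged
 * sketch using hypothetical generated names (my_obj and its rodata member):
 *
 *	struct my_obj *skel = my_obj__open();	// wraps bpf_object__open_skeleton()
 *
 *	if (!skel)
 *		return -1;
 *	// writes land in the init image and take effect when the object loads
 *	skel->rodata->verbose = true;
 */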
11416
11417 int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
11418 {
11419         int i, err;
11420
11421         err = bpf_object__load(*s->obj);
11422         if (err) {
11423                 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
11424                 return err;
11425         }
11426
11427         for (i = 0; i < s->map_cnt; i++) {
11428                 struct bpf_map *map = *s->maps[i].map;
11429                 size_t mmap_sz = bpf_map_mmap_sz(map);
11430                 int prot, map_fd = bpf_map__fd(map);
11431                 void **mmaped = s->maps[i].mmaped;
11432
11433                 if (!mmaped)
11434                         continue;
11435
11436                 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
11437                         *mmaped = NULL;
11438                         continue;
11439                 }
11440
11441                 if (map->def.map_flags & BPF_F_RDONLY_PROG)
11442                         prot = PROT_READ;
11443                 else
11444                         prot = PROT_READ | PROT_WRITE;
11445
11446                 /* Remap the anonymous mmap()-ed "map initialization image"
11447                  * as BPF map-backed mmap()-ed memory, preserving the same
11448                  * memory address. This causes the kernel to change the
11449                  * process' page tables to point at a different piece of
11450                  * kernel memory, but from userspace the memory address (and
11451                  * its contents, identical at this point) stays the same.
11452                  * This mapping is released by bpf_object__close() as part
11453                  * of the normal cleanup procedure, so we don't need to
11454                  * worry about it from the skeleton's cleanup perspective.
11455                  */
11456                 *mmaped = mmap(map->mmaped, mmap_sz, prot,
11457                                 MAP_SHARED | MAP_FIXED, map_fd, 0);
11458                 if (*mmaped == MAP_FAILED) {
11459                         err = -errno;
11460                         *mmaped = NULL;
11461                         pr_warn("failed to re-mmap() map '%s': %d\n",
11462                                  bpf_map__name(map), err);
11463                         return err;
11464                 }
11465         }
11466
11467         return 0;
11468 }
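
/* Once bpf_object__load_skeleton() has re-mmap()-ed the global data maps, the
 * same skeleton pointers read and write the live, kernel-backed map memory. A
 * hedged sketch continuing the hypothetical my_obj skeleton from above, with
 * event_cnt standing in for some global variable in the .bss section:
 *
 *	if (my_obj__load(skel))		// wraps bpf_object__load_skeleton()
 *		goto cleanup;
 *	// now backed by the BPF map itself: updates from the program are visible
 *	printf("events so far: %llu\n",
 *	       (unsigned long long)skel->bss->event_cnt);
 */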
11469
11470 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
11471 {
11472         int i;
11473
11474         for (i = 0; i < s->prog_cnt; i++) {
11475                 struct bpf_program *prog = *s->progs[i].prog;
11476                 struct bpf_link **link = s->progs[i].link;
11477                 const struct bpf_sec_def *sec_def;
11478
11479                 if (!prog->load)
11480                         continue;
11481
11482                 sec_def = find_sec_def(prog->sec_name);
11483                 if (!sec_def || !sec_def->attach_fn)
11484                         continue;
11485
11486                 *link = sec_def->attach_fn(sec_def, prog);
11487                 if (IS_ERR(*link)) {
11488                         pr_warn("failed to auto-attach program '%s': %ld\n",
11489                                 bpf_program__name(prog), PTR_ERR(*link));
11490                         return PTR_ERR(*link);
11491                 }
11492         }
11493
11494         return 0;
11495 }
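
/* bpf_object__attach_skeleton() silently skips programs whose section type
 * has no auto-attach handler (sec_def->attach_fn); those still need to be
 * attached manually. A hedged sketch, assuming an XDP program named
 * "xdp_prog" in the hypothetical my_obj skeleton and that this libbpf
 * version provides bpf_program__attach_xdp():
 *
 *	if (my_obj__attach(skel))	// wraps bpf_object__attach_skeleton()
 *		goto cleanup;
 *	// XDP had no auto-attach handler here, so attach it explicitly
 *	skel->links.xdp_prog = bpf_program__attach_xdp(skel->progs.xdp_prog,
 *						       ifindex);
 *	if (libbpf_get_error(skel->links.xdp_prog))
 *		goto cleanup;
 */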
11496
11497 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
11498 {
11499         int i;
11500
11501         for (i = 0; i < s->prog_cnt; i++) {
11502                 struct bpf_link **link = s->progs[i].link;
11503
11504                 bpf_link__destroy(*link);
11505                 *link = NULL;
11506         }
11507 }
11508
11509 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
11510 {
11511         if (s->progs)
11512                 bpf_object__detach_skeleton(s);
11513         if (s->obj)
11514                 bpf_object__close(*s->obj);
11515         free(s->maps);
11516         free(s->progs);
11517         free(s);
11518 }
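
/* bpf_object__destroy_skeleton() detaches, closes the underlying object and
 * frees the skeleton struct itself, so once open has succeeded it can serve
 * as the single error-path cleanup regardless of whether load or attach
 * failed. In the hypothetical my_obj example used above that is simply:
 *
 *	cleanup:
 *		my_obj__destroy(skel);	// wraps bpf_object__destroy_skeleton()
 *		return err;
 */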