// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC            0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vsprintf() in __base_pr() uses a nonliteral format string. It may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)  __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);

static int __base_pr(enum libbpf_print_level level, const char *format,
                     va_list args)
{
        if (level == LIBBPF_DEBUG)
                return 0;

        return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
        libbpf_print_fn_t old_print_fn = __libbpf_pr;

        __libbpf_pr = fn;
        return old_print_fn;
}
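
/* Usage sketch (caller-side, not part of this file): a client can route
 * libbpf's log output through its own callback. my_print() below is
 * hypothetical and simply mirrors __base_pr() above:
 *
 *      static int my_print(enum libbpf_print_level level,
 *                          const char *format, va_list args)
 *      {
 *              if (level == LIBBPF_DEBUG)
 *                      return 0;
 *              return vfprintf(stderr, format, args);
 *      }
 *
 *      libbpf_print_fn_t prev = libbpf_set_print(my_print);
 *
 * Passing NULL silences all output, since libbpf_print() checks __libbpf_pr
 * before invoking it.
 */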

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
        va_list args;

        if (!__libbpf_pr)
                return;

        va_start(args, format);
        __libbpf_pr(level, format, args);
        va_end(args);
}

static void pr_perm_msg(int err)
{
        struct rlimit limit;
        char buf[100];

        if (err != -EPERM || geteuid() != 0)
                return;

        err = getrlimit(RLIMIT_MEMLOCK, &limit);
        if (err)
                return;

        if (limit.rlim_cur == RLIM_INFINITY)
                return;

        if (limit.rlim_cur < 1024)
                snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
        else if (limit.rlim_cur < 1024*1024)
                snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
        else
                snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

        pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
                buf);
}
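
/* The warning above suggests 'ulimit -l'; a caller can achieve the same
 * programmatically with setrlimit(2). A minimal caller-side sketch (not
 * part of libbpf itself):
 *
 *      struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
 *
 *      if (setrlimit(RLIMIT_MEMLOCK, &rl))
 *              fprintf(stderr, "failed to bump RLIMIT_MEMLOCK: %d\n", -errno);
 */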

#define STRERR_BUFSIZE  128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({                  \
        int ___err = 0;                 \
        if ((fd) >= 0)                  \
                ___err = close((fd));   \
        fd = -1;                        \
        ___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
        return (__u64) (unsigned long) ptr;
}

enum kern_feature_id {
        /* v4.14: kernel support for program & map names. */
        FEAT_PROG_NAME,
        /* v5.2: kernel support for global data sections. */
        FEAT_GLOBAL_DATA,
        /* BTF support */
        FEAT_BTF,
        /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
        FEAT_BTF_FUNC,
        /* BTF_KIND_VAR and BTF_KIND_DATASEC support */
        FEAT_BTF_DATASEC,
        /* BTF_FUNC_GLOBAL is supported */
        FEAT_BTF_GLOBAL_FUNC,
        /* BPF_F_MMAPABLE is supported for arrays */
        FEAT_ARRAY_MMAP,
        /* kernel support for expected_attach_type in BPF_PROG_LOAD */
        FEAT_EXP_ATTACH_TYPE,
        /* bpf_probe_read_{kernel,user}[_str] helpers */
        FEAT_PROBE_READ_KERN,
        /* BPF_PROG_BIND_MAP is supported */
        FEAT_PROG_BIND_MAP,
        /* Kernel support for module BTFs */
        FEAT_MODULE_BTF,
        __FEAT_CNT,
};

static bool kernel_supports(enum kern_feature_id feat_id);

enum reloc_type {
        RELO_LD64,
        RELO_CALL,
        RELO_DATA,
        RELO_EXTERN,
};

struct reloc_desc {
        enum reloc_type type;
        int insn_idx;
        int map_idx;
        int sym_off;
        bool processed;
};

struct bpf_sec_def;

typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
                                        struct bpf_program *prog);

struct bpf_sec_def {
        const char *sec;
        size_t len;
        enum bpf_prog_type prog_type;
        enum bpf_attach_type expected_attach_type;
        bool is_exp_attach_type_optional;
        bool is_attachable;
        bool is_attach_btf;
        bool is_sleepable;
        attach_fn_t attach_fn;
};

/*
 * bpf_prog would be a better name, but it is already used in
 * linux/filter.h.
 */
struct bpf_program {
        const struct bpf_sec_def *sec_def;
        char *sec_name;
        size_t sec_idx;
        /* this program's instruction offset (in number of instructions)
         * within its containing ELF section
         */
        size_t sec_insn_off;
        /* number of original instructions in the ELF section belonging to
         * this program, not counting subprogram instructions possibly
         * appended later during relocation
         */
        size_t sec_insn_cnt;
        /* Offset (in number of instructions) of the start of instructions
         * belonging to this BPF program within its containing main BPF
         * program. For the entry-point (main) BPF program, this is always
         * zero. For a sub-program, this gets reset before each main BPF
         * program is processed and relocated, and is used to determine
         * whether the sub-program was already appended to the main program,
         * and if yes, at which instruction offset.
         */
        size_t sub_insn_off;

        char *name;
        /* sec_name with / replaced by _; makes recursive pinning
         * in bpf_object__pin_programs easier
         */
        char *pin_name;

        /* instructions that belong to this BPF program; insns[0] is located
         * at the sec_insn_off instruction within its ELF section in the ELF
         * file, so when mapping an ELF file instruction index to the local
         * instruction, one needs to subtract sec_insn_off; and vice versa.
         */
        struct bpf_insn *insns;
        /* actual number of instructions in this BPF program's image; for
         * entry-point BPF programs this includes the size of the main
         * program itself plus all the used sub-programs, appended at the end
         */
        size_t insns_cnt;

        struct reloc_desc *reloc_desc;
        int nr_reloc;
        int log_level;

        struct {
                int nr;
                int *fds;
        } instances;
        bpf_program_prep_t preprocessor;

        struct bpf_object *obj;
        void *priv;
        bpf_program_clear_priv_t clear_priv;

        bool load;
        enum bpf_prog_type type;
        enum bpf_attach_type expected_attach_type;
        int prog_ifindex;
        __u32 attach_btf_obj_fd;
        __u32 attach_btf_id;
        __u32 attach_prog_fd;
        void *func_info;
        __u32 func_info_rec_size;
        __u32 func_info_cnt;

        void *line_info;
        __u32 line_info_rec_size;
        __u32 line_info_cnt;
        __u32 prog_flags;
};

struct bpf_struct_ops {
        const char *tname;
        const struct btf_type *type;
        struct bpf_program **progs;
        __u32 *kern_func_off;
        /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
        void *data;
        /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
         *      btf_vmlinux's format.
         * struct bpf_struct_ops_tcp_congestion_ops {
         *      [... some other kernel fields ...]
         *      struct tcp_congestion_ops data;
         * }
         * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops).
         * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
         * from "data".
         */
        void *kern_vdata;
        __u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
        LIBBPF_MAP_UNSPEC,
        LIBBPF_MAP_DATA,
        LIBBPF_MAP_BSS,
        LIBBPF_MAP_RODATA,
        LIBBPF_MAP_KCONFIG,
};

static const char * const libbpf_type_to_btf_name[] = {
        [LIBBPF_MAP_DATA]       = DATA_SEC,
        [LIBBPF_MAP_BSS]        = BSS_SEC,
        [LIBBPF_MAP_RODATA]     = RODATA_SEC,
        [LIBBPF_MAP_KCONFIG]    = KCONFIG_SEC,
};

struct bpf_map {
        char *name;
        int fd;
        int sec_idx;
        size_t sec_offset;
        int map_ifindex;
        int inner_map_fd;
        struct bpf_map_def def;
        __u32 numa_node;
        __u32 btf_var_idx;
        __u32 btf_key_type_id;
        __u32 btf_value_type_id;
        __u32 btf_vmlinux_value_type_id;
        void *priv;
        bpf_map_clear_priv_t clear_priv;
        enum libbpf_map_type libbpf_type;
        void *mmaped;
        struct bpf_struct_ops *st_ops;
        struct bpf_map *inner_map;
        void **init_slots;
        int init_slots_sz;
        char *pin_path;
        bool pinned;
        bool reused;
};

enum extern_type {
        EXT_UNKNOWN,
        EXT_KCFG,
        EXT_KSYM,
};

enum kcfg_type {
        KCFG_UNKNOWN,
        KCFG_CHAR,
        KCFG_BOOL,
        KCFG_INT,
        KCFG_TRISTATE,
        KCFG_CHAR_ARR,
};

struct extern_desc {
        enum extern_type type;
        int sym_idx;
        int btf_id;
        int sec_btf_id;
        const char *name;
        bool is_set;
        bool is_weak;
        union {
                struct {
                        enum kcfg_type type;
                        int sz;
                        int align;
                        int data_off;
                        bool is_signed;
                } kcfg;
                struct {
                        unsigned long long addr;

                        /* target btf_id of the corresponding kernel var. */
                        int kernel_btf_obj_fd;
                        int kernel_btf_id;

                        /* local btf_id of the ksym extern's type. */
                        __u32 type_id;
                } ksym;
        };
};

static LIST_HEAD(bpf_objects_list);

struct module_btf {
        struct btf *btf;
        char *name;
        __u32 id;
        int fd;
};

struct bpf_object {
        char name[BPF_OBJ_NAME_LEN];
        char license[64];
        __u32 kern_version;

        struct bpf_program *programs;
        size_t nr_programs;
        struct bpf_map *maps;
        size_t nr_maps;
        size_t maps_cap;

        char *kconfig;
        struct extern_desc *externs;
        int nr_extern;
        int kconfig_map_idx;
        int rodata_map_idx;

        bool loaded;
        bool has_subcalls;

        /*
         * Information used while doing ELF-related work. Only valid if
         * fd is valid.
         */
        struct {
                int fd;
                const void *obj_buf;
                size_t obj_buf_sz;
                Elf *elf;
                GElf_Ehdr ehdr;
                Elf_Data *symbols;
                Elf_Data *data;
                Elf_Data *rodata;
                Elf_Data *bss;
                Elf_Data *st_ops_data;
                size_t shstrndx; /* section index for section name strings */
                size_t strtabidx;
                struct {
                        GElf_Shdr shdr;
                        Elf_Data *data;
                } *reloc_sects;
                int nr_reloc_sects;
                int maps_shndx;
                int btf_maps_shndx;
                __u32 btf_maps_sec_btf_id;
                int text_shndx;
                int symbols_shndx;
                int data_shndx;
                int rodata_shndx;
                int bss_shndx;
                int st_ops_shndx;
        } efile;
        /*
         * All loaded bpf_objects are linked in a list, which is
         * hidden from the caller. bpf_objects__<func> handlers deal with
         * all objects.
         */
        struct list_head list;

        struct btf *btf;
        struct btf_ext *btf_ext;

        /* Parse and load BTF vmlinux if any of the programs in the object need
         * it at load time.
         */
        struct btf *btf_vmlinux;
        /* vmlinux BTF override for CO-RE relocations */
        struct btf *btf_vmlinux_override;
        /* Lazily initialized kernel module BTFs */
        struct module_btf *btf_modules;
        bool btf_modules_loaded;
        size_t btf_module_cnt;
        size_t btf_module_cap;

        void *priv;
        bpf_object_clear_priv_t clear_priv;

        char path[];
};
#define obj_elf_valid(o)        ((o)->efile.elf)

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
                              size_t off, __u32 sym_type, GElf_Sym *sym);

void bpf_program__unload(struct bpf_program *prog)
{
        int i;

        if (!prog)
                return;

        /*
         * If the object is opened but the program was never loaded,
         * it is possible that prog->instances.nr == -1.
         */
        if (prog->instances.nr > 0) {
                for (i = 0; i < prog->instances.nr; i++)
                        zclose(prog->instances.fds[i]);
        } else if (prog->instances.nr != -1) {
                pr_warn("Internal error: instances.nr is %d\n",
                        prog->instances.nr);
        }

        prog->instances.nr = -1;
        zfree(&prog->instances.fds);

        zfree(&prog->func_info);
        zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
        if (!prog)
                return;

        if (prog->clear_priv)
                prog->clear_priv(prog, prog->priv);

        prog->priv = NULL;
        prog->clear_priv = NULL;

        bpf_program__unload(prog);
        zfree(&prog->name);
        zfree(&prog->sec_name);
        zfree(&prog->pin_name);
        zfree(&prog->insns);
        zfree(&prog->reloc_desc);

        prog->nr_reloc = 0;
        prog->insns_cnt = 0;
        prog->sec_idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
        char *name, *p;

        name = p = strdup(prog->sec_name);
        while ((p = strchr(p, '/')))
                *p = '_';

        return name;
}
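
/* E.g., a program in section "raw_tracepoint/sys_enter" gets the pin name
 * "raw_tracepoint_sys_enter", so bpf_object__pin_programs() can pin it as a
 * single file instead of having to create a nested directory path.
 */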

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
        return BPF_CLASS(insn->code) == BPF_JMP &&
               BPF_OP(insn->code) == BPF_CALL &&
               BPF_SRC(insn->code) == BPF_K &&
               insn->src_reg == BPF_PSEUDO_CALL &&
               insn->dst_reg == 0 &&
               insn->off == 0;
}
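
/* E.g., an instruction assembled as
 *
 *      struct bpf_insn call = {
 *              .code = BPF_JMP | BPF_CALL,
 *              .src_reg = BPF_PSEUDO_CALL,
 *              .imm = 2,
 *      };
 *
 * matches here (imm being the callee's insn offset relative to the
 * instruction after the call), while a helper call (src_reg == 0, imm ==
 * helper ID) does not.
 */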

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
                      const char *name, size_t sec_idx, const char *sec_name,
                      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
        if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
                pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
                        sec_name, name, sec_off, insn_data_sz);
                return -EINVAL;
        }

        memset(prog, 0, sizeof(*prog));
        prog->obj = obj;

        prog->sec_idx = sec_idx;
        prog->sec_insn_off = sec_off / BPF_INSN_SZ;
        prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
        /* insns_cnt can later be increased by appending used subprograms */
        prog->insns_cnt = prog->sec_insn_cnt;

        prog->type = BPF_PROG_TYPE_UNSPEC;
        prog->load = true;

        prog->instances.fds = NULL;
        prog->instances.nr = -1;

        prog->sec_name = strdup(sec_name);
        if (!prog->sec_name)
                goto errout;

        prog->name = strdup(name);
        if (!prog->name)
                goto errout;

        prog->pin_name = __bpf_program__pin_name(prog);
        if (!prog->pin_name)
                goto errout;

        prog->insns = malloc(insn_data_sz);
        if (!prog->insns)
                goto errout;
        memcpy(prog->insns, insn_data, insn_data_sz);

        return 0;
errout:
        pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
        bpf_program__exit(prog);
        return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
                         const char *sec_name, int sec_idx)
{
        struct bpf_program *prog, *progs;
        void *data = sec_data->d_buf;
        size_t sec_sz = sec_data->d_size, sec_off, prog_sz;
        int nr_progs, err;
        const char *name;
        GElf_Sym sym;

        progs = obj->programs;
        nr_progs = obj->nr_programs;
        sec_off = 0;

        while (sec_off < sec_sz) {
                if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
                        pr_warn("sec '%s': failed to find program symbol at offset %zu\n",
                                sec_name, sec_off);
                        return -LIBBPF_ERRNO__FORMAT;
                }

                prog_sz = sym.st_size;

                name = elf_sym_str(obj, sym.st_name);
                if (!name) {
                        pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
                                sec_name, sec_off);
                        return -LIBBPF_ERRNO__FORMAT;
                }

                if (sec_off + prog_sz > sec_sz) {
                        pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
                                sec_name, sec_off);
                        return -LIBBPF_ERRNO__FORMAT;
                }

                pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
                         sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

                progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
                if (!progs) {
                        /*
                         * In this case the original obj->programs
                         * is still valid, so it needs no special
                         * treatment in bpf_object__close().
                         */
                        pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
                                sec_name, name);
                        return -ENOMEM;
                }
                obj->programs = progs;

                prog = &progs[nr_progs];

                err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
                                            sec_off, data + sec_off, prog_sz);
                if (err)
                        return err;

                nr_progs++;
                obj->nr_programs = nr_progs;

                sec_off += prog_sz;
        }

        return 0;
}

static __u32 get_kernel_version(void)
{
        __u32 major, minor, patch;
        struct utsname info;

        uname(&info);
        if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
                return 0;
        return KERNEL_VERSION(major, minor, patch);
}
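
/* Worked example: for a release string like "5.11.3-arch1", sscanf() yields
 * major=5, minor=11, patch=3, and KERNEL_VERSION(5, 11, 3) packs them as
 * (5 << 16) + (11 << 8) + 3 = 0x050b03.
 */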

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
        struct btf_member *m;
        int i;

        for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
                if (btf_member_bit_offset(t, i) == bit_offset)
                        return m;
        }

        return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
                    const char *name)
{
        struct btf_member *m;
        int i;

        for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
                if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
                        return m;
        }

        return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
                                   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
                           const struct btf_type **type, __u32 *type_id,
                           const struct btf_type **vtype, __u32 *vtype_id,
                           const struct btf_member **data_member)
{
        const struct btf_type *kern_type, *kern_vtype;
        const struct btf_member *kern_data_member;
        __s32 kern_vtype_id, kern_type_id;
        __u32 i;

        kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
        if (kern_type_id < 0) {
                pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
                        tname);
                return kern_type_id;
        }
        kern_type = btf__type_by_id(btf, kern_type_id);

        /* Find the corresponding "map_value" type that will be used
         * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
         * find "struct bpf_struct_ops_tcp_congestion_ops" from the
         * btf_vmlinux.
         */
        kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
                                                tname, BTF_KIND_STRUCT);
        if (kern_vtype_id < 0) {
                pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
                        STRUCT_OPS_VALUE_PREFIX, tname);
                return kern_vtype_id;
        }
        kern_vtype = btf__type_by_id(btf, kern_vtype_id);

        /* Find "struct tcp_congestion_ops" from
         * struct bpf_struct_ops_tcp_congestion_ops {
         *      [ ... ]
         *      struct tcp_congestion_ops data;
         * }
         */
        kern_data_member = btf_members(kern_vtype);
        for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
                if (kern_data_member->type == kern_type_id)
                        break;
        }
        if (i == btf_vlen(kern_vtype)) {
                pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
                        tname, STRUCT_OPS_VALUE_PREFIX, tname);
                return -EINVAL;
        }

        *type = kern_type;
        *type_id = kern_type_id;
        *vtype = kern_vtype;
        *vtype_id = kern_vtype_id;
        *data_member = kern_data_member;

        return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
        return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
                                         const struct btf *btf,
                                         const struct btf *kern_btf)
{
        const struct btf_member *member, *kern_member, *kern_data_member;
        const struct btf_type *type, *kern_type, *kern_vtype;
        __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
        struct bpf_struct_ops *st_ops;
        void *data, *kern_data;
        const char *tname;
        int err;

        st_ops = map->st_ops;
        type = st_ops->type;
        tname = st_ops->tname;
        err = find_struct_ops_kern_types(kern_btf, tname,
                                         &kern_type, &kern_type_id,
                                         &kern_vtype, &kern_vtype_id,
                                         &kern_data_member);
        if (err)
                return err;

        pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
                 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

        map->def.value_size = kern_vtype->size;
        map->btf_vmlinux_value_type_id = kern_vtype_id;

        st_ops->kern_vdata = calloc(1, kern_vtype->size);
        if (!st_ops->kern_vdata)
                return -ENOMEM;

        data = st_ops->data;
        kern_data_off = kern_data_member->offset / 8;
        kern_data = st_ops->kern_vdata + kern_data_off;

        member = btf_members(type);
        for (i = 0; i < btf_vlen(type); i++, member++) {
                const struct btf_type *mtype, *kern_mtype;
                __u32 mtype_id, kern_mtype_id;
                void *mdata, *kern_mdata;
                __s64 msize, kern_msize;
                __u32 moff, kern_moff;
                __u32 kern_member_idx;
                const char *mname;

                mname = btf__name_by_offset(btf, member->name_off);
                kern_member = find_member_by_name(kern_btf, kern_type, mname);
                if (!kern_member) {
                        pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
                                map->name, mname);
                        return -ENOTSUP;
                }

                kern_member_idx = kern_member - btf_members(kern_type);
                if (btf_member_bitfield_size(type, i) ||
                    btf_member_bitfield_size(kern_type, kern_member_idx)) {
                        pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
                                map->name, mname);
                        return -ENOTSUP;
                }

                moff = member->offset / 8;
                kern_moff = kern_member->offset / 8;

                mdata = data + moff;
                kern_mdata = kern_data + kern_moff;

                mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
                kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
                                                    &kern_mtype_id);
                if (BTF_INFO_KIND(mtype->info) !=
                    BTF_INFO_KIND(kern_mtype->info)) {
                        pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
                                map->name, mname, BTF_INFO_KIND(mtype->info),
                                BTF_INFO_KIND(kern_mtype->info));
                        return -ENOTSUP;
                }

                if (btf_is_ptr(mtype)) {
                        struct bpf_program *prog;

                        prog = st_ops->progs[i];
                        if (!prog)
                                continue;

                        kern_mtype = skip_mods_and_typedefs(kern_btf,
                                                            kern_mtype->type,
                                                            &kern_mtype_id);

                        /* mtype->type must be a func_proto which was
                         * guaranteed in bpf_object__collect_st_ops_relos(),
                         * so only check kern_mtype for func_proto here.
                         */
                        if (!btf_is_func_proto(kern_mtype)) {
                                pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
                                        map->name, mname);
                                return -ENOTSUP;
                        }

                        prog->attach_btf_id = kern_type_id;
                        prog->expected_attach_type = kern_member_idx;

                        st_ops->kern_func_off[i] = kern_data_off + kern_moff;

                        pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
                                 map->name, mname, prog->name, moff,
                                 kern_moff);

                        continue;
                }

                msize = btf__resolve_size(btf, mtype_id);
                kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
                if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
                        pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
                                map->name, mname, (ssize_t)msize,
                                (ssize_t)kern_msize);
                        return -ENOTSUP;
                }

                pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
                         map->name, mname, (unsigned int)msize,
                         moff, kern_moff);
                memcpy(kern_mdata, mdata, msize);
        }

        return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
        struct bpf_map *map;
        size_t i;
        int err;

        for (i = 0; i < obj->nr_maps; i++) {
                map = &obj->maps[i];

                if (!bpf_map__is_struct_ops(map))
                        continue;

                err = bpf_map__init_kern_struct_ops(map, obj->btf,
                                                    obj->btf_vmlinux);
                if (err)
                        return err;
        }

        return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
        const struct btf_type *type, *datasec;
        const struct btf_var_secinfo *vsi;
        struct bpf_struct_ops *st_ops;
        const char *tname, *var_name;
        __s32 type_id, datasec_id;
        const struct btf *btf;
        struct bpf_map *map;
        __u32 i;

        if (obj->efile.st_ops_shndx == -1)
                return 0;

        btf = obj->btf;
        datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
                                            BTF_KIND_DATASEC);
        if (datasec_id < 0) {
                pr_warn("struct_ops init: DATASEC %s not found\n",
                        STRUCT_OPS_SEC);
                return -EINVAL;
        }

        datasec = btf__type_by_id(btf, datasec_id);
        vsi = btf_var_secinfos(datasec);
        for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
                type = btf__type_by_id(obj->btf, vsi->type);
                var_name = btf__name_by_offset(obj->btf, type->name_off);

                type_id = btf__resolve_type(obj->btf, vsi->type);
                if (type_id < 0) {
                        pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
                                vsi->type, STRUCT_OPS_SEC);
                        return -EINVAL;
                }

                type = btf__type_by_id(obj->btf, type_id);
                tname = btf__name_by_offset(obj->btf, type->name_off);
                if (!tname[0]) {
                        pr_warn("struct_ops init: anonymous type is not supported\n");
                        return -ENOTSUP;
                }
                if (!btf_is_struct(type)) {
                        pr_warn("struct_ops init: %s is not a struct\n", tname);
                        return -EINVAL;
                }

                map = bpf_object__add_map(obj);
                if (IS_ERR(map))
                        return PTR_ERR(map);

                map->sec_idx = obj->efile.st_ops_shndx;
                map->sec_offset = vsi->offset;
                map->name = strdup(var_name);
                if (!map->name)
                        return -ENOMEM;

                map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
                map->def.key_size = sizeof(int);
                map->def.value_size = type->size;
                map->def.max_entries = 1;

                map->st_ops = calloc(1, sizeof(*map->st_ops));
                if (!map->st_ops)
                        return -ENOMEM;
                st_ops = map->st_ops;
                st_ops->data = malloc(type->size);
                st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
                st_ops->kern_func_off = malloc(btf_vlen(type) *
                                               sizeof(*st_ops->kern_func_off));
                if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
                        return -ENOMEM;

                if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
                        pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
                                var_name, STRUCT_OPS_SEC);
                        return -EINVAL;
                }

                memcpy(st_ops->data,
                       obj->efile.st_ops_data->d_buf + vsi->offset,
                       type->size);
                st_ops->tname = tname;
                st_ops->type = type;
                st_ops->type_id = type_id;

                pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
                         tname, type_id, var_name, vsi->offset);
        }

        return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
                                          const void *obj_buf,
                                          size_t obj_buf_sz,
                                          const char *obj_name)
{
        struct bpf_object *obj;
        char *end;

        obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
        if (!obj) {
                pr_warn("alloc memory failed for %s\n", path);
                return ERR_PTR(-ENOMEM);
        }

        strcpy(obj->path, path);
        if (obj_name) {
                strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
                obj->name[sizeof(obj->name) - 1] = 0;
        } else {
                /* Use the GNU version of basename(), which doesn't modify its arg. */
                strncpy(obj->name, basename((void *)path),
                        sizeof(obj->name) - 1);
                end = strchr(obj->name, '.');
                if (end)
                        *end = 0;
        }

        obj->efile.fd = -1;
        /*
         * The caller of this function should also call
         * bpf_object__elf_finish() after data collection to return
         * obj_buf to the user. If not, the buffer would have to be
         * duplicated to avoid the user freeing it before ELF
         * processing is finished.
         */
        obj->efile.obj_buf = obj_buf;
        obj->efile.obj_buf_sz = obj_buf_sz;
        obj->efile.maps_shndx = -1;
        obj->efile.btf_maps_shndx = -1;
        obj->efile.data_shndx = -1;
        obj->efile.rodata_shndx = -1;
        obj->efile.bss_shndx = -1;
        obj->efile.st_ops_shndx = -1;
        obj->kconfig_map_idx = -1;
        obj->rodata_map_idx = -1;

        obj->kern_version = get_kernel_version();
        obj->loaded = false;

        INIT_LIST_HEAD(&obj->list);
        list_add(&obj->list, &bpf_objects_list);
        return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
        if (!obj_elf_valid(obj))
                return;

        if (obj->efile.elf) {
                elf_end(obj->efile.elf);
                obj->efile.elf = NULL;
        }
        obj->efile.symbols = NULL;
        obj->efile.data = NULL;
        obj->efile.rodata = NULL;
        obj->efile.bss = NULL;
        obj->efile.st_ops_data = NULL;

        zfree(&obj->efile.reloc_sects);
        obj->efile.nr_reloc_sects = 0;
        zclose(obj->efile.fd);
        obj->efile.obj_buf = NULL;
        obj->efile.obj_buf_sz = 0;
}

/* if libelf is old and doesn't support mmap(), fall back to read() */
#ifndef ELF_C_READ_MMAP
#define ELF_C_READ_MMAP ELF_C_READ
#endif

static int bpf_object__elf_init(struct bpf_object *obj)
{
        int err = 0;
        GElf_Ehdr *ep;

        if (obj_elf_valid(obj)) {
                pr_warn("elf: init internal error\n");
                return -LIBBPF_ERRNO__LIBELF;
        }

        if (obj->efile.obj_buf_sz > 0) {
                /*
                 * obj_buf should have been validated by
                 * bpf_object__open_buffer().
                 */
                obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
                                            obj->efile.obj_buf_sz);
        } else {
                obj->efile.fd = open(obj->path, O_RDONLY);
                if (obj->efile.fd < 0) {
                        char errmsg[STRERR_BUFSIZE], *cp;

                        err = -errno;
                        cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
                        pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
                        return err;
                }

                obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
        }

        if (!obj->efile.elf) {
                pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__LIBELF;
                goto errout;
        }

        if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
                pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }
        ep = &obj->efile.ehdr;

        if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
                pr_warn("elf: failed to get section names section index for %s: %s\n",
                        obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        /* ELF is corrupted/truncated; avoid calling elf_strptr(). */
        if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
                pr_warn("elf: failed to get section names strings from %s: %s\n",
                        obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        /* Old LLVM versions set e_machine to EM_NONE */
        if (ep->e_type != ET_REL ||
            (ep->e_machine && ep->e_machine != EM_BPF)) {
                pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        return 0;
errout:
        bpf_object__elf_finish(obj);
        return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
        if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
                return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
        if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
                return 0;
#else
# error "Unrecognized __BYTE_ORDER"
#endif
        pr_warn("elf: endianness mismatch in %s.\n", obj->path);
        return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
        memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
        pr_debug("license of %s is %s\n", obj->path, obj->license);
        return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
        __u32 kver;

        if (size != sizeof(kver)) {
                pr_warn("invalid kver section in %s\n", obj->path);
                return -LIBBPF_ERRNO__FORMAT;
        }
        memcpy(&kver, data, sizeof(kver));
        obj->kern_version = kver;
        pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
        return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
        if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
            type == BPF_MAP_TYPE_HASH_OF_MAPS)
                return true;
        return false;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
                             __u32 *size)
{
        int ret = -ENOENT;

        *size = 0;
        if (!name) {
                return -EINVAL;
        } else if (!strcmp(name, DATA_SEC)) {
                if (obj->efile.data)
                        *size = obj->efile.data->d_size;
        } else if (!strcmp(name, BSS_SEC)) {
                if (obj->efile.bss)
                        *size = obj->efile.bss->d_size;
        } else if (!strcmp(name, RODATA_SEC)) {
                if (obj->efile.rodata)
                        *size = obj->efile.rodata->d_size;
        } else if (!strcmp(name, STRUCT_OPS_SEC)) {
                if (obj->efile.st_ops_data)
                        *size = obj->efile.st_ops_data->d_size;
        } else {
                Elf_Scn *scn = elf_sec_by_name(obj, name);
                Elf_Data *data = elf_sec_data(obj, scn);

                if (data) {
                        ret = 0; /* found it */
                        *size = data->d_size;
                }
        }

        return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
                                __u32 *off)
{
        Elf_Data *symbols = obj->efile.symbols;
        const char *sname;
        size_t si;

        if (!name || !off)
                return -EINVAL;

        for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
                GElf_Sym sym;

                if (!gelf_getsym(symbols, si, &sym))
                        continue;
                if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
                    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
                        continue;

                sname = elf_sym_str(obj, sym.st_name);
                if (!sname) {
                        pr_warn("failed to get sym name string for var %s\n",
                                name);
                        return -EIO;
                }
                if (strcmp(name, sname) == 0) {
                        *off = sym.st_value;
                        return 0;
                }
        }

        return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
        struct bpf_map *new_maps;
        size_t new_cap;
        int i;

        if (obj->nr_maps < obj->maps_cap)
                return &obj->maps[obj->nr_maps++];

        new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
        new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
        if (!new_maps) {
                pr_warn("alloc maps for object failed\n");
                return ERR_PTR(-ENOMEM);
        }

        obj->maps_cap = new_cap;
        obj->maps = new_maps;

        /* zero out new maps */
        memset(obj->maps + obj->nr_maps, 0,
               (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
        /*
         * Fill all fds with -1 so we won't close an incorrect fd (fd=0 is
         * stdin) on failure (zclose won't close a negative fd).
         */
        for (i = obj->nr_maps; i < obj->maps_cap; i++) {
                obj->maps[i].fd = -1;
                obj->maps[i].inner_map_fd = -1;
        }

        return &obj->maps[obj->nr_maps++];
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
        long page_sz = sysconf(_SC_PAGE_SIZE);
        size_t map_sz;

        map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
        map_sz = roundup(map_sz, page_sz);
        return map_sz;
}
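
/* Worked example (assuming a 4096-byte page): an internal map with
 * value_size=16 and max_entries=1 needs roundup(16, 8) * 1 = 16 bytes,
 * which roundup(16, 4096) pads to a single 4096-byte page for mmap().
 */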

static char *internal_map_name(struct bpf_object *obj,
                               enum libbpf_map_type type)
{
        char map_name[BPF_OBJ_NAME_LEN], *p;
        const char *sfx = libbpf_type_to_btf_name[type];
        int sfx_len = max((size_t)7, strlen(sfx));
        int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
                          strlen(obj->name));

        snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
                 sfx_len, libbpf_type_to_btf_name[type]);

        /* sanitise map name to characters allowed by kernel */
        for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
                if (!isalnum(*p) && *p != '_' && *p != '.')
                        *p = '_';

        return strdup(map_name);
}
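
/* E.g., for an object named "test_obj" and type LIBBPF_MAP_RODATA this
 * produces "test_obj.rodata". A longer object name such as
 * "test_object_name" is truncated to an 8-character prefix so the result
 * still fits in BPF_OBJ_NAME_LEN (16), again giving "test_obj.rodata".
 */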
1378
1379 static int
1380 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1381                               int sec_idx, void *data, size_t data_sz)
1382 {
1383         struct bpf_map_def *def;
1384         struct bpf_map *map;
1385         int err;
1386
1387         map = bpf_object__add_map(obj);
1388         if (IS_ERR(map))
1389                 return PTR_ERR(map);
1390
1391         map->libbpf_type = type;
1392         map->sec_idx = sec_idx;
1393         map->sec_offset = 0;
1394         map->name = internal_map_name(obj, type);
1395         if (!map->name) {
1396                 pr_warn("failed to alloc map name\n");
1397                 return -ENOMEM;
1398         }
1399
1400         def = &map->def;
1401         def->type = BPF_MAP_TYPE_ARRAY;
1402         def->key_size = sizeof(int);
1403         def->value_size = data_sz;
1404         def->max_entries = 1;
1405         def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1406                          ? BPF_F_RDONLY_PROG : 0;
1407         def->map_flags |= BPF_F_MMAPABLE;
1408
1409         pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1410                  map->name, map->sec_idx, map->sec_offset, def->map_flags);
1411
1412         map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
1413                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1414         if (map->mmaped == MAP_FAILED) {
1415                 err = -errno;
1416                 map->mmaped = NULL;
1417                 pr_warn("failed to alloc map '%s' content buffer: %d\n",
1418                         map->name, err);
1419                 zfree(&map->name);
1420                 return err;
1421         }
1422
1423         if (data)
1424                 memcpy(map->mmaped, data, data_sz);
1425
1426         pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1427         return 0;
1428 }
1429
1430 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1431 {
1432         int err;
1433
1434         /*
1435          * Populate obj->maps with libbpf internal maps.
1436          */
1437         if (obj->efile.data_shndx >= 0) {
1438                 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1439                                                     obj->efile.data_shndx,
1440                                                     obj->efile.data->d_buf,
1441                                                     obj->efile.data->d_size);
1442                 if (err)
1443                         return err;
1444         }
1445         if (obj->efile.rodata_shndx >= 0) {
1446                 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1447                                                     obj->efile.rodata_shndx,
1448                                                     obj->efile.rodata->d_buf,
1449                                                     obj->efile.rodata->d_size);
1450                 if (err)
1451                         return err;
1452
1453                 obj->rodata_map_idx = obj->nr_maps - 1;
1454         }
1455         if (obj->efile.bss_shndx >= 0) {
1456                 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1457                                                     obj->efile.bss_shndx,
1458                                                     NULL,
1459                                                     obj->efile.bss->d_size);
1460                 if (err)
1461                         return err;
1462         }
1463         return 0;
1464 }
1465
1466
1467 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1468                                                const void *name)
1469 {
1470         int i;
1471
1472         for (i = 0; i < obj->nr_extern; i++) {
1473                 if (strcmp(obj->externs[i].name, name) == 0)
1474                         return &obj->externs[i];
1475         }
1476         return NULL;
1477 }
1478
1479 static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1480                               char value)
1481 {
1482         switch (ext->kcfg.type) {
1483         case KCFG_BOOL:
1484                 if (value == 'm') {
1485                         pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
1486                                 ext->name, value);
1487                         return -EINVAL;
1488                 }
1489                 *(bool *)ext_val = value == 'y' ? true : false;
1490                 break;
1491         case KCFG_TRISTATE:
1492                 if (value == 'y')
1493                         *(enum libbpf_tristate *)ext_val = TRI_YES;
1494                 else if (value == 'm')
1495                         *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1496                 else /* value == 'n' */
1497                         *(enum libbpf_tristate *)ext_val = TRI_NO;
1498                 break;
1499         case KCFG_CHAR:
1500                 *(char *)ext_val = value;
1501                 break;
1502         case KCFG_UNKNOWN:
1503         case KCFG_INT:
1504         case KCFG_CHAR_ARR:
1505         default:
1506                 pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
1507                         ext->name, value);
1508                 return -EINVAL;
1509         }
1510         ext->is_set = true;
1511         return 0;
1512 }
1513
1514 static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1515                               const char *value)
1516 {
1517         size_t len;
1518
1519         if (ext->kcfg.type != KCFG_CHAR_ARR) {
1520                 pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
1521                 return -EINVAL;
1522         }
1523
1524         len = strlen(value);
1525         if (value[len - 1] != '"') {
1526                 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
1527                         ext->name, value);
1528                 return -EINVAL;
1529         }
1530
1531         /* strip quotes */
1532         len -= 2;
1533         if (len >= ext->kcfg.sz) {
1534                 pr_warn("extern (kcfg) '%s': string config %s (%zu bytes) is too long, truncating to %d bytes\n",
1535                         ext->name, value, len, ext->kcfg.sz - 1);
1536                 len = ext->kcfg.sz - 1;
1537         }
1538         memcpy(ext_val, value + 1, len);
1539         ext_val[len] = '\0';
1540         ext->is_set = true;
1541         return 0;
1542 }
1543
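/* Parse a full u64 value; the base is auto-detected by strtoull()
 * (e.g. "250", "0x1f", "0644"), and both trailing garbage and
 * overflow are rejected.
 */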
1544 static int parse_u64(const char *value, __u64 *res)
1545 {
1546         char *value_end;
1547         int err;
1548
1549         errno = 0;
1550         *res = strtoull(value, &value_end, 0);
1551         if (errno) {
1552                 err = -errno;
1553                 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1554                 return err;
1555         }
1556         if (*value_end) {
1557                 pr_warn("failed to parse '%s' as integer completely\n", value);
1558                 return -EINVAL;
1559         }
1560         return 0;
1561 }
1562
1563 static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
1564 {
1565         int bit_sz = ext->kcfg.sz * 8;
1566
1567         if (ext->kcfg.sz == 8)
1568                 return true;
1569
1570         /* Validate that a value stored in u64 fits in an integer of
1571          * `ext->kcfg.sz` bytes without any loss of information. If the
1572          * target integer is signed, we rely on the following limits for an
1573          * integer type of Y bits and the subsequent transformation:
1574          *
1575          *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
1576          *            0 <= X + 2^(Y-1) <= 2^Y - 1
1577          *            0 <= X + 2^(Y-1) <  2^Y
1578          *
1579          *  For unsigned target integer, check that all the (64 - Y) bits are
1580          *  zero.
1581          */
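        /* Worked example (illustrative): for a signed 1-byte extern
         * (bit_sz = 8), v = (__u64)-2 wraps to 126 after adding 2^7 and
         * passes the < 2^8 check, while v = 200 becomes 328 and is
         * correctly rejected for a signed char target.
         */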
1582         if (ext->kcfg.is_signed)
1583                 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
1584         else
1585                 return (v >> bit_sz) == 0;
1586 }
1587
1588 static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
1589                               __u64 value)
1590 {
1591         if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
1592                 pr_warn("extern (kcfg) %s=%llu should be integer\n",
1593                         ext->name, (unsigned long long)value);
1594                 return -EINVAL;
1595         }
1596         if (!is_kcfg_value_in_range(ext, value)) {
1597                 pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
1598                         ext->name, (unsigned long long)value, ext->kcfg.sz);
1599                 return -ERANGE;
1600         }
1601         switch (ext->kcfg.sz) {
1602         case 1: *(__u8 *)ext_val = value; break;
1603         case 2: *(__u16 *)ext_val = value; break;
1604         case 4: *(__u32 *)ext_val = value; break;
1605         case 8: *(__u64 *)ext_val = value; break;
1606         default:
1607                 return -EINVAL;
1608         }
1609         ext->is_set = true;
1610         return 0;
1611 }
1612
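/*
 * Handle one line of Kconfig input and, if it names a known kcfg
 * extern, write the parsed value into the .kconfig map data. E.g.
 * (illustrative lines):
 *   CONFIG_BPF_SYSCALL=y
 *   CONFIG_HZ=250
 *   CONFIG_DEFAULT_HOSTNAME="(none)"
 * Lines not starting with "CONFIG_" are ignored.
 */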
1613 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
1614                                             char *buf, void *data)
1615 {
1616         struct extern_desc *ext;
1617         char *sep, *value;
1618         int len, err = 0;
1619         void *ext_val;
1620         __u64 num;
1621
1622         if (strncmp(buf, "CONFIG_", 7))
1623                 return 0;
1624
1625         sep = strchr(buf, '=');
1626         if (!sep) {
1627                 pr_warn("failed to parse '%s': no separator\n", buf);
1628                 return -EINVAL;
1629         }
1630
1631         /* Trim ending '\n' */
1632         len = strlen(buf);
1633         if (buf[len - 1] == '\n')
1634                 buf[len - 1] = '\0';
1635         /* Split on '=' and ensure that a value is present. */
1636         *sep = '\0';
1637         if (!sep[1]) {
1638                 *sep = '=';
1639                 pr_warn("failed to parse '%s': no value\n", buf);
1640                 return -EINVAL;
1641         }
1642
1643         ext = find_extern_by_name(obj, buf);
1644         if (!ext || ext->is_set)
1645                 return 0;
1646
1647         ext_val = data + ext->kcfg.data_off;
1648         value = sep + 1;
1649
1650         switch (*value) {
1651         case 'y': case 'n': case 'm':
1652                 err = set_kcfg_value_tri(ext, ext_val, *value);
1653                 break;
1654         case '"':
1655                 err = set_kcfg_value_str(ext, ext_val, value);
1656                 break;
1657         default:
1658                 /* assume integer */
1659                 err = parse_u64(value, &num);
1660                 if (err) {
1661                         pr_warn("extern (kcfg) %s=%s should be integer\n",
1662                                 ext->name, value);
1663                         return err;
1664                 }
1665                 err = set_kcfg_value_num(ext, ext_val, num);
1666                 break;
1667         }
1668         if (err)
1669                 return err;
1670         pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
1671         return 0;
1672 }
1673
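/*
 * Populate kcfg externs from the running system's Kconfig: try
 * /boot/config-<uname -r> first, then fall back to /proc/config.gz.
 */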
1674 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1675 {
1676         char buf[PATH_MAX];
1677         struct utsname uts;
1678         int len, err = 0;
1679         gzFile file;
1680
1681         uname(&uts);
1682         len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
1683         if (len < 0)
1684                 return -EINVAL;
1685         else if (len >= PATH_MAX)
1686                 return -ENAMETOOLONG;
1687
1688         /* gzopen also accepts uncompressed files. */
1689         file = gzopen(buf, "r");
1690         if (!file)
1691                 file = gzopen("/proc/config.gz", "r");
1692
1693         if (!file) {
1694                 pr_warn("failed to open system Kconfig\n");
1695                 return -ENOENT;
1696         }
1697
1698         while (gzgets(file, buf, sizeof(buf))) {
1699                 err = bpf_object__process_kconfig_line(obj, buf, data);
1700                 if (err) {
1701                         pr_warn("error parsing system Kconfig line '%s': %d\n",
1702                                 buf, err);
1703                         goto out;
1704                 }
1705         }
1706
1707 out:
1708         gzclose(file);
1709         return err;
1710 }
1711
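/*
 * Like bpf_object__read_kconfig_file(), but parse a caller-supplied,
 * NUL-terminated Kconfig string, e.g. (illustrative) a
 * bpf_object_open_opts.kconfig value of "CONFIG_FOO=y\nCONFIG_BAR=4\n".
 */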
1712 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1713                                         const char *config, void *data)
1714 {
1715         char buf[PATH_MAX];
1716         int err = 0;
1717         FILE *file;
1718
1719         file = fmemopen((void *)config, strlen(config), "r");
1720         if (!file) {
1721                 err = -errno;
1722                 pr_warn("failed to open in-memory Kconfig: %d\n", err);
1723                 return err;
1724         }
1725
1726         while (fgets(buf, sizeof(buf), file)) {
1727                 err = bpf_object__process_kconfig_line(obj, buf, data);
1728                 if (err) {
1729                         pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1730                                 buf, err);
1731                         break;
1732                 }
1733         }
1734
1735         fclose(file);
1736         return err;
1737 }
1738
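/*
 * Create the internal .kconfig map, sized to cover the last kcfg
 * extern; this relies on kcfg externs having been assigned increasing
 * data_off values during extern collection.
 */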
1739 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
1740 {
1741         struct extern_desc *last_ext = NULL, *ext;
1742         size_t map_sz;
1743         int i, err;
1744
1745         for (i = 0; i < obj->nr_extern; i++) {
1746                 ext = &obj->externs[i];
1747                 if (ext->type == EXT_KCFG)
1748                         last_ext = ext;
1749         }
1750
1751         if (!last_ext)
1752                 return 0;
1753
1754         map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
1755         err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
1756                                             obj->efile.symbols_shndx,
1757                                             NULL, map_sz);
1758         if (err)
1759                 return err;
1760
1761         obj->kconfig_map_idx = obj->nr_maps - 1;
1762
1763         return 0;
1764 }
1765
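/*
 * Parse legacy (pre-BTF) map definitions from the "maps" ELF section.
 * An illustrative definition on the BPF program side:
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type = BPF_MAP_TYPE_HASH,
 *		.key_size = sizeof(__u32),
 *		.value_size = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 */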
1766 static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
1767 {
1768         Elf_Data *symbols = obj->efile.symbols;
1769         int i, map_def_sz = 0, nr_maps = 0, nr_syms;
1770         Elf_Data *data = NULL;
1771         Elf_Scn *scn;
1772
1773         if (obj->efile.maps_shndx < 0)
1774                 return 0;
1775
1776         if (!symbols)
1777                 return -EINVAL;
1778
1780         scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
1781         data = elf_sec_data(obj, scn);
1782         if (!scn || !data) {
1783                 pr_warn("elf: failed to get legacy map definitions for %s\n",
1784                         obj->path);
1785                 return -EINVAL;
1786         }
1787
1788         /*
1789          * Count number of maps. Each map has a name.
1790          * Array of maps is not supported: only the first element is
1791          * considered.
1792          *
1793          * TODO: Detect array of maps and report an error.
1794          */
1795         nr_syms = symbols->d_size / sizeof(GElf_Sym);
1796         for (i = 0; i < nr_syms; i++) {
1797                 GElf_Sym sym;
1798
1799                 if (!gelf_getsym(symbols, i, &sym))
1800                         continue;
1801                 if (sym.st_shndx != obj->efile.maps_shndx)
1802                         continue;
1803                 nr_maps++;
1804         }
1805         /* Assume equally sized map definitions */
1806         pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
1807                  nr_maps, data->d_size, obj->path);
1808
1809         if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
1810                 pr_warn("elf: unable to determine legacy map definition size in %s\n",
1811                         obj->path);
1812                 return -EINVAL;
1813         }
1814         map_def_sz = data->d_size / nr_maps;
1815
1816         /* Fill obj->maps using data in "maps" section.  */
1817         for (i = 0; i < nr_syms; i++) {
1818                 GElf_Sym sym;
1819                 const char *map_name;
1820                 struct bpf_map_def *def;
1821                 struct bpf_map *map;
1822
1823                 if (!gelf_getsym(symbols, i, &sym))
1824                         continue;
1825                 if (sym.st_shndx != obj->efile.maps_shndx)
1826                         continue;
1827
1828                 map = bpf_object__add_map(obj);
1829                 if (IS_ERR(map))
1830                         return PTR_ERR(map);
1831
1832                 map_name = elf_sym_str(obj, sym.st_name);
1833                 if (!map_name) {
1834                         pr_warn("failed to get map #%d name sym string for obj %s\n",
1835                                 i, obj->path);
1836                         return -LIBBPF_ERRNO__FORMAT;
1837                 }
1838
1839                 map->libbpf_type = LIBBPF_MAP_UNSPEC;
1840                 map->sec_idx = sym.st_shndx;
1841                 map->sec_offset = sym.st_value;
1842                 pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
1843                          map_name, map->sec_idx, map->sec_offset);
1844                 if (sym.st_value + map_def_sz > data->d_size) {
1845                         pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
1846                                 obj->path, map_name);
1847                         return -EINVAL;
1848                 }
1849
1850                 map->name = strdup(map_name);
1851                 if (!map->name) {
1852                         pr_warn("failed to alloc map name\n");
1853                         return -ENOMEM;
1854                 }
1855                 pr_debug("map %d is \"%s\"\n", i, map->name);
1856                 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
1857                 /*
1858                  * If the definition of the map in the object file fits in
1859                  * bpf_map_def, copy it.  Any extra fields in our version
1860                  * of bpf_map_def will stay zeroed, since newly added maps
1861                  * are zero-initialized by bpf_object__add_map().
1862                  */
1863                 if (map_def_sz <= sizeof(struct bpf_map_def)) {
1864                         memcpy(&map->def, def, map_def_sz);
1865                 } else {
1866                         /*
1867                          * Here the map structure being read is bigger than what
1868                          * we expect, truncate if the excess bits are all zero.
1869                          * If they are not zero, reject this map as
1870                          * incompatible.
1871                          */
1872                         char *b;
1873
1874                         for (b = ((char *)def) + sizeof(struct bpf_map_def);
1875                              b < ((char *)def) + map_def_sz; b++) {
1876                                 if (*b != 0) {
1877                                         pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
1878                                                 obj->path, map_name);
1879                                         if (strict)
1880                                                 return -EINVAL;
1881                                 }
1882                         }
1883                         memcpy(&map->def, def, sizeof(struct bpf_map_def));
1884                 }
1885         }
1886         return 0;
1887 }
1888
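/*
 * Resolve a BTF type ID down through typedefs and type modifiers
 * (const/volatile/restrict) to the underlying type, e.g. (illustrative)
 * "typedef const struct foo foo_t" resolves to "struct foo". If res_id
 * is non-NULL, it receives the ID of the final resolved type.
 */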
1889 static const struct btf_type *
1890 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
1891 {
1892         const struct btf_type *t = btf__type_by_id(btf, id);
1893
1894         if (res_id)
1895                 *res_id = id;
1896
1897         while (btf_is_mod(t) || btf_is_typedef(t)) {
1898                 if (res_id)
1899                         *res_id = t->type;
1900                 t = btf__type_by_id(btf, t->type);
1901         }
1902
1903         return t;
1904 }
1905
1906 static const struct btf_type *
1907 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
1908 {
1909         const struct btf_type *t;
1910
1911         t = skip_mods_and_typedefs(btf, id, NULL);
1912         if (!btf_is_ptr(t))
1913                 return NULL;
1914
1915         t = skip_mods_and_typedefs(btf, t->type, res_id);
1916
1917         return btf_is_func_proto(t) ? t : NULL;
1918 }
1919
1920 static const char *btf_kind_str(const struct btf_type *t)
1921 {
1922         switch (btf_kind(t)) {
1923         case BTF_KIND_UNKN: return "void";
1924         case BTF_KIND_INT: return "int";
1925         case BTF_KIND_PTR: return "ptr";
1926         case BTF_KIND_ARRAY: return "array";
1927         case BTF_KIND_STRUCT: return "struct";
1928         case BTF_KIND_UNION: return "union";
1929         case BTF_KIND_ENUM: return "enum";
1930         case BTF_KIND_FWD: return "fwd";
1931         case BTF_KIND_TYPEDEF: return "typedef";
1932         case BTF_KIND_VOLATILE: return "volatile";
1933         case BTF_KIND_CONST: return "const";
1934         case BTF_KIND_RESTRICT: return "restrict";
1935         case BTF_KIND_FUNC: return "func";
1936         case BTF_KIND_FUNC_PROTO: return "func_proto";
1937         case BTF_KIND_VAR: return "var";
1938         case BTF_KIND_DATASEC: return "datasec";
1939         default: return "unknown";
1940         }
1941 }
1942
1943 /*
1944  * Fetch integer attribute of BTF map definition. Such attributes are
1945  * represented using a pointer to an array, in which dimensionality of array
1946  * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
1947  * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
1948  * type definition, while using only sizeof(void *) space in ELF data section.
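 *
 * This is the layout emitted by the __uint() convenience macro in
 * bpf_helpers.h, e.g. #define __uint(name, val) int (*name)[val].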
1949  */
1950 static bool get_map_field_int(const char *map_name, const struct btf *btf,
1951                               const struct btf_member *m, __u32 *res)
1952 {
1953         const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
1954         const char *name = btf__name_by_offset(btf, m->name_off);
1955         const struct btf_array *arr_info;
1956         const struct btf_type *arr_t;
1957
1958         if (!btf_is_ptr(t)) {
1959                 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
1960                         map_name, name, btf_kind_str(t));
1961                 return false;
1962         }
1963
1964         arr_t = btf__type_by_id(btf, t->type);
1965         if (!arr_t) {
1966                 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
1967                         map_name, name, t->type);
1968                 return false;
1969         }
1970         if (!btf_is_array(arr_t)) {
1971                 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
1972                         map_name, name, btf_kind_str(arr_t));
1973                 return false;
1974         }
1975         arr_info = btf_array(arr_t);
1976         *res = arr_info->nelems;
1977         return true;
1978 }
1979
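/* Derive the default pin path for a map: <path>/<map name>, with
 * /sys/fs/bpf used as the root when no pin_root_path was provided.
 */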
1980 static int build_map_pin_path(struct bpf_map *map, const char *path)
1981 {
1982         char buf[PATH_MAX];
1983         int len;
1984
1985         if (!path)
1986                 path = "/sys/fs/bpf";
1987
1988         len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
1989         if (len < 0)
1990                 return -EINVAL;
1991         else if (len >= PATH_MAX)
1992                 return -ENAMETOOLONG;
1993
1994         return bpf_map__set_pin_path(map, buf);
1995 }
1996
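/*
 * Parse one BTF-defined map definition. An illustrative definition on
 * the BPF program side, using the __uint()/__type() macros from
 * bpf_helpers.h (struct datarec is a made-up value type):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 16);
 *		__type(key, __u32);
 *		__type(value, struct datarec);
 *	} my_map SEC(".maps");
 */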
1998 static int parse_btf_map_def(struct bpf_object *obj,
1999                              struct bpf_map *map,
2000                              const struct btf_type *def,
2001                              bool strict, bool is_inner,
2002                              const char *pin_root_path)
2003 {
2004         const struct btf_type *t;
2005         const struct btf_member *m;
2006         int vlen, i;
2007
2008         vlen = btf_vlen(def);
2009         m = btf_members(def);
2010         for (i = 0; i < vlen; i++, m++) {
2011                 const char *name = btf__name_by_offset(obj->btf, m->name_off);
2012
2013                 if (!name) {
2014                         pr_warn("map '%s': invalid field #%d.\n", map->name, i);
2015                         return -EINVAL;
2016                 }
2017                 if (strcmp(name, "type") == 0) {
2018                         if (!get_map_field_int(map->name, obj->btf, m,
2019                                                &map->def.type))
2020                                 return -EINVAL;
2021                         pr_debug("map '%s': found type = %u.\n",
2022                                  map->name, map->def.type);
2023                 } else if (strcmp(name, "max_entries") == 0) {
2024                         if (!get_map_field_int(map->name, obj->btf, m,
2025                                                &map->def.max_entries))
2026                                 return -EINVAL;
2027                         pr_debug("map '%s': found max_entries = %u.\n",
2028                                  map->name, map->def.max_entries);
2029                 } else if (strcmp(name, "map_flags") == 0) {
2030                         if (!get_map_field_int(map->name, obj->btf, m,
2031                                                &map->def.map_flags))
2032                                 return -EINVAL;
2033                         pr_debug("map '%s': found map_flags = %u.\n",
2034                                  map->name, map->def.map_flags);
2035                 } else if (strcmp(name, "numa_node") == 0) {
2036                         if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node))
2037                                 return -EINVAL;
2038                         pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node);
2039                 } else if (strcmp(name, "key_size") == 0) {
2040                         __u32 sz;
2041
2042                         if (!get_map_field_int(map->name, obj->btf, m, &sz))
2043                                 return -EINVAL;
2044                         pr_debug("map '%s': found key_size = %u.\n",
2045                                  map->name, sz);
2046                         if (map->def.key_size && map->def.key_size != sz) {
2047                                 pr_warn("map '%s': conflicting key size %u != %u.\n",
2048                                         map->name, map->def.key_size, sz);
2049                                 return -EINVAL;
2050                         }
2051                         map->def.key_size = sz;
2052                 } else if (strcmp(name, "key") == 0) {
2053                         __s64 sz;
2054
2055                         t = btf__type_by_id(obj->btf, m->type);
2056                         if (!t) {
2057                                 pr_warn("map '%s': key type [%d] not found.\n",
2058                                         map->name, m->type);
2059                                 return -EINVAL;
2060                         }
2061                         if (!btf_is_ptr(t)) {
2062                                 pr_warn("map '%s': key spec is not PTR: %s.\n",
2063                                         map->name, btf_kind_str(t));
2064                                 return -EINVAL;
2065                         }
2066                         sz = btf__resolve_size(obj->btf, t->type);
2067                         if (sz < 0) {
2068                                 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2069                                         map->name, t->type, (ssize_t)sz);
2070                                 return sz;
2071                         }
2072                         pr_debug("map '%s': found key [%u], sz = %zd.\n",
2073                                  map->name, t->type, (ssize_t)sz);
2074                         if (map->def.key_size && map->def.key_size != sz) {
2075                                 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2076                                         map->name, map->def.key_size, (ssize_t)sz);
2077                                 return -EINVAL;
2078                         }
2079                         map->def.key_size = sz;
2080                         map->btf_key_type_id = t->type;
2081                 } else if (strcmp(name, "value_size") == 0) {
2082                         __u32 sz;
2083
2084                         if (!get_map_field_int(map->name, obj->btf, m, &sz))
2085                                 return -EINVAL;
2086                         pr_debug("map '%s': found value_size = %u.\n",
2087                                  map->name, sz);
2088                         if (map->def.value_size && map->def.value_size != sz) {
2089                                 pr_warn("map '%s': conflicting value size %u != %u.\n",
2090                                         map->name, map->def.value_size, sz);
2091                                 return -EINVAL;
2092                         }
2093                         map->def.value_size = sz;
2094                 } else if (strcmp(name, "value") == 0) {
2095                         __s64 sz;
2096
2097                         t = btf__type_by_id(obj->btf, m->type);
2098                         if (!t) {
2099                                 pr_warn("map '%s': value type [%d] not found.\n",
2100                                         map->name, m->type);
2101                                 return -EINVAL;
2102                         }
2103                         if (!btf_is_ptr(t)) {
2104                                 pr_warn("map '%s': value spec is not PTR: %s.\n",
2105                                         map->name, btf_kind_str(t));
2106                                 return -EINVAL;
2107                         }
2108                         sz = btf__resolve_size(obj->btf, t->type);
2109                         if (sz < 0) {
2110                                 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2111                                         map->name, t->type, (ssize_t)sz);
2112                                 return sz;
2113                         }
2114                         pr_debug("map '%s': found value [%u], sz = %zd.\n",
2115                                  map->name, t->type, (ssize_t)sz);
2116                         if (map->def.value_size && map->def.value_size != sz) {
2117                                 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2118                                         map->name, map->def.value_size, (ssize_t)sz);
2119                                 return -EINVAL;
2120                         }
2121                         map->def.value_size = sz;
2122                         map->btf_value_type_id = t->type;
2123                 } else if (strcmp(name, "values") == 0) {
2125                         int err;
2126
2127                         if (is_inner) {
2128                                 pr_warn("map '%s': multi-level inner maps not supported.\n",
2129                                         map->name);
2130                                 return -ENOTSUP;
2131                         }
2132                         if (i != vlen - 1) {
2133                                 pr_warn("map '%s': '%s' member should be last.\n",
2134                                         map->name, name);
2135                                 return -EINVAL;
2136                         }
2137                         if (!bpf_map_type__is_map_in_map(map->def.type)) {
2138                                 pr_warn("map '%s': should be map-in-map.\n",
2139                                         map->name);
2140                                 return -ENOTSUP;
2141                         }
2142                         if (map->def.value_size && map->def.value_size != 4) {
2143                                 pr_warn("map '%s': conflicting value size %u != 4.\n",
2144                                         map->name, map->def.value_size);
2145                                 return -EINVAL;
2146                         }
2147                         map->def.value_size = 4;
2148                         t = btf__type_by_id(obj->btf, m->type);
2149                         if (!t) {
2150                                 pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
2151                                         map->name, m->type);
2152                                 return -EINVAL;
2153                         }
2154                         if (!btf_is_array(t) || btf_array(t)->nelems) {
2155                                 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
2156                                         map->name);
2157                                 return -EINVAL;
2158                         }
2159                         t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
2160                                                    NULL);
2161                         if (!btf_is_ptr(t)) {
2162                                 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2163                                         map->name, btf_kind_str(t));
2164                                 return -EINVAL;
2165                         }
2166                         t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
2167                         if (!btf_is_struct(t)) {
2168                                 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2169                                         map->name, btf_kind_str(t));
2170                                 return -EINVAL;
2171                         }
2172
2173                         map->inner_map = calloc(1, sizeof(*map->inner_map));
2174                         if (!map->inner_map)
2175                                 return -ENOMEM;
2176                         map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
2177                         map->inner_map->name = malloc(strlen(map->name) +
2178                                                       sizeof(".inner") + 1);
2179                         if (!map->inner_map->name)
2180                                 return -ENOMEM;
2181                         sprintf(map->inner_map->name, "%s.inner", map->name);
2182
2183                         err = parse_btf_map_def(obj, map->inner_map, t, strict,
2184                                                 true /* is_inner */, NULL);
2185                         if (err)
2186                                 return err;
2187                 } else if (strcmp(name, "pinning") == 0) {
2188                         __u32 val;
2189                         int err;
2190
2191                         if (is_inner) {
2192                                 pr_debug("map '%s': inner def can't be pinned.\n",
2193                                          map->name);
2194                                 return -EINVAL;
2195                         }
2196                         if (!get_map_field_int(map->name, obj->btf, m, &val))
2197                                 return -EINVAL;
2198                         pr_debug("map '%s': found pinning = %u.\n",
2199                                  map->name, val);
2200
2201                         if (val != LIBBPF_PIN_NONE &&
2202                             val != LIBBPF_PIN_BY_NAME) {
2203                                 pr_warn("map '%s': invalid pinning value %u.\n",
2204                                         map->name, val);
2205                                 return -EINVAL;
2206                         }
2207                         if (val == LIBBPF_PIN_BY_NAME) {
2208                                 err = build_map_pin_path(map, pin_root_path);
2209                                 if (err) {
2210                                         pr_warn("map '%s': couldn't build pin path.\n",
2211                                                 map->name);
2212                                         return err;
2213                                 }
2214                         }
2215                 } else {
2216                         if (strict) {
2217                                 pr_warn("map '%s': unknown field '%s'.\n",
2218                                         map->name, name);
2219                                 return -ENOTSUP;
2220                         }
2221                         pr_debug("map '%s': ignoring unknown field '%s'.\n",
2222                                  map->name, name);
2223                 }
2224         }
2225
2226         if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
2227                 pr_warn("map '%s': map type isn't specified.\n", map->name);
2228                 return -EINVAL;
2229         }
2230
2231         return 0;
2232 }
2233
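/*
 * Materialize one bpf_map from a single variable of the .maps DATASEC:
 * sanity-check the variable's BTF (a named global/static VAR pointing
 * at a struct), then hand the struct off to parse_btf_map_def().
 */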
2234 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2235                                          const struct btf_type *sec,
2236                                          int var_idx, int sec_idx,
2237                                          const Elf_Data *data, bool strict,
2238                                          const char *pin_root_path)
2239 {
2240         const struct btf_type *var, *def;
2241         const struct btf_var_secinfo *vi;
2242         const struct btf_var *var_extra;
2243         const char *map_name;
2244         struct bpf_map *map;
2245
2246         vi = btf_var_secinfos(sec) + var_idx;
2247         var = btf__type_by_id(obj->btf, vi->type);
2248         var_extra = btf_var(var);
2249         map_name = btf__name_by_offset(obj->btf, var->name_off);
2250
2251         if (map_name == NULL || map_name[0] == '\0') {
2252                 pr_warn("map #%d: empty name.\n", var_idx);
2253                 return -EINVAL;
2254         }
2255         if ((__u64)vi->offset + vi->size > data->d_size) {
2256                 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2257                 return -EINVAL;
2258         }
2259         if (!btf_is_var(var)) {
2260                 pr_warn("map '%s': unexpected var kind %s.\n",
2261                         map_name, btf_kind_str(var));
2262                 return -EINVAL;
2263         }
2264         if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2265             var_extra->linkage != BTF_VAR_STATIC) {
2266                 pr_warn("map '%s': unsupported var linkage %u.\n",
2267                         map_name, var_extra->linkage);
2268                 return -EOPNOTSUPP;
2269         }
2270
2271         def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2272         if (!btf_is_struct(def)) {
2273                 pr_warn("map '%s': unexpected def kind %s.\n",
2274                         map_name, btf_kind_str(def));
2275                 return -EINVAL;
2276         }
2277         if (def->size > vi->size) {
2278                 pr_warn("map '%s': invalid def size.\n", map_name);
2279                 return -EINVAL;
2280         }
2281
2282         map = bpf_object__add_map(obj);
2283         if (IS_ERR(map))
2284                 return PTR_ERR(map);
2285         map->name = strdup(map_name);
2286         if (!map->name) {
2287                 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2288                 return -ENOMEM;
2289         }
2290         map->libbpf_type = LIBBPF_MAP_UNSPEC;
2291         map->def.type = BPF_MAP_TYPE_UNSPEC;
2292         map->sec_idx = sec_idx;
2293         map->sec_offset = vi->offset;
2294         map->btf_var_idx = var_idx;
2295         pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2296                  map_name, map->sec_idx, map->sec_offset);
2297
2298         return parse_btf_map_def(obj, map, def, strict, false, pin_root_path);
2299 }
2300
2301 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2302                                           const char *pin_root_path)
2303 {
2304         const struct btf_type *sec = NULL;
2305         int nr_types, i, vlen, err;
2306         const struct btf_type *t;
2307         const char *name;
2308         Elf_Data *data;
2309         Elf_Scn *scn;
2310
2311         if (obj->efile.btf_maps_shndx < 0)
2312                 return 0;
2313
2314         scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2315         data = elf_sec_data(obj, scn);
2316         if (!scn || !data) {
2317                 pr_warn("elf: failed to get %s map definitions for %s\n",
2318                         MAPS_ELF_SEC, obj->path);
2319                 return -EINVAL;
2320         }
2321
2322         nr_types = btf__get_nr_types(obj->btf);
2323         for (i = 1; i <= nr_types; i++) {
2324                 t = btf__type_by_id(obj->btf, i);
2325                 if (!btf_is_datasec(t))
2326                         continue;
2327                 name = btf__name_by_offset(obj->btf, t->name_off);
2328                 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2329                         sec = t;
2330                         obj->efile.btf_maps_sec_btf_id = i;
2331                         break;
2332                 }
2333         }
2334
2335         if (!sec) {
2336                 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2337                 return -ENOENT;
2338         }
2339
2340         vlen = btf_vlen(sec);
2341         for (i = 0; i < vlen; i++) {
2342                 err = bpf_object__init_user_btf_map(obj, sec, i,
2343                                                     obj->efile.btf_maps_shndx,
2344                                                     data, strict,
2345                                                     pin_root_path);
2346                 if (err)
2347                         return err;
2348         }
2349
2350         return 0;
2351 }
2352
2353 static int bpf_object__init_maps(struct bpf_object *obj,
2354                                  const struct bpf_object_open_opts *opts)
2355 {
2356         const char *pin_root_path;
2357         bool strict;
2358         int err;
2359
2360         strict = !OPTS_GET(opts, relaxed_maps, false);
2361         pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2362
2363         err = bpf_object__init_user_maps(obj, strict);
2364         err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2365         err = err ?: bpf_object__init_global_data_maps(obj);
2366         err = err ?: bpf_object__init_kconfig_map(obj);
2367         err = err ?: bpf_object__init_struct_ops_maps(obj);
2368         if (err)
2369                 return err;
2370
2371         return 0;
2372 }
2373
2374 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2375 {
2376         GElf_Shdr sh;
2377
2378         if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
2379                 return false;
2380
2381         return sh.sh_flags & SHF_EXECINSTR;
2382 }
2383
2384 static bool btf_needs_sanitization(struct bpf_object *obj)
2385 {
2386         bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2387         bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
2388         bool has_func = kernel_supports(FEAT_BTF_FUNC);
2389
2390         return !has_func || !has_datasec || !has_func_global;
2391 }
2392
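/*
 * In-place downgrade of BTF kinds the running kernel doesn't support:
 * VAR -> INT, DATASEC -> STRUCT, FUNC_PROTO -> ENUM, FUNC -> TYPEDEF,
 * and global FUNCs -> static FUNCs, so the rest of the BTF still loads.
 */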
2393 static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2394 {
2395         bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2396         bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
2397         bool has_func = kernel_supports(FEAT_BTF_FUNC);
2398         struct btf_type *t;
2399         int i, j, vlen;
2400
2401         for (i = 1; i <= btf__get_nr_types(btf); i++) {
2402                 t = (struct btf_type *)btf__type_by_id(btf, i);
2403
2404                 if (!has_datasec && btf_is_var(t)) {
2405                         /* replace VAR with INT */
2406                         t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2407                         /*
2408                          * using size = 1 is the safest choice; 4 would be too
2409                          * big and would cause a kernel BTF validation failure
2410                          * if the original variable took fewer than 4 bytes
2411                          */
2412                         t->size = 1;
2413                         *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2414                 } else if (!has_datasec && btf_is_datasec(t)) {
2415                         /* replace DATASEC with STRUCT */
2416                         const struct btf_var_secinfo *v = btf_var_secinfos(t);
2417                         struct btf_member *m = btf_members(t);
2418                         struct btf_type *vt;
2419                         char *name;
2420
2421                         name = (char *)btf__name_by_offset(btf, t->name_off);
2422                         while (*name) {
2423                                 if (*name == '.')
2424                                         *name = '_';
2425                                 name++;
2426                         }
2427
2428                         vlen = btf_vlen(t);
2429                         t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2430                         for (j = 0; j < vlen; j++, v++, m++) {
2431                                 /* order of field assignments is important */
2432                                 m->offset = v->offset * 8;
2433                                 m->type = v->type;
2434                                 /* preserve variable name as member name */
2435                                 vt = (void *)btf__type_by_id(btf, v->type);
2436                                 m->name_off = vt->name_off;
2437                         }
2438                 } else if (!has_func && btf_is_func_proto(t)) {
2439                         /* replace FUNC_PROTO with ENUM */
2440                         vlen = btf_vlen(t);
2441                         t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2442                         t->size = sizeof(__u32); /* kernel enforced */
2443                 } else if (!has_func && btf_is_func(t)) {
2444                         /* replace FUNC with TYPEDEF */
2445                         t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2446                 } else if (!has_func_global && btf_is_func(t)) {
2447                         /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2448                         t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2449                 }
2450         }
2451 }
2452
2453 static bool libbpf_needs_btf(const struct bpf_object *obj)
2454 {
2455         return obj->efile.btf_maps_shndx >= 0 ||
2456                obj->efile.st_ops_shndx >= 0 ||
2457                obj->nr_extern > 0;
2458 }
2459
2460 static bool kernel_needs_btf(const struct bpf_object *obj)
2461 {
2462         return obj->efile.st_ops_shndx >= 0;
2463 }
2464
2465 static int bpf_object__init_btf(struct bpf_object *obj,
2466                                 Elf_Data *btf_data,
2467                                 Elf_Data *btf_ext_data)
2468 {
2469         int err = -ENOENT;
2470
2471         if (btf_data) {
2472                 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2473                 if (IS_ERR(obj->btf)) {
2474                         err = PTR_ERR(obj->btf);
2475                         obj->btf = NULL;
2476                         pr_warn("Error loading ELF section %s: %d.\n",
2477                                 BTF_ELF_SEC, err);
2478                         goto out;
2479                 }
2480                 /* enforce 8-byte pointers for BPF-targeted BTFs */
2481                 btf__set_pointer_size(obj->btf, 8);
2482                 err = 0;
2483         }
2484         if (btf_ext_data) {
2485                 if (!obj->btf) {
2486                         pr_debug("Ignoring ELF section %s: the ELF section %s it depends on was not found.\n",
2487                                  BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2488                         goto out;
2489                 }
2490                 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
2491                                             btf_ext_data->d_size);
2492                 if (IS_ERR(obj->btf_ext)) {
2493                         pr_warn("Error loading ELF section %s: %ld. Ignoring and continuing.\n",
2494                                 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
2495                         obj->btf_ext = NULL;
2496                         goto out;
2497                 }
2498         }
2499 out:
2500         if (err && libbpf_needs_btf(obj)) {
2501                 pr_warn("BTF is required, but is missing or corrupted.\n");
2502                 return err;
2503         }
2504         return 0;
2505 }
2506
2507 static int bpf_object__finalize_btf(struct bpf_object *obj)
2508 {
2509         int err;
2510
2511         if (!obj->btf)
2512                 return 0;
2513
2514         err = btf__finalize_data(obj, obj->btf);
2515         if (err) {
2516                 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2517                 return err;
2518         }
2519
2520         return 0;
2521 }
2522
2523 static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2524 {
2525         if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2526             prog->type == BPF_PROG_TYPE_LSM)
2527                 return true;
2528
2529         /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2530          * also need vmlinux BTF
2531          */
2532         if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2533                 return true;
2534
2535         return false;
2536 }
2537
2538 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2539 {
2540         struct bpf_program *prog;
2541         int i;
2542
2543         /* CO-RE relocations need kernel BTF */
2544         if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
2545                 return true;
2546
2547         /* Support for typed ksyms needs kernel BTF */
2548         for (i = 0; i < obj->nr_extern; i++) {
2549                 const struct extern_desc *ext;
2550
2551                 ext = &obj->externs[i];
2552                 if (ext->type == EXT_KSYM && ext->ksym.type_id)
2553                         return true;
2554         }
2555
2556         bpf_object__for_each_program(prog, obj) {
2557                 if (!prog->load)
2558                         continue;
2559                 if (prog_needs_vmlinux_btf(prog))
2560                         return true;
2561         }
2562
2563         return false;
2564 }
2565
2566 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2567 {
2568         int err;
2569
2570         /* btf_vmlinux could be loaded earlier */
2571         if (obj->btf_vmlinux)
2572                 return 0;
2573
2574         if (!force && !obj_needs_vmlinux_btf(obj))
2575                 return 0;
2576
2577         obj->btf_vmlinux = libbpf_find_kernel_btf();
2578         if (IS_ERR(obj->btf_vmlinux)) {
2579                 err = PTR_ERR(obj->btf_vmlinux);
2580                 pr_warn("Error loading vmlinux BTF: %d\n", err);
2581                 obj->btf_vmlinux = NULL;
2582                 return err;
2583         }
2584         return 0;
2585 }
2586
2587 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2588 {
2589         struct btf *kern_btf = obj->btf;
2590         bool btf_mandatory, sanitize;
2591         int err = 0;
2592
2593         if (!obj->btf)
2594                 return 0;
2595
2596         if (!kernel_supports(FEAT_BTF)) {
2597                 if (kernel_needs_btf(obj)) {
2598                         err = -EOPNOTSUPP;
2599                         goto report;
2600                 }
2601                 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
2602                 return 0;
2603         }
2604
2605         sanitize = btf_needs_sanitization(obj);
2606         if (sanitize) {
2607                 const void *raw_data;
2608                 __u32 sz;
2609
2610                 /* clone BTF to sanitize a copy and leave the original intact */
2611                 raw_data = btf__get_raw_data(obj->btf, &sz);
2612                 kern_btf = btf__new(raw_data, sz);
2613                 if (IS_ERR(kern_btf))
2614                         return PTR_ERR(kern_btf);
2615
2616                 /* enforce 8-byte pointers for BPF-targeted BTFs */
2617                 btf__set_pointer_size(obj->btf, 8);
2618                 bpf_object__sanitize_btf(obj, kern_btf);
2619         }
2620
2621         err = btf__load(kern_btf);
2622         if (sanitize) {
2623                 if (!err) {
2624                         /* move fd to libbpf's BTF */
2625                         btf__set_fd(obj->btf, btf__fd(kern_btf));
2626                         btf__set_fd(kern_btf, -1);
2627                 }
2628                 btf__free(kern_btf);
2629         }
2630 report:
2631         if (err) {
2632                 btf_mandatory = kernel_needs_btf(obj);
2633                 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
2634                         btf_mandatory ? "BTF is mandatory, can't proceed."
2635                                       : "BTF is optional, ignoring.");
2636                 if (!btf_mandatory)
2637                         err = 0;
2638         }
2639         return err;
2640 }
2641
2642 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
2643 {
2644         const char *name;
2645
2646         name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
2647         if (!name) {
2648                 pr_warn("elf: failed to get symbol name string at offset %zu from %s: %s\n",
2649                         off, obj->path, elf_errmsg(-1));
2650                 return NULL;
2651         }
2652
2653         return name;
2654 }
2655
2656 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
2657 {
2658         const char *name;
2659
2660         name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
2661         if (!name) {
2662                 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2663                         off, obj->path, elf_errmsg(-1));
2664                 return NULL;
2665         }
2666
2667         return name;
2668 }
2669
2670 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
2671 {
2672         Elf_Scn *scn;
2673
2674         scn = elf_getscn(obj->efile.elf, idx);
2675         if (!scn) {
2676                 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
2677                         idx, obj->path, elf_errmsg(-1));
2678                 return NULL;
2679         }
2680         return scn;
2681 }
2682
2683 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
2684 {
2685         Elf_Scn *scn = NULL;
2686         Elf *elf = obj->efile.elf;
2687         const char *sec_name;
2688
2689         while ((scn = elf_nextscn(elf, scn)) != NULL) {
2690                 sec_name = elf_sec_name(obj, scn);
2691                 if (!sec_name)
2692                         return NULL;
2693
2694                 if (strcmp(sec_name, name) != 0)
2695                         continue;
2696
2697                 return scn;
2698         }
2699         return NULL;
2700 }
2701
2702 static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
2703 {
2704         if (!scn)
2705                 return -EINVAL;
2706
2707         if (gelf_getshdr(scn, hdr) != hdr) {
2708                 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
2709                         elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2710                 return -EINVAL;
2711         }
2712
2713         return 0;
2714 }
2715
2716 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
2717 {
2718         const char *name;
2719         GElf_Shdr sh;
2720
2721         if (!scn)
2722                 return NULL;
2723
2724         if (elf_sec_hdr(obj, scn, &sh))
2725                 return NULL;
2726
2727         name = elf_sec_str(obj, sh.sh_name);
2728         if (!name) {
2729                 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
2730                         elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2731                 return NULL;
2732         }
2733
2734         return name;
2735 }
2736
2737 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
2738 {
2739         Elf_Data *data;
2740
2741         if (!scn)
2742                 return NULL;
2743
2744         data = elf_getdata(scn, 0);
2745         if (!data) {
2746                 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
2747                         elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
2748                         obj->path, elf_errmsg(-1));
2749                 return NULL;
2750         }
2751
2752         return data;
2753 }
2754
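/* Linearly scan the symbol table for a symbol of type 'sym_type'
 * located exactly at offset 'off' within section 'sec_idx'.
 */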
2755 static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
2756                               size_t off, __u32 sym_type, GElf_Sym *sym)
2757 {
2758         Elf_Data *symbols = obj->efile.symbols;
2759         size_t n = symbols->d_size / sizeof(GElf_Sym);
2760         size_t i;
2761
2762         for (i = 0; i < n; i++) {
2763                 if (!gelf_getsym(symbols, i, sym))
2764                         continue;
2765                 if (sym->st_shndx != sec_idx || sym->st_value != off)
2766                         continue;
2767                 if (GELF_ST_TYPE(sym->st_info) != sym_type)
2768                         continue;
2769                 return 0;
2770         }
2771
2772         return -ENOENT;
2773 }
2774
2775 static bool is_sec_name_dwarf(const char *name)
2776 {
2777         /* approximation, but the actual list is too long */
2778         return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
2779 }
2780
2781 static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
2782 {
2783         /* no special handling of .strtab */
2784         if (hdr->sh_type == SHT_STRTAB)
2785                 return true;
2786
2787         /* ignore .llvm_addrsig section as well */
2788         if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */)
2789                 return true;
2790
2791         /* no subprograms will lead to an empty .text section, ignore it */
2792         if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
2793             strcmp(name, ".text") == 0)
2794                 return true;
2795
2796         /* DWARF sections */
2797         if (is_sec_name_dwarf(name))
2798                 return true;
2799
2800         if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
2801                 name += sizeof(".rel") - 1;
2802                 /* DWARF section relocations */
2803                 if (is_sec_name_dwarf(name))
2804                         return true;
2805
2806                 /* .BTF and .BTF.ext don't need relocations */
2807                 if (strcmp(name, BTF_ELF_SEC) == 0 ||
2808                     strcmp(name, BTF_EXT_ELF_SEC) == 0)
2809                         return true;
2810         }
2811
2812         return false;
2813 }
2814
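/* Comparator for sorting programs by section index, then by
 * instruction offset within that section.
 */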
2815 static int cmp_progs(const void *_a, const void *_b)
2816 {
2817         const struct bpf_program *a = _a;
2818         const struct bpf_program *b = _b;
2819
2820         if (a->sec_idx != b->sec_idx)
2821                 return a->sec_idx < b->sec_idx ? -1 : 1;
2822
2823         /* two programs in the same section can't share sec_insn_off */
2824         return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
2825 }
2826
2827 static int bpf_object__elf_collect(struct bpf_object *obj)
2828 {
2829         Elf *elf = obj->efile.elf;
2830         Elf_Data *btf_ext_data = NULL;
2831         Elf_Data *btf_data = NULL;
2832         int idx = 0, err = 0;
2833         const char *name;
2834         Elf_Data *data;
2835         Elf_Scn *scn;
2836         GElf_Shdr sh;
2837
2838         /* a bunch of ELF parsing functionality depends on processing symbols,
2839          * so do the first pass and find the symbol table
2840          */
2841         scn = NULL;
2842         while ((scn = elf_nextscn(elf, scn)) != NULL) {
2843                 if (elf_sec_hdr(obj, scn, &sh))
2844                         return -LIBBPF_ERRNO__FORMAT;
2845
2846                 if (sh.sh_type == SHT_SYMTAB) {
2847                         if (obj->efile.symbols) {
2848                                 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
2849                                 return -LIBBPF_ERRNO__FORMAT;
2850                         }
2851
2852                         data = elf_sec_data(obj, scn);
2853                         if (!data)
2854                                 return -LIBBPF_ERRNO__FORMAT;
2855
2856                         obj->efile.symbols = data;
2857                         obj->efile.symbols_shndx = elf_ndxscn(scn);
2858                         obj->efile.strtabidx = sh.sh_link;
2859                 }
2860         }
2861
2862         scn = NULL;
2863         while ((scn = elf_nextscn(elf, scn)) != NULL) {
2864                 idx++;
2865
2866                 if (elf_sec_hdr(obj, scn, &sh))
2867                         return -LIBBPF_ERRNO__FORMAT;
2868
2869                 name = elf_sec_str(obj, sh.sh_name);
2870                 if (!name)
2871                         return -LIBBPF_ERRNO__FORMAT;
2872
2873                 if (ignore_elf_section(&sh, name))
2874                         continue;
2875
2876                 data = elf_sec_data(obj, scn);
2877                 if (!data)
2878                         return -LIBBPF_ERRNO__FORMAT;
2879
2880                 pr_debug("elf: section(%d) %s, size %lu, link %d, flags %lx, type=%d\n",
2881                          idx, name, (unsigned long)data->d_size,
2882                          (int)sh.sh_link, (unsigned long)sh.sh_flags,
2883                          (int)sh.sh_type);
2884
2885                 if (strcmp(name, "license") == 0) {
2886                         err = bpf_object__init_license(obj, data->d_buf, data->d_size);
2887                         if (err)
2888                                 return err;
2889                 } else if (strcmp(name, "version") == 0) {
2890                         err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
2891                         if (err)
2892                                 return err;
2893                 } else if (strcmp(name, "maps") == 0) {
2894                         obj->efile.maps_shndx = idx;
2895                 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
2896                         obj->efile.btf_maps_shndx = idx;
2897                 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
2898                         btf_data = data;
2899                 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
2900                         btf_ext_data = data;
2901                 } else if (sh.sh_type == SHT_SYMTAB) {
2902                         /* already processed during the first pass above */
2903                 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
2904                         if (sh.sh_flags & SHF_EXECINSTR) {
2905                                 if (strcmp(name, ".text") == 0)
2906                                         obj->efile.text_shndx = idx;
2907                                 err = bpf_object__add_programs(obj, data, name, idx);
2908                                 if (err)
2909                                         return err;
2910                         } else if (strcmp(name, DATA_SEC) == 0) {
2911                                 obj->efile.data = data;
2912                                 obj->efile.data_shndx = idx;
2913                         } else if (strcmp(name, RODATA_SEC) == 0) {
2914                                 obj->efile.rodata = data;
2915                                 obj->efile.rodata_shndx = idx;
2916                         } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
2917                                 obj->efile.st_ops_data = data;
2918                                 obj->efile.st_ops_shndx = idx;
2919                         } else {
2920                                 pr_info("elf: skipping unrecognized data section(%d) %s\n",
2921                                         idx, name);
2922                         }
2923                 } else if (sh.sh_type == SHT_REL) {
2924                         int nr_sects = obj->efile.nr_reloc_sects;
2925                         void *sects = obj->efile.reloc_sects;
2926                         int sec = sh.sh_info; /* points to other section */
2927
2928                         /* Only do relos for sections with exec instructions, struct_ops or BTF-defined maps */
2929                         if (!section_have_execinstr(obj, sec) &&
2930                             strcmp(name, ".rel" STRUCT_OPS_SEC) &&
2931                             strcmp(name, ".rel" MAPS_ELF_SEC)) {
2932                                 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
2933                                         idx, name, sec,
2934                                         elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
2935                                 continue;
2936                         }
2937
2938                         sects = libbpf_reallocarray(sects, nr_sects + 1,
2939                                                     sizeof(*obj->efile.reloc_sects));
2940                         if (!sects)
2941                                 return -ENOMEM;
2942
2943                         obj->efile.reloc_sects = sects;
2944                         obj->efile.nr_reloc_sects++;
2945
2946                         obj->efile.reloc_sects[nr_sects].shdr = sh;
2947                         obj->efile.reloc_sects[nr_sects].data = data;
2948                 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
2949                         obj->efile.bss = data;
2950                         obj->efile.bss_shndx = idx;
2951                 } else {
2952                         pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
2953                                 (size_t)sh.sh_size);
2954                 }
2955         }
2956
2957         if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
2958                 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
2959                 return -LIBBPF_ERRNO__FORMAT;
2960         }
2961
2962         /* sort BPF programs by section index and in-section instruction
2963          * offset for faster search */
2964         qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
2965
2966         return bpf_object__init_btf(obj, btf_data, btf_ext_data);
2967 }
2968
2969 static bool sym_is_extern(const GElf_Sym *sym)
2970 {
2971         int bind = GELF_ST_BIND(sym->st_info);
2972         /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
2973         return sym->st_shndx == SHN_UNDEF &&
2974                (bind == STB_GLOBAL || bind == STB_WEAK) &&
2975                GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
2976 }
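/* A minimal sketch of how such externs appear on the BPF C side (variable
 * names are illustrative; __kconfig and __ksym come from bpf_helpers.h):
 *
 *	extern int LINUX_KERNEL_VERSION __kconfig;
 *	extern const void bpf_link_fops __ksym;
 *
 * Both compile into undefined (SHN_UNDEF) NOTYPE symbols with GLOBAL or
 * WEAK binding, which is exactly what this predicate matches.
 */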
2977
2978 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
2979 {
2980         const struct btf_type *t;
2981         const char *var_name;
2982         int i, n;
2983
2984         if (!btf)
2985                 return -ESRCH;
2986
2987         n = btf__get_nr_types(btf);
2988         for (i = 1; i <= n; i++) {
2989                 t = btf__type_by_id(btf, i);
2990
2991                 if (!btf_is_var(t))
2992                         continue;
2993
2994                 var_name = btf__name_by_offset(btf, t->name_off);
2995                 if (strcmp(var_name, ext_name))
2996                         continue;
2997
2998                 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
2999                         return -EINVAL;
3000
3001                 return i;
3002         }
3003
3004         return -ENOENT;
3005 }
3006
3007 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
3008         const struct btf_var_secinfo *vs;
3009         const struct btf_type *t;
3010         int i, j, n;
3011
3012         if (!btf)
3013                 return -ESRCH;
3014
3015         n = btf__get_nr_types(btf);
3016         for (i = 1; i <= n; i++) {
3017                 t = btf__type_by_id(btf, i);
3018
3019                 if (!btf_is_datasec(t))
3020                         continue;
3021
3022                 vs = btf_var_secinfos(t);
3023                 for (j = 0; j < btf_vlen(t); j++, vs++) {
3024                         if (vs->type == ext_btf_id)
3025                                 return i;
3026                 }
3027         }
3028
3029         return -ENOENT;
3030 }
3031
3032 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3033                                      bool *is_signed)
3034 {
3035         const struct btf_type *t;
3036         const char *name;
3037
3038         t = skip_mods_and_typedefs(btf, id, NULL);
3039         name = btf__name_by_offset(btf, t->name_off);
3040
3041         if (is_signed)
3042                 *is_signed = false;
3043         switch (btf_kind(t)) {
3044         case BTF_KIND_INT: {
3045                 int enc = btf_int_encoding(t);
3046
3047                 if (enc & BTF_INT_BOOL)
3048                         return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3049                 if (is_signed)
3050                         *is_signed = enc & BTF_INT_SIGNED;
3051                 if (t->size == 1)
3052                         return KCFG_CHAR;
3053                 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3054                         return KCFG_UNKNOWN;
3055                 return KCFG_INT;
3056         }
3057         case BTF_KIND_ENUM:
3058                 if (t->size != 4)
3059                         return KCFG_UNKNOWN;
3060                 if (strcmp(name, "libbpf_tristate"))
3061                         return KCFG_UNKNOWN;
3062                 return KCFG_TRISTATE;
3063         case BTF_KIND_ARRAY:
3064                 if (btf_array(t)->nelems == 0)
3065                         return KCFG_UNKNOWN;
3066                 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3067                         return KCFG_UNKNOWN;
3068                 return KCFG_CHAR_ARR;
3069         default:
3070                 return KCFG_UNKNOWN;
3071         }
3072 }
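/* Summary of the kcfg type mapping implemented above:
 *
 *	bool                    -> KCFG_BOOL
 *	char                    -> KCFG_CHAR
 *	ints of size 2/4/8      -> KCFG_INT
 *	enum libbpf_tristate    -> KCFG_TRISTATE
 *	char[N]                 -> KCFG_CHAR_ARR
 *	anything else           -> KCFG_UNKNOWN
 */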
3073
3074 static int cmp_externs(const void *_a, const void *_b)
3075 {
3076         const struct extern_desc *a = _a;
3077         const struct extern_desc *b = _b;
3078
3079         if (a->type != b->type)
3080                 return a->type < b->type ? -1 : 1;
3081
3082         if (a->type == EXT_KCFG) {
3083                 /* descending order by alignment requirements */
3084                 if (a->kcfg.align != b->kcfg.align)
3085                         return a->kcfg.align > b->kcfg.align ? -1 : 1;
3086                 /* ascending order by size, within same alignment class */
3087                 if (a->kcfg.sz != b->kcfg.sz)
3088                         return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3089         }
3090
3091         /* resolve ties by name */
3092         return strcmp(a->name, b->name);
3093 }
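/* This ordering keeps the .kconfig image compact. E.g., kcfg externs with
 * (align, size) of (8, 8), (4, 4), (1, 1) and (1, 16) sort in exactly that
 * order, so the offset assignment in bpf_object__collect_externs() below
 * needs no padding between them.
 */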
3094
3095 static int find_int_btf_id(const struct btf *btf)
3096 {
3097         const struct btf_type *t;
3098         int i, n;
3099
3100         n = btf__get_nr_types(btf);
3101         for (i = 1; i <= n; i++) {
3102                 t = btf__type_by_id(btf, i);
3103
3104                 if (btf_is_int(t) && btf_int_bits(t) == 32)
3105                         return i;
3106         }
3107
3108         return 0;
3109 }
3110
3111 static int bpf_object__collect_externs(struct bpf_object *obj)
3112 {
3113         struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3114         const struct btf_type *t;
3115         struct extern_desc *ext;
3116         int i, n, off;
3117         const char *ext_name, *sec_name;
3118         Elf_Scn *scn;
3119         GElf_Shdr sh;
3120
3121         if (!obj->efile.symbols)
3122                 return 0;
3123
3124         scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3125         if (elf_sec_hdr(obj, scn, &sh))
3126                 return -LIBBPF_ERRNO__FORMAT;
3127
3128         n = sh.sh_size / sh.sh_entsize;
3129         pr_debug("looking for externs among %d symbols...\n", n);
3130
3131         for (i = 0; i < n; i++) {
3132                 GElf_Sym sym;
3133
3134                 if (!gelf_getsym(obj->efile.symbols, i, &sym))
3135                         return -LIBBPF_ERRNO__FORMAT;
3136                 if (!sym_is_extern(&sym))
3137                         continue;
3138                 ext_name = elf_sym_str(obj, sym.st_name);
3139                 if (!ext_name || !ext_name[0])
3140                         continue;
3141
3142                 ext = obj->externs;
3143                 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3144                 if (!ext)
3145                         return -ENOMEM;
3146                 obj->externs = ext;
3147                 ext = &ext[obj->nr_extern];
3148                 memset(ext, 0, sizeof(*ext));
3149                 obj->nr_extern++;
3150
3151                 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3152                 if (ext->btf_id <= 0) {
3153                         pr_warn("failed to find BTF for extern '%s': %d\n",
3154                                 ext_name, ext->btf_id);
3155                         return ext->btf_id;
3156                 }
3157                 t = btf__type_by_id(obj->btf, ext->btf_id);
3158                 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3159                 ext->sym_idx = i;
3160                 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
3161
3162                 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3163                 if (ext->sec_btf_id <= 0) {
3164                         pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3165                                 ext_name, ext->btf_id, ext->sec_btf_id);
3166                         return ext->sec_btf_id;
3167                 }
3168                 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3169                 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3170
3171                 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3172                         kcfg_sec = sec;
3173                         ext->type = EXT_KCFG;
3174                         ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3175                         if (ext->kcfg.sz <= 0) {
3176                                 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3177                                         ext_name, ext->kcfg.sz);
3178                                 return ext->kcfg.sz;
3179                         }
3180                         ext->kcfg.align = btf__align_of(obj->btf, t->type);
3181                         if (ext->kcfg.align <= 0) {
3182                                 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3183                                         ext_name, ext->kcfg.align);
3184                                 return -EINVAL;
3185                         }
3186                         ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3187                                                         &ext->kcfg.is_signed);
3188                         if (ext->kcfg.type == KCFG_UNKNOWN) {
3189                                 pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
3190                                 return -ENOTSUP;
3191                         }
3192                 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3193                         ksym_sec = sec;
3194                         ext->type = EXT_KSYM;
3195                         skip_mods_and_typedefs(obj->btf, t->type,
3196                                                &ext->ksym.type_id);
3197                 } else {
3198                         pr_warn("unrecognized extern section '%s'\n", sec_name);
3199                         return -ENOTSUP;
3200                 }
3201         }
3202         pr_debug("collected %d externs total\n", obj->nr_extern);
3203
3204         if (!obj->nr_extern)
3205                 return 0;
3206
3207         /* sort externs by type, for kcfg ones also by (align, size, name) */
3208         qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3209
3210         /* for .ksyms section, we need to turn all externs into allocated
3211          * variables in BTF to pass kernel verification; we do this by
3212          * pretending that each extern is a 4-byte integer variable
3213          */
3214         if (ksym_sec) {
3215                 /* find existing 4-byte integer type in BTF to use for fake
3216                  * extern variables in DATASEC
3217                  */
3218                 int int_btf_id = find_int_btf_id(obj->btf);
3219
3220                 for (i = 0; i < obj->nr_extern; i++) {
3221                         ext = &obj->externs[i];
3222                         if (ext->type != EXT_KSYM)
3223                                 continue;
3224                         pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3225                                  i, ext->sym_idx, ext->name);
3226                 }
3227
3228                 sec = ksym_sec;
3229                 n = btf_vlen(sec);
3230                 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3231                         struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3232                         struct btf_type *vt;
3233
3234                         vt = (void *)btf__type_by_id(obj->btf, vs->type);
3235                         ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3236                         ext = find_extern_by_name(obj, ext_name);
3237                         if (!ext) {
3238                                 pr_warn("failed to find extern definition for BTF var '%s'\n",
3239                                         ext_name);
3240                                 return -ESRCH;
3241                         }
3242                         btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3243                         vt->type = int_btf_id;
3244                         vs->offset = off;
3245                         vs->size = sizeof(int);
3246                 }
3247                 sec->size = off;
3248         }
3249
3250         if (kcfg_sec) {
3251                 sec = kcfg_sec;
3252                 /* for kcfg externs calculate their offsets within a .kconfig map */
3253                 off = 0;
3254                 for (i = 0; i < obj->nr_extern; i++) {
3255                         ext = &obj->externs[i];
3256                         if (ext->type != EXT_KCFG)
3257                                 continue;
3258
3259                         ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3260                         off = ext->kcfg.data_off + ext->kcfg.sz;
3261                         pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3262                                  i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3263                 }
3264                 sec->size = off;
3265                 n = btf_vlen(sec);
3266                 for (i = 0; i < n; i++) {
3267                         struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3268
3269                         t = btf__type_by_id(obj->btf, vs->type);
3270                         ext_name = btf__name_by_offset(obj->btf, t->name_off);
3271                         ext = find_extern_by_name(obj, ext_name);
3272                         if (!ext) {
3273                                 pr_warn("failed to find extern definition for BTF var '%s'\n",
3274                                         ext_name);
3275                                 return -ESRCH;
3276                         }
3277                         btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3278                         vs->offset = ext->kcfg.data_off;
3279                 }
3280         }
3281         return 0;
3282 }
3283
3284 struct bpf_program *
3285 bpf_object__find_program_by_title(const struct bpf_object *obj,
3286                                   const char *title)
3287 {
3288         struct bpf_program *pos;
3289
3290         bpf_object__for_each_program(pos, obj) {
3291                 if (pos->sec_name && !strcmp(pos->sec_name, title))
3292                         return pos;
3293         }
3294         return NULL;
3295 }
3296
3297 static bool prog_is_subprog(const struct bpf_object *obj,
3298                             const struct bpf_program *prog)
3299 {
3300         /* For legacy reasons, libbpf supports entry-point BPF programs
3301          * without SEC() attribute, i.e., those in the .text section. But if
3302          * there are 2 or more such programs in the .text section, they all
3303          * must be subprograms called from entry-point BPF programs in
3304          * designated SEC()'tions, otherwise there is no way to distinguish
3305          * which of those programs should be loaded vs which are subprograms.
3306          * Similarly, if there is a function/program in .text and at least one
3307          * other BPF program with custom SEC() attribute, then we just assume
3308          * .text programs are subprograms (even if they are not called from
3309          * other programs), because libbpf never explicitly supported mixing
3310          * SEC()-designated BPF programs and .text entry-point BPF programs.
3311          */
3312         return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3313 }
3314
3315 struct bpf_program *
3316 bpf_object__find_program_by_name(const struct bpf_object *obj,
3317                                  const char *name)
3318 {
3319         struct bpf_program *prog;
3320
3321         bpf_object__for_each_program(prog, obj) {
3322                 if (prog_is_subprog(obj, prog))
3323                         continue;
3324                 if (!strcmp(prog->name, name))
3325                         return prog;
3326         }
3327         return NULL;
3328 }
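/* A minimal lookup sketch (program and section names are hypothetical):
 * given a program defined as
 *
 *	SEC("xdp")
 *	int my_prog(struct xdp_md *ctx) { return XDP_PASS; }
 *
 * bpf_object__find_program_by_title(obj, "xdp") matches its section name,
 * while bpf_object__find_program_by_name(obj, "my_prog") matches the C
 * function name and, unlike the title variant, skips .text subprograms.
 */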
3329
3330 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3331                                       int shndx)
3332 {
3333         return shndx == obj->efile.data_shndx ||
3334                shndx == obj->efile.bss_shndx ||
3335                shndx == obj->efile.rodata_shndx;
3336 }
3337
3338 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3339                                       int shndx)
3340 {
3341         return shndx == obj->efile.maps_shndx ||
3342                shndx == obj->efile.btf_maps_shndx;
3343 }
3344
3345 static enum libbpf_map_type
3346 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3347 {
3348         if (shndx == obj->efile.data_shndx)
3349                 return LIBBPF_MAP_DATA;
3350         else if (shndx == obj->efile.bss_shndx)
3351                 return LIBBPF_MAP_BSS;
3352         else if (shndx == obj->efile.rodata_shndx)
3353                 return LIBBPF_MAP_RODATA;
3354         else if (shndx == obj->efile.symbols_shndx)
3355                 return LIBBPF_MAP_KCONFIG;
3356         else
3357                 return LIBBPF_MAP_UNSPEC;
3358 }
3359
3360 static int bpf_program__record_reloc(struct bpf_program *prog,
3361                                      struct reloc_desc *reloc_desc,
3362                                      __u32 insn_idx, const char *sym_name,
3363                                      const GElf_Sym *sym, const GElf_Rel *rel)
3364 {
3365         struct bpf_insn *insn = &prog->insns[insn_idx];
3366         size_t map_idx, nr_maps = prog->obj->nr_maps;
3367         struct bpf_object *obj = prog->obj;
3368         __u32 shdr_idx = sym->st_shndx;
3369         enum libbpf_map_type type;
3370         const char *sym_sec_name;
3371         struct bpf_map *map;
3372
3373         reloc_desc->processed = false;
3374
3375         /* sub-program call relocation */
3376         if (insn->code == (BPF_JMP | BPF_CALL)) {
3377                 if (insn->src_reg != BPF_PSEUDO_CALL) {
3378                         pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
3379                         return -LIBBPF_ERRNO__RELOC;
3380                 }
3381                 /* text_shndx can be 0, if no default "main" program exists */
3382                 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
3383                         sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3384                         pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
3385                                 prog->name, sym_name, sym_sec_name);
3386                         return -LIBBPF_ERRNO__RELOC;
3387                 }
3388                 if (sym->st_value % BPF_INSN_SZ) {
3389                         pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
3390                                 prog->name, sym_name, (size_t)sym->st_value);
3391                         return -LIBBPF_ERRNO__RELOC;
3392                 }
3393                 reloc_desc->type = RELO_CALL;
3394                 reloc_desc->insn_idx = insn_idx;
3395                 reloc_desc->sym_off = sym->st_value;
3396                 return 0;
3397         }
3398
3399         if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
3400                 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3401                         prog->name, sym_name, insn_idx, insn->code);
3402                 return -LIBBPF_ERRNO__RELOC;
3403         }
3404
3405         if (sym_is_extern(sym)) {
3406                 int sym_idx = GELF_R_SYM(rel->r_info);
3407                 int i, n = obj->nr_extern;
3408                 struct extern_desc *ext;
3409
3410                 for (i = 0; i < n; i++) {
3411                         ext = &obj->externs[i];
3412                         if (ext->sym_idx == sym_idx)
3413                                 break;
3414                 }
3415                 if (i >= n) {
3416                         pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3417                                 prog->name, sym_name, sym_idx);
3418                         return -LIBBPF_ERRNO__RELOC;
3419                 }
3420                 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3421                          prog->name, i, ext->name, ext->sym_idx, insn_idx);
3422                 reloc_desc->type = RELO_EXTERN;
3423                 reloc_desc->insn_idx = insn_idx;
3424                 reloc_desc->sym_off = i; /* sym_off stores extern index */
3425                 return 0;
3426         }
3427
3428         if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
3429                 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
3430                         prog->name, sym_name, shdr_idx);
3431                 return -LIBBPF_ERRNO__RELOC;
3432         }
3433
3434         type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
3435         sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3436
3437         /* generic map reference relocation */
3438         if (type == LIBBPF_MAP_UNSPEC) {
3439                 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
3440                         pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
3441                                 prog->name, sym_name, sym_sec_name);
3442                         return -LIBBPF_ERRNO__RELOC;
3443                 }
3444                 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3445                         map = &obj->maps[map_idx];
3446                         if (map->libbpf_type != type ||
3447                             map->sec_idx != sym->st_shndx ||
3448                             map->sec_offset != sym->st_value)
3449                                 continue;
3450                         pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
3451                                  prog->name, map_idx, map->name, map->sec_idx,
3452                                  map->sec_offset, insn_idx);
3453                         break;
3454                 }
3455                 if (map_idx >= nr_maps) {
3456                         pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
3457                                 prog->name, sym_sec_name, (size_t)sym->st_value);
3458                         return -LIBBPF_ERRNO__RELOC;
3459                 }
3460                 reloc_desc->type = RELO_LD64;
3461                 reloc_desc->insn_idx = insn_idx;
3462                 reloc_desc->map_idx = map_idx;
3463                 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
3464                 return 0;
3465         }
3466
3467         /* global data map relocation */
3468         if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
3469                 pr_warn("prog '%s': bad data relo against section '%s'\n",
3470                         prog->name, sym_sec_name);
3471                 return -LIBBPF_ERRNO__RELOC;
3472         }
3473         for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3474                 map = &obj->maps[map_idx];
3475                 if (map->libbpf_type != type)
3476                         continue;
3477                 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
3478                          prog->name, map_idx, map->name, map->sec_idx,
3479                          map->sec_offset, insn_idx);
3480                 break;
3481         }
3482         if (map_idx >= nr_maps) {
3483                 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
3484                         prog->name, sym_sec_name);
3485                 return -LIBBPF_ERRNO__RELOC;
3486         }
3487
3488         reloc_desc->type = RELO_DATA;
3489         reloc_desc->insn_idx = insn_idx;
3490         reloc_desc->map_idx = map_idx;
3491         reloc_desc->sym_off = sym->st_value;
3492         return 0;
3493 }
3494
3495 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
3496 {
3497         return insn_idx >= prog->sec_insn_off &&
3498                insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
3499 }
3500
3501 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
3502                                                  size_t sec_idx, size_t insn_idx)
3503 {
3504         int l = 0, r = obj->nr_programs - 1, m;
3505         struct bpf_program *prog;
3506
        if (!obj->nr_programs)
                return NULL;

3507         while (l < r) {
3508                 m = l + (r - l + 1) / 2;
3509                 prog = &obj->programs[m];
3510
3511                 if (prog->sec_idx < sec_idx ||
3512                     (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
3513                         l = m;
3514                 else
3515                         r = m - 1;
3516         }
3517         /* matching program could be at index l, but it still might be the
3518          * wrong one, so we need to double check conditions for the last time
3519          */
3520         prog = &obj->programs[l];
3521         if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
3522                 return prog;
3523         return NULL;
3524 }
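/* Worked example of the binary search above: with programs starting at
 * in-section instruction offsets 0, 128 and 256, a relocation at insn_idx
 * 130 lands on the program starting at 128 (the last one starting at or
 * before 130), and is accepted only if 130 falls within that program's
 * sec_insn_cnt range.
 */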
3525
3526 static int
3527 bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
3528 {
3529         Elf_Data *symbols = obj->efile.symbols;
3530         const char *relo_sec_name, *sec_name;
3531         size_t sec_idx = shdr->sh_info;
3532         struct bpf_program *prog;
3533         struct reloc_desc *relos;
3534         int err, i, nrels;
3535         const char *sym_name;
3536         __u32 insn_idx;
3537         GElf_Sym sym;
3538         GElf_Rel rel;
3539
3540         relo_sec_name = elf_sec_str(obj, shdr->sh_name);
3541         sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
3542         if (!relo_sec_name || !sec_name)
3543                 return -EINVAL;
3544
3545         pr_debug("sec '%s': collecting relocations for section(%zu) '%s'\n",
3546                  relo_sec_name, sec_idx, sec_name);
3547         nrels = shdr->sh_size / shdr->sh_entsize;
3548
3549         for (i = 0; i < nrels; i++) {
3550                 if (!gelf_getrel(data, i, &rel)) {
3551                         pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
3552                         return -LIBBPF_ERRNO__FORMAT;
3553                 }
3554                 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
3555                         pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
3556                                 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
3557                         return -LIBBPF_ERRNO__FORMAT;
3558                 }
3559                 if (rel.r_offset % BPF_INSN_SZ) {
3560                         pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
3561                                 relo_sec_name, (size_t)rel.r_offset, i);
3562                         return -LIBBPF_ERRNO__FORMAT;
3563                 }
3564
3565                 insn_idx = rel.r_offset / BPF_INSN_SZ;
3566                 /* relocations against static functions are recorded as
3567                  * relocations against the section that contains a function;
3568                  * in that case, the symbol will be STT_SECTION and sym.st_name
3569                  * will point to the empty string (0), so fetch the section
3570                  * name instead
3571                  */
3572                 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
3573                         sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
3574                 else
3575                         sym_name = elf_sym_str(obj, sym.st_name);
3576                 sym_name = sym_name ?: "<?>";
3577
3578                 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
3579                          relo_sec_name, i, insn_idx, sym_name);
3580
3581                 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
3582                 if (!prog) {
3583                         pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n",
3584                                 relo_sec_name, i, sec_name, insn_idx);
3585                         return -LIBBPF_ERRNO__RELOC;
3586                 }
3587
3588                 relos = libbpf_reallocarray(prog->reloc_desc,
3589                                             prog->nr_reloc + 1, sizeof(*relos));
3590                 if (!relos)
3591                         return -ENOMEM;
3592                 prog->reloc_desc = relos;
3593
3594                 /* adjust insn_idx to local BPF program frame of reference */
3595                 insn_idx -= prog->sec_insn_off;
3596                 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
3597                                                 insn_idx, sym_name, &sym, &rel);
3598                 if (err)
3599                         return err;
3600
3601                 prog->nr_reloc++;
3602         }
3603         return 0;
3604 }
3605
3606 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
3607 {
3608         struct bpf_map_def *def = &map->def;
3609         __u32 key_type_id = 0, value_type_id = 0;
3610         int ret;
3611
3612         /* if it's a BTF-defined map, we don't need to search for type IDs.
3613          * A struct_ops map does not need btf_key_type_id and
3614          * btf_value_type_id either.
3615          */
3616         if (map->sec_idx == obj->efile.btf_maps_shndx ||
3617             bpf_map__is_struct_ops(map))
3618                 return 0;
3619
3620         if (!bpf_map__is_internal(map)) {
3621                 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
3622                                            def->value_size, &key_type_id,
3623                                            &value_type_id);
3624         } else {
3625                 /*
3626                  * LLVM annotates global data differently in BTF, that is,
3627                  * only as '.data', '.bss' or '.rodata'.
3628                  */
3629                 ret = btf__find_by_name(obj->btf,
3630                                 libbpf_type_to_btf_name[map->libbpf_type]);
3631         }
3632         if (ret < 0)
3633                 return ret;
3634
3635         map->btf_key_type_id = key_type_id;
3636         map->btf_value_type_id = bpf_map__is_internal(map) ?
3637                                  ret : value_type_id;
3638         return 0;
3639 }
3640
3641 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
3642 {
3643         struct bpf_map_info info = {};
3644         __u32 len = sizeof(info);
3645         int new_fd, err;
3646         char *new_name;
3647
3648         err = bpf_obj_get_info_by_fd(fd, &info, &len);
3649         if (err)
3650                 return err;
3651
3652         new_name = strdup(info.name);
3653         if (!new_name)
3654                 return -errno;
3655
3656         new_fd = open("/", O_RDONLY | O_CLOEXEC);
3657         if (new_fd < 0) {
3658                 err = -errno;
3659                 goto err_free_new_name;
3660         }
3661
3662         new_fd = dup3(fd, new_fd, O_CLOEXEC);
3663         if (new_fd < 0) {
3664                 err = -errno;
3665                 goto err_close_new_fd;
3666         }
3667
3668         err = zclose(map->fd);
3669         if (err) {
3670                 err = -errno;
3671                 goto err_close_new_fd;
3672         }
3673         free(map->name);
3674
3675         map->fd = new_fd;
3676         map->name = new_name;
3677         map->def.type = info.type;
3678         map->def.key_size = info.key_size;
3679         map->def.value_size = info.value_size;
3680         map->def.max_entries = info.max_entries;
3681         map->def.map_flags = info.map_flags;
3682         map->btf_key_type_id = info.btf_key_type_id;
3683         map->btf_value_type_id = info.btf_value_type_id;
3684         map->reused = true;
3685
3686         return 0;
3687
3688 err_close_new_fd:
3689         close(new_fd);
3690 err_free_new_name:
3691         free(new_name);
3692         return err;
3693 }
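/* A minimal usage sketch, assuming a map was pinned earlier under a
 * hypothetical BPF FS path (error handling omitted):
 *
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	if (pin_fd >= 0)
 *		bpf_map__reuse_fd(map, pin_fd);
 *
 * This has to happen before bpf_object__load(), so that the object reuses
 * the existing kernel map instead of creating a fresh one; the same flow
 * is automated by the pin_path logic in bpf_object__reuse_map() below.
 */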
3694
3695 __u32 bpf_map__max_entries(const struct bpf_map *map)
3696 {
3697         return map->def.max_entries;
3698 }
3699
3700 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
3701 {
3702         if (map->fd >= 0)
3703                 return -EBUSY;
3704         map->def.max_entries = max_entries;
3705         return 0;
3706 }
3707
3708 int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
3709 {
3710         if (!map || !max_entries)
3711                 return -EINVAL;
3712
3713         return bpf_map__set_max_entries(map, max_entries);
3714 }
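/* A minimal resizing sketch (the map name is hypothetical); this only
 * works between bpf_object__open() and bpf_object__load(), while the map
 * FD has not been created yet:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "events");
 *
 *	if (map)
 *		bpf_map__set_max_entries(map, 4096);
 */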
3715
3716 static int
3717 bpf_object__probe_loading(struct bpf_object *obj)
3718 {
3719         struct bpf_load_program_attr attr;
3720         char *cp, errmsg[STRERR_BUFSIZE];
3721         struct bpf_insn insns[] = {
3722                 BPF_MOV64_IMM(BPF_REG_0, 0),
3723                 BPF_EXIT_INSN(),
3724         };
3725         int ret;
3726
3727         /* make sure basic loading works */
3728
3729         memset(&attr, 0, sizeof(attr));
3730         attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3731         attr.insns = insns;
3732         attr.insns_cnt = ARRAY_SIZE(insns);
3733         attr.license = "GPL";
3734
3735         ret = bpf_load_program_xattr(&attr, NULL, 0);
3736         if (ret < 0) {
3737                 ret = errno;
3738                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3739                 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
3740                         "program. Make sure your kernel supports BPF "
3741                         "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
3742                         "set to a big enough value.\n", __func__, cp, ret);
3743                 return -ret;
3744         }
3745         close(ret);
3746
3747         return 0;
3748 }
3749
3750 static int probe_fd(int fd)
3751 {
3752         if (fd >= 0)
3753                 close(fd);
3754         return fd >= 0;
3755 }
3756
3757 static int probe_kern_prog_name(void)
3758 {
3759         struct bpf_load_program_attr attr;
3760         struct bpf_insn insns[] = {
3761                 BPF_MOV64_IMM(BPF_REG_0, 0),
3762                 BPF_EXIT_INSN(),
3763         };
3764         int ret;
3765
3766         /* make sure loading with name works */
3767
3768         memset(&attr, 0, sizeof(attr));
3769         attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3770         attr.insns = insns;
3771         attr.insns_cnt = ARRAY_SIZE(insns);
3772         attr.license = "GPL";
3773         attr.name = "test";
3774         ret = bpf_load_program_xattr(&attr, NULL, 0);
3775         return probe_fd(ret);
3776 }
3777
3778 static int probe_kern_global_data(void)
3779 {
3780         struct bpf_load_program_attr prg_attr;
3781         struct bpf_create_map_attr map_attr;
3782         char *cp, errmsg[STRERR_BUFSIZE];
3783         struct bpf_insn insns[] = {
3784                 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
3785                 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
3786                 BPF_MOV64_IMM(BPF_REG_0, 0),
3787                 BPF_EXIT_INSN(),
3788         };
3789         int ret, map;
3790
3791         memset(&map_attr, 0, sizeof(map_attr));
3792         map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3793         map_attr.key_size = sizeof(int);
3794         map_attr.value_size = 32;
3795         map_attr.max_entries = 1;
3796
3797         map = bpf_create_map_xattr(&map_attr);
3798         if (map < 0) {
3799                 ret = -errno;
3800                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3801                 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
3802                         __func__, cp, -ret);
3803                 return ret;
3804         }
3805
3806         insns[0].imm = map;
3807
3808         memset(&prg_attr, 0, sizeof(prg_attr));
3809         prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3810         prg_attr.insns = insns;
3811         prg_attr.insns_cnt = ARRAY_SIZE(insns);
3812         prg_attr.license = "GPL";
3813
3814         ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
3815         close(map);
3816         return probe_fd(ret);
3817 }
3818
3819 static int probe_kern_btf(void)
3820 {
3821         static const char strs[] = "\0int";
3822         __u32 types[] = {
3823                 /* int */
3824                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
3825         };
3826
3827         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3828                                              strs, sizeof(strs)));
3829 }
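/* In the raw BTF blobs used by these probes, name offsets index into strs:
 * e.g., name_off 1 above resolves to "int" (offset 0 is the mandatory empty
 * string). The BTF_TYPE_INT_ENC() arguments encode, in order, name offset,
 * encoding, bit offset, bit size and byte size of the integer type.
 */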
3830
3831 static int probe_kern_btf_func(void)
3832 {
3833         static const char strs[] = "\0int\0x\0a";
3834         /* void x(int a) {} */
3835         __u32 types[] = {
3836                 /* int */
3837                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
3838                 /* FUNC_PROTO */                                /* [2] */
3839                 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3840                 BTF_PARAM_ENC(7, 1),
3841                 /* FUNC x */                                    /* [3] */
3842                 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
3843         };
3844
3845         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3846                                              strs, sizeof(strs)));
3847 }
3848
3849 static int probe_kern_btf_func_global(void)
3850 {
3851         static const char strs[] = "\0int\0x\0a";
3852         /* global function: void x(int a) {} */
3853         __u32 types[] = {
3854                 /* int */
3855                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
3856                 /* FUNC_PROTO */                                /* [2] */
3857                 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3858                 BTF_PARAM_ENC(7, 1),
3859                 /* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
3860                 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
3861         };
3862
3863         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3864                                              strs, sizeof(strs)));
3865 }
3866
3867 static int probe_kern_btf_datasec(void)
3868 {
3869         static const char strs[] = "\0x\0.data";
3870         /* static int x; */
3871         __u32 types[] = {
3872                 /* int */
3873                 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
3874                 /* VAR x */                                     /* [2] */
3875                 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
3876                 BTF_VAR_STATIC,
3877                 /* DATASEC .data */                             /* [3] */
3878                 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
3879                 BTF_VAR_SECINFO_ENC(2, 0, 4),
3880         };
3881
3882         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3883                                              strs, sizeof(strs)));
3884 }
3885
3886 static int probe_kern_array_mmap(void)
3887 {
3888         struct bpf_create_map_attr attr = {
3889                 .map_type = BPF_MAP_TYPE_ARRAY,
3890                 .map_flags = BPF_F_MMAPABLE,
3891                 .key_size = sizeof(int),
3892                 .value_size = sizeof(int),
3893                 .max_entries = 1,
3894         };
3895
3896         return probe_fd(bpf_create_map_xattr(&attr));
3897 }
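/* BPF_F_MMAPABLE lets user space map ARRAY map contents directly, which
 * libbpf relies on for .data/.bss/.rodata/.kconfig maps (see the
 * map->mmaped usage below). A sketch of the user-space side, with the
 * length rounded up to a page-size multiple:
 *
 *	void *mem = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 map_fd, 0);
 */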
3898
3899 static int probe_kern_exp_attach_type(void)
3900 {
3901         struct bpf_load_program_attr attr;
3902         struct bpf_insn insns[] = {
3903                 BPF_MOV64_IMM(BPF_REG_0, 0),
3904                 BPF_EXIT_INSN(),
3905         };
3906
3907         memset(&attr, 0, sizeof(attr));
3908         /* use any valid combination of program type and (optional)
3909          * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS, which is 0)
3910          * to see if kernel supports expected_attach_type field for
3911          * BPF_PROG_LOAD command
3912          */
3913         attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
3914         attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
3915         attr.insns = insns;
3916         attr.insns_cnt = ARRAY_SIZE(insns);
3917         attr.license = "GPL";
3918
3919         return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
3920 }
3921
3922 static int probe_kern_probe_read_kernel(void)
3923 {
3924         struct bpf_load_program_attr attr;
3925         struct bpf_insn insns[] = {
3926                 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),   /* r1 = r10 (fp) */
3927                 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),  /* r1 += -8 */
3928                 BPF_MOV64_IMM(BPF_REG_2, 8),            /* r2 = 8 */
3929                 BPF_MOV64_IMM(BPF_REG_3, 0),            /* r3 = 0 */
3930                 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
3931                 BPF_EXIT_INSN(),
3932         };
3933
3934         memset(&attr, 0, sizeof(attr));
3935         attr.prog_type = BPF_PROG_TYPE_KPROBE;
3936         attr.insns = insns;
3937         attr.insns_cnt = ARRAY_SIZE(insns);
3938         attr.license = "GPL";
3939
3940         return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
3941 }
3942
3943 static int probe_prog_bind_map(void)
3944 {
3945         struct bpf_load_program_attr prg_attr;
3946         struct bpf_create_map_attr map_attr;
3947         char *cp, errmsg[STRERR_BUFSIZE];
3948         struct bpf_insn insns[] = {
3949                 BPF_MOV64_IMM(BPF_REG_0, 0),
3950                 BPF_EXIT_INSN(),
3951         };
3952         int ret, map, prog;
3953
3954         memset(&map_attr, 0, sizeof(map_attr));
3955         map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3956         map_attr.key_size = sizeof(int);
3957         map_attr.value_size = 32;
3958         map_attr.max_entries = 1;
3959
3960         map = bpf_create_map_xattr(&map_attr);
3961         if (map < 0) {
3962                 ret = -errno;
3963                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3964                 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
3965                         __func__, cp, -ret);
3966                 return ret;
3967         }
3968
3969         memset(&prg_attr, 0, sizeof(prg_attr));
3970         prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3971         prg_attr.insns = insns;
3972         prg_attr.insns_cnt = ARRAY_SIZE(insns);
3973         prg_attr.license = "GPL";
3974
3975         prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
3976         if (prog < 0) {
3977                 close(map);
3978                 return 0;
3979         }
3980
3981         ret = bpf_prog_bind_map(prog, map, NULL);
3982
3983         close(map);
3984         close(prog);
3985
3986         return ret >= 0;
3987 }
3988
3989 static int probe_module_btf(void)
3990 {
3991         static const char strs[] = "\0int";
3992         __u32 types[] = {
3993                 /* int */
3994                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
3995         };
3996         struct bpf_btf_info info;
3997         __u32 len = sizeof(info);
3998         char name[16];
3999         int fd, err;
4000
4001         fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4002         if (fd < 0)
4003                 return 0; /* BTF not supported at all */
4004
4005         memset(&info, 0, sizeof(info));
4006         info.name = ptr_to_u64(name);
4007         info.name_len = sizeof(name);
4008
4009         /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
4010          * kernel's module BTF support coincides with support for
4011          * name/name_len fields in struct bpf_btf_info.
4012          */
4013         err = bpf_obj_get_info_by_fd(fd, &info, &len);
4014         close(fd);
4015         return !err;
4016 }
4017
4018 enum kern_feature_result {
4019         FEAT_UNKNOWN = 0,
4020         FEAT_SUPPORTED = 1,
4021         FEAT_MISSING = 2,
4022 };
4023
4024 typedef int (*feature_probe_fn)(void);
4025
4026 static struct kern_feature_desc {
4027         const char *desc;
4028         feature_probe_fn probe;
4029         enum kern_feature_result res;
4030 } feature_probes[__FEAT_CNT] = {
4031         [FEAT_PROG_NAME] = {
4032                 "BPF program name", probe_kern_prog_name,
4033         },
4034         [FEAT_GLOBAL_DATA] = {
4035                 "global variables", probe_kern_global_data,
4036         },
4037         [FEAT_BTF] = {
4038                 "minimal BTF", probe_kern_btf,
4039         },
4040         [FEAT_BTF_FUNC] = {
4041                 "BTF functions", probe_kern_btf_func,
4042         },
4043         [FEAT_BTF_GLOBAL_FUNC] = {
4044                 "BTF global function", probe_kern_btf_func_global,
4045         },
4046         [FEAT_BTF_DATASEC] = {
4047                 "BTF data section and variable", probe_kern_btf_datasec,
4048         },
4049         [FEAT_ARRAY_MMAP] = {
4050                 "ARRAY map mmap()", probe_kern_array_mmap,
4051         },
4052         [FEAT_EXP_ATTACH_TYPE] = {
4053                 "BPF_PROG_LOAD expected_attach_type attribute",
4054                 probe_kern_exp_attach_type,
4055         },
4056         [FEAT_PROBE_READ_KERN] = {
4057                 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4058         },
4059         [FEAT_PROG_BIND_MAP] = {
4060                 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4061         },
4062         [FEAT_MODULE_BTF] = {
4063                 "module BTF support", probe_module_btf,
4064         },
4065 };
4066
4067 static bool kernel_supports(enum kern_feature_id feat_id)
4068 {
4069         struct kern_feature_desc *feat = &feature_probes[feat_id];
4070         int ret;
4071
4072         if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4073                 ret = feat->probe();
4074                 if (ret > 0) {
4075                         WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4076                 } else if (ret == 0) {
4077                         WRITE_ONCE(feat->res, FEAT_MISSING);
4078                 } else {
4079                         pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4080                         WRITE_ONCE(feat->res, FEAT_MISSING);
4081                 }
4082         }
4083
4084         return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4085 }
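/* Callers use this as a simple guard; e.g., bpf_object__create_map() below
 * only fills in create_attr.name when kernel_supports(FEAT_PROG_NAME).
 * Probe results are cached in feature_probes[], so each probe runs roughly
 * once per process.
 */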
4086
4087 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4088 {
4089         struct bpf_map_info map_info = {};
4090         char msg[STRERR_BUFSIZE];
4091         __u32 map_info_len;
4092
4093         map_info_len = sizeof(map_info);
4094
4095         if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
4096                 pr_warn("failed to get map info for map FD %d: %s\n",
4097                         map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
4098                 return false;
4099         }
4100
4101         return (map_info.type == map->def.type &&
4102                 map_info.key_size == map->def.key_size &&
4103                 map_info.value_size == map->def.value_size &&
4104                 map_info.max_entries == map->def.max_entries &&
4105                 map_info.map_flags == map->def.map_flags);
4106 }
4107
4108 static int
4109 bpf_object__reuse_map(struct bpf_map *map)
4110 {
4111         char *cp, errmsg[STRERR_BUFSIZE];
4112         int err, pin_fd;
4113
4114         pin_fd = bpf_obj_get(map->pin_path);
4115         if (pin_fd < 0) {
4116                 err = -errno;
4117                 if (err == -ENOENT) {
4118                         pr_debug("found no pinned map to reuse at '%s'\n",
4119                                  map->pin_path);
4120                         return 0;
4121                 }
4122
4123                 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4124                 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4125                         map->pin_path, cp);
4126                 return err;
4127         }
4128
4129         if (!map_is_reuse_compat(map, pin_fd)) {
4130                 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4131                         map->pin_path);
4132                 close(pin_fd);
4133                 return -EINVAL;
4134         }
4135
4136         err = bpf_map__reuse_fd(map, pin_fd);
4137         if (err) {
4138                 close(pin_fd);
4139                 return err;
4140         }
4141         map->pinned = true;
4142         pr_debug("reused pinned map at '%s'\n", map->pin_path);
4143
4144         return 0;
4145 }
4146
4147 static int
4148 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4149 {
4150         enum libbpf_map_type map_type = map->libbpf_type;
4151         char *cp, errmsg[STRERR_BUFSIZE];
4152         int err, zero = 0;
4153
4154         err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4155         if (err) {
4156                 err = -errno;
4157                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4158                 pr_warn("Error setting initial map(%s) contents: %s\n",
4159                         map->name, cp);
4160                 return err;
4161         }
4162
4163         /* Freeze .rodata and .kconfig maps as read-only from syscall side. */
4164         if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4165                 err = bpf_map_freeze(map->fd);
4166                 if (err) {
4167                         err = -errno;
4168                         cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4169                         pr_warn("Error freezing map(%s) as read-only: %s\n",
4170                                 map->name, cp);
4171                         return err;
4172                 }
4173         }
4174         return 0;
4175 }
4176
4177 static void bpf_map__destroy(struct bpf_map *map);
4178
4179 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
4180 {
4181         struct bpf_create_map_attr create_attr;
4182         struct bpf_map_def *def = &map->def;
4183
4184         memset(&create_attr, 0, sizeof(create_attr));
4185
4186         if (kernel_supports(FEAT_PROG_NAME))
4187                 create_attr.name = map->name;
4188         create_attr.map_ifindex = map->map_ifindex;
4189         create_attr.map_type = def->type;
4190         create_attr.map_flags = def->map_flags;
4191         create_attr.key_size = def->key_size;
4192         create_attr.value_size = def->value_size;
4193         create_attr.numa_node = map->numa_node;
4194
4195         if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
4196                 int nr_cpus;
4197
4198                 nr_cpus = libbpf_num_possible_cpus();
4199                 if (nr_cpus < 0) {
4200                         pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
4201                                 map->name, nr_cpus);
4202                         return nr_cpus;
4203                 }
4204                 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
4205                 create_attr.max_entries = nr_cpus;
4206         } else {
4207                 create_attr.max_entries = def->max_entries;
4208         }
4209
4210         if (bpf_map__is_struct_ops(map))
4211                 create_attr.btf_vmlinux_value_type_id =
4212                         map->btf_vmlinux_value_type_id;
4213
4214         create_attr.btf_fd = 0;
4215         create_attr.btf_key_type_id = 0;
4216         create_attr.btf_value_type_id = 0;
4217         if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
4218                 create_attr.btf_fd = btf__fd(obj->btf);
4219                 create_attr.btf_key_type_id = map->btf_key_type_id;
4220                 create_attr.btf_value_type_id = map->btf_value_type_id;
4221         }
4222
4223         if (bpf_map_type__is_map_in_map(def->type)) {
4224                 if (map->inner_map) {
4225                         int err;
4226
4227                         err = bpf_object__create_map(obj, map->inner_map);
4228                         if (err) {
4229                                 pr_warn("map '%s': failed to create inner map: %d\n",
4230                                         map->name, err);
4231                                 return err;
4232                         }
4233                         map->inner_map_fd = bpf_map__fd(map->inner_map);
4234                 }
4235                 if (map->inner_map_fd >= 0)
4236                         create_attr.inner_map_fd = map->inner_map_fd;
4237         }
4238
4239         map->fd = bpf_create_map_xattr(&create_attr);
4240         if (map->fd < 0 && (create_attr.btf_key_type_id ||
4241                             create_attr.btf_value_type_id)) {
4242                 char *cp, errmsg[STRERR_BUFSIZE];
4243                 int err = -errno;
4244
4245                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4246                 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
4247                         map->name, cp, err);
4248                 create_attr.btf_fd = 0;
4249                 create_attr.btf_key_type_id = 0;
4250                 create_attr.btf_value_type_id = 0;
4251                 map->btf_key_type_id = 0;
4252                 map->btf_value_type_id = 0;
4253                 map->fd = bpf_create_map_xattr(&create_attr);
4254         }
4255
4256         if (map->fd < 0)
4257                 return -errno;
4258
4259         if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
4260                 bpf_map__destroy(map->inner_map);
4261                 zfree(&map->inner_map);
4262         }
4263
4264         return 0;
4265 }
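
/* As an example of the CPU-sizing logic above (hypothetical map name), a
 * definition that leaves max_entries unset, e.g.:
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *           __uint(key_size, sizeof(int));
 *           __uint(value_size, sizeof(int));
 *   } events SEC(".maps");
 *
 * ends up with max_entries == libbpf_num_possible_cpus(), i.e. one perf
 * buffer slot per possible CPU.
 */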
4266
4267 static int init_map_slots(struct bpf_map *map)
4268 {
4269         const struct bpf_map *targ_map;
4270         unsigned int i;
4271         int fd, err;
4272
4273         for (i = 0; i < map->init_slots_sz; i++) {
4274                 if (!map->init_slots[i])
4275                         continue;
4276
4277                 targ_map = map->init_slots[i];
4278                 fd = bpf_map__fd(targ_map);
4279                 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
4280                 if (err) {
4281                         err = -errno;
4282                         pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
4283                                 map->name, i, targ_map->name,
4284                                 fd, err);
4285                         return err;
4286                 }
4287                 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
4288                          map->name, i, targ_map->name, fd);
4289         }
4290
4291         zfree(&map->init_slots);
4292         map->init_slots_sz = 0;
4293
4294         return 0;
4295 }
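
/* The init_slots filled in above typically come from a declarative map-in-map
 * definition (a sketch with hypothetical names):
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY);
 *           __uint(max_entries, 1);
 *           __type(key, int);
 *           __type(value, int);
 *   } inner_map SEC(".maps");
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *           __uint(max_entries, 4);
 *           __uint(key_size, sizeof(int));
 *           __array(values, typeof(inner_map));
 *   } outer_map SEC(".maps") = {
 *           .values = { [0] = &inner_map },
 *   };
 *
 * Here only slot 0 is initialized (to inner_map's FD); slots 1-3 are left
 * unset and are skipped by the loop above.
 */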
4296
4297 static int
4298 bpf_object__create_maps(struct bpf_object *obj)
4299 {
4300         struct bpf_map *map;
4301         char *cp, errmsg[STRERR_BUFSIZE];
4302         unsigned int i, j;
4303         int err;
4304
4305         for (i = 0; i < obj->nr_maps; i++) {
4306                 map = &obj->maps[i];
4307
4308                 if (map->pin_path) {
4309                         err = bpf_object__reuse_map(map);
4310                         if (err) {
4311                                 pr_warn("map '%s': error reusing pinned map\n",
4312                                         map->name);
4313                                 goto err_out;
4314                         }
4315                 }
4316
4317                 if (map->fd >= 0) {
4318                         pr_debug("map '%s': skipping creation (preset fd=%d)\n",
4319                                  map->name, map->fd);
4320                 } else {
4321                         err = bpf_object__create_map(obj, map);
4322                         if (err)
4323                                 goto err_out;
4324
4325                         pr_debug("map '%s': created successfully, fd=%d\n",
4326                                  map->name, map->fd);
4327
4328                         if (bpf_map__is_internal(map)) {
4329                                 err = bpf_object__populate_internal_map(obj, map);
4330                                 if (err < 0) {
4331                                         zclose(map->fd);
4332                                         goto err_out;
4333                                 }
4334                         }
4335
4336                         if (map->init_slots_sz) {
4337                                 err = init_map_slots(map);
4338                                 if (err < 0) {
4339                                         zclose(map->fd);
4340                                         goto err_out;
4341                                 }
4342                         }
4343                 }
4344
4345                 if (map->pin_path && !map->pinned) {
4346                         err = bpf_map__pin(map, NULL);
4347                         if (err) {
4348                                 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
4349                                         map->name, map->pin_path, err);
4350                                 zclose(map->fd);
4351                                 goto err_out;
4352                         }
4353                 }
4354         }
4355
4356         return 0;
4357
4358 err_out:
4359         cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4360         pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
4361         pr_perm_msg(err);
4362         for (j = 0; j < i; j++)
4363                 zclose(obj->maps[j].fd);
4364         return err;
4365 }
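
/* Auto-pinning ties the loop above together. A map declared with
 *
 *   __uint(pinning, LIBBPF_PIN_BY_NAME);
 *
 * gets a pin_path of "<pin_root_path>/<map name>" (/sys/fs/bpf by default),
 * so bpf_object__reuse_map() picks up an already-pinned instance if one
 * exists, and bpf_map__pin() pins the freshly created map otherwise.
 */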
4366
4367 #define BPF_CORE_SPEC_MAX_LEN 64
4368
4369 /* represents BPF CO-RE field or array element accessor */
4370 struct bpf_core_accessor {
4371         __u32 type_id;          /* struct/union type or array element type */
4372         __u32 idx;              /* field index or array index */
4373         const char *name;       /* field name or NULL for array accessor */
4374 };
4375
4376 struct bpf_core_spec {
4377         const struct btf *btf;
4378         /* high-level spec: named fields and array indices only */
4379         struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
4380         /* original unresolved (no skip_mods_and_typedefs) root type ID */
4381         __u32 root_type_id;
4382         /* CO-RE relocation kind */
4383         enum bpf_core_relo_kind relo_kind;
4384         /* high-level spec length */
4385         int len;
4386         /* raw, low-level spec: 1-to-1 with accessor spec string */
4387         int raw_spec[BPF_CORE_SPEC_MAX_LEN];
4388         /* raw spec length */
4389         int raw_len;
4390         /* field bit offset represented by spec */
4391         __u32 bit_offset;
4392 };
4393
4394 static bool str_is_empty(const char *s)
4395 {
4396         return !s || !s[0];
4397 }
4398
4399 static bool is_flex_arr(const struct btf *btf,
4400                         const struct bpf_core_accessor *acc,
4401                         const struct btf_array *arr)
4402 {
4403         const struct btf_type *t;
4404
4405         /* not a flexible array if it's not inside a struct or has non-zero size */
4406         if (!acc->name || arr->nelems > 0)
4407                 return false;
4408
4409         /* has to be the last member of enclosing struct */
4410         t = btf__type_by_id(btf, acc->type_id);
4411         return acc->idx == btf_vlen(t) - 1;
4412 }
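
/* E.g. (hypothetical struct): in
 *
 *   struct hdr {
 *           int len;
 *           char data[];    // nelems == 0, last member
 *   };
 *
 * 'data' qualifies as a flexible array, so an access spec may index past it
 * (say, data[100]); a zero-sized array that is not the last member of its
 * struct would not qualify.
 */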
4413
4414 static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
4415 {
4416         switch (kind) {
4417         case BPF_FIELD_BYTE_OFFSET: return "byte_off";
4418         case BPF_FIELD_BYTE_SIZE: return "byte_sz";
4419         case BPF_FIELD_EXISTS: return "field_exists";
4420         case BPF_FIELD_SIGNED: return "signed";
4421         case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
4422         case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
4423         case BPF_TYPE_ID_LOCAL: return "local_type_id";
4424         case BPF_TYPE_ID_TARGET: return "target_type_id";
4425         case BPF_TYPE_EXISTS: return "type_exists";
4426         case BPF_TYPE_SIZE: return "type_size";
4427         case BPF_ENUMVAL_EXISTS: return "enumval_exists";
4428         case BPF_ENUMVAL_VALUE: return "enumval_value";
4429         default: return "unknown";
4430         }
4431 }
4432
4433 static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
4434 {
4435         switch (kind) {
4436         case BPF_FIELD_BYTE_OFFSET:
4437         case BPF_FIELD_BYTE_SIZE:
4438         case BPF_FIELD_EXISTS:
4439         case BPF_FIELD_SIGNED:
4440         case BPF_FIELD_LSHIFT_U64:
4441         case BPF_FIELD_RSHIFT_U64:
4442                 return true;
4443         default:
4444                 return false;
4445         }
4446 }
4447
4448 static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
4449 {
4450         switch (kind) {
4451         case BPF_TYPE_ID_LOCAL:
4452         case BPF_TYPE_ID_TARGET:
4453         case BPF_TYPE_EXISTS:
4454         case BPF_TYPE_SIZE:
4455                 return true;
4456         default:
4457                 return false;
4458         }
4459 }
4460
4461 static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
4462 {
4463         switch (kind) {
4464         case BPF_ENUMVAL_EXISTS:
4465         case BPF_ENUMVAL_VALUE:
4466                 return true;
4467         default:
4468                 return false;
4469         }
4470 }
4471
4472 /*
4473  * Turn bpf_core_relo into a low- and high-level spec representation,
4474  * validating correctness along the way, as well as calculating resulting
4475  * field bit offset, specified by accessor string. Low-level spec captures
4476  * every single level of nestedness, including traversing anonymous
4477  * struct/union members. High-level one only captures semantically meaningful
4478  * "turning points": named fields and array indices.
4479  * E.g., for this case:
4480  *
4481  *   struct sample {
4482  *       int __unimportant;
4483  *       struct {
4484  *           int __1;
4485  *           int __2;
4486  *           int a[7];
4487  *       };
4488  *   };
4489  *
4490  *   struct sample *s = ...;
4491  *
4492  *   int *x = &s->a[3]; // access string = '0:1:2:3'
4493  *
4494  * Low-level spec has 1:1 mapping with each element of access string (it's
4495  * just a parsed access string representation): [0, 1, 2, 3].
4496  *
4497  * High-level spec will capture only 3 points:
4498  *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
4499  *   - field 'a' access (corresponds to '2' in low-level spec);
4500  *   - array element #3 access (corresponds to '3' in low-level spec).
4501  *
4502  * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
4503  * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
4504  * spec and raw_spec are kept empty.
4505  *
4506  * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the access
4507  * string to specify the enumerator's index that needs to be relocated.
4508  */
4509 static int bpf_core_parse_spec(const struct btf *btf,
4510                                __u32 type_id,
4511                                const char *spec_str,
4512                                enum bpf_core_relo_kind relo_kind,
4513                                struct bpf_core_spec *spec)
4514 {
4515         int access_idx, parsed_len, i;
4516         struct bpf_core_accessor *acc;
4517         const struct btf_type *t;
4518         const char *name;
4519         __u32 id;
4520         __s64 sz;
4521
4522         if (str_is_empty(spec_str) || *spec_str == ':')
4523                 return -EINVAL;
4524
4525         memset(spec, 0, sizeof(*spec));
4526         spec->btf = btf;
4527         spec->root_type_id = type_id;
4528         spec->relo_kind = relo_kind;
4529
4530         /* type-based relocations don't have a field access string */
4531         if (core_relo_is_type_based(relo_kind)) {
4532                 if (strcmp(spec_str, "0"))
4533                         return -EINVAL;
4534                 return 0;
4535         }
4536
4537         /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
4538         while (*spec_str) {
4539                 if (*spec_str == ':')
4540                         ++spec_str;
4541                 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
4542                         return -EINVAL;
4543                 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4544                         return -E2BIG;
4545                 spec_str += parsed_len;
4546                 spec->raw_spec[spec->raw_len++] = access_idx;
4547         }
4548
4549         if (spec->raw_len == 0)
4550                 return -EINVAL;
4551
4552         t = skip_mods_and_typedefs(btf, type_id, &id);
4553         if (!t)
4554                 return -EINVAL;
4555
4556         access_idx = spec->raw_spec[0];
4557         acc = &spec->spec[0];
4558         acc->type_id = id;
4559         acc->idx = access_idx;
4560         spec->len++;
4561
4562         if (core_relo_is_enumval_based(relo_kind)) {
4563                 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
4564                         return -EINVAL;
4565
4566                 /* record enumerator name in the first accessor */
4567                 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
4568                 return 0;
4569         }
4570
4571         if (!core_relo_is_field_based(relo_kind))
4572                 return -EINVAL;
4573
4574         sz = btf__resolve_size(btf, id);
4575         if (sz < 0)
4576                 return sz;
4577         spec->bit_offset = access_idx * sz * 8;
4578
4579         for (i = 1; i < spec->raw_len; i++) {
4580                 t = skip_mods_and_typedefs(btf, id, &id);
4581                 if (!t)
4582                         return -EINVAL;
4583
4584                 access_idx = spec->raw_spec[i];
4585                 acc = &spec->spec[spec->len];
4586
4587                 if (btf_is_composite(t)) {
4588                         const struct btf_member *m;
4589                         __u32 bit_offset;
4590
4591                         if (access_idx >= btf_vlen(t))
4592                                 return -EINVAL;
4593
4594                         bit_offset = btf_member_bit_offset(t, access_idx);
4595                         spec->bit_offset += bit_offset;
4596
4597                         m = btf_members(t) + access_idx;
4598                         if (m->name_off) {
4599                                 name = btf__name_by_offset(btf, m->name_off);
4600                                 if (str_is_empty(name))
4601                                         return -EINVAL;
4602
4603                                 acc->type_id = id;
4604                                 acc->idx = access_idx;
4605                                 acc->name = name;
4606                                 spec->len++;
4607                         }
4608
4609                         id = m->type;
4610                 } else if (btf_is_array(t)) {
4611                         const struct btf_array *a = btf_array(t);
4612                         bool flex;
4613
4614                         t = skip_mods_and_typedefs(btf, a->type, &id);
4615                         if (!t)
4616                                 return -EINVAL;
4617
4618                         flex = is_flex_arr(btf, acc - 1, a);
4619                         if (!flex && access_idx >= a->nelems)
4620                                 return -EINVAL;
4621
4622                         spec->spec[spec->len].type_id = id;
4623                         spec->spec[spec->len].idx = access_idx;
4624                         spec->len++;
4625
4626                         sz = btf__resolve_size(btf, id);
4627                         if (sz < 0)
4628                                 return sz;
4629                         spec->bit_offset += access_idx * sz * 8;
4630                 } else {
4631                         pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
4632                                 type_id, spec_str, i, id, btf_kind_str(t));
4633                         return -EINVAL;
4634                 }
4635         }
4636
4637         return 0;
4638 }
4639
4640 static bool bpf_core_is_flavor_sep(const char *s)
4641 {
4642         /* check X___Y name pattern, where X and Y are not underscores */
4643         return s[0] != '_' &&                                 /* X */
4644                s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
4645                s[4] != '_';                                   /* Y */
4646 }
4647
4648 /* Given 'some_struct_name___with_flavor', return the length of the name
4649  * prefix before the last triple underscore. The struct name part after the
4650  * last triple underscore is ignored by BPF CO-RE during relocation matching.
4651  */
4652 static size_t bpf_core_essential_name_len(const char *name)
4653 {
4654         size_t n = strlen(name);
4655         int i;
4656
4657         for (i = n - 5; i >= 0; i--) {
4658                 if (bpf_core_is_flavor_sep(name + i))
4659                         return i + 1;
4660         }
4661         return n;
4662 }
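
/* For instance, with a made-up flavor suffix:
 *
 *   bpf_core_essential_name_len("task_struct___v5_4") == 11  // "task_struct"
 *   bpf_core_essential_name_len("task_struct") == 11         // no suffix
 *
 * so both names compare equal over their essential prefix.
 */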
4663
4664 struct core_cand {
4666         const struct btf *btf;
4667         const struct btf_type *t;
4668         const char *name;
4669         __u32 id;
4670 };
4671
4672 /* dynamically sized list of type IDs and their associated struct btf */
4673 struct core_cand_list {
4674         struct core_cand *cands;
4675         int len;
4676 };
4677
4678 static void bpf_core_free_cands(struct core_cand_list *cands)
4679 {
4680         free(cands->cands);
4681         free(cands);
4682 }
4683
4684 static int bpf_core_add_cands(struct core_cand *local_cand,
4685                               size_t local_essent_len,
4686                               const struct btf *targ_btf,
4687                               const char *targ_btf_name,
4688                               int targ_start_id,
4689                               struct core_cand_list *cands)
4690 {
4691         struct core_cand *new_cands, *cand;
4692         const struct btf_type *t;
4693         const char *targ_name;
4694         size_t targ_essent_len;
4695         int n, i;
4696
4697         n = btf__get_nr_types(targ_btf);
4698         for (i = targ_start_id; i <= n; i++) {
4699                 t = btf__type_by_id(targ_btf, i);
4700                 if (btf_kind(t) != btf_kind(local_cand->t))
4701                         continue;
4702
4703                 targ_name = btf__name_by_offset(targ_btf, t->name_off);
4704                 if (str_is_empty(targ_name))
4705                         continue;
4706
4707                 targ_essent_len = bpf_core_essential_name_len(targ_name);
4708                 if (targ_essent_len != local_essent_len)
4709                         continue;
4710
4711                 if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
4712                         continue;
4713
4714                 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
4715                          local_cand->id, btf_kind_str(local_cand->t),
4716                          local_cand->name, i, btf_kind_str(t), targ_name,
4717                          targ_btf_name);
4718                 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
4719                                               sizeof(*cands->cands));
4720                 if (!new_cands)
4721                         return -ENOMEM;
4722
4723                 cand = &new_cands[cands->len];
4724                 cand->btf = targ_btf;
4725                 cand->t = t;
4726                 cand->name = targ_name;
4727                 cand->id = i;
4728
4729                 cands->cands = new_cands;
4730                 cands->len++;
4731         }
4732         return 0;
4733 }
4734
4735 static int load_module_btfs(struct bpf_object *obj)
4736 {
4737         struct bpf_btf_info info;
4738         struct module_btf *mod_btf;
4739         struct btf *btf;
4740         char name[64];
4741         __u32 id = 0, len;
4742         int err, fd;
4743
4744         if (obj->btf_modules_loaded)
4745                 return 0;
4746
4747         /* don't do this again, even if we find no module BTFs */
4748         obj->btf_modules_loaded = true;
4749
4750         /* kernel too old to support module BTFs */
4751         if (!kernel_supports(FEAT_MODULE_BTF))
4752                 return 0;
4753
4754         while (true) {
4755                 err = bpf_btf_get_next_id(id, &id);
4756                 if (err && errno == ENOENT)
4757                         return 0;
4758                 if (err) {
4759                         err = -errno;
4760                         pr_warn("failed to iterate BTF objects: %d\n", err);
4761                         return err;
4762                 }
4763
4764                 fd = bpf_btf_get_fd_by_id(id);
4765                 if (fd < 0) {
4766                         if (errno == ENOENT)
4767                                 continue; /* expected race: BTF was unloaded */
4768                         err = -errno;
4769                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
4770                         return err;
4771                 }
4772
4773                 len = sizeof(info);
4774                 memset(&info, 0, sizeof(info));
4775                 info.name = ptr_to_u64(name);
4776                 info.name_len = sizeof(name);
4777
4778                 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4779                 if (err) {
4780                         err = -errno;
4781                         pr_warn("failed to get BTF object #%d info: %d\n", id, err);
4782                         goto err_out;
4783                 }
4784
4785                 /* ignore non-module BTFs */
4786                 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
4787                         close(fd);
4788                         continue;
4789                 }
4790
4791                 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
4792                 if (IS_ERR(btf)) {
4793                         pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n",
4794                                 name, id, PTR_ERR(btf));
4795                         err = PTR_ERR(btf);
4796                         goto err_out;
4797                 }
4798
4799                 err = btf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
4800                                      sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
4801                 if (err)
4802                         goto err_out;
4803
4804                 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
4805
4806                 mod_btf->btf = btf;
4807                 mod_btf->id = id;
4808                 mod_btf->fd = fd;
4809                 mod_btf->name = strdup(name);
4810                 if (!mod_btf->name) {
4811                         err = -ENOMEM;
4812                         goto err_out;
4813                 }
4814                 continue;
4815
4816 err_out:
4817                 close(fd);
4818                 return err;
4819         }
4820
4821         return 0;
4822 }
4823
4824 static struct core_cand_list *
4825 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
4826 {
4827         struct core_cand local_cand = {};
4828         struct core_cand_list *cands;
4829         const struct btf *main_btf;
4830         size_t local_essent_len;
4831         int err, i;
4832
4833         local_cand.btf = local_btf;
4834         local_cand.t = btf__type_by_id(local_btf, local_type_id);
4835         if (!local_cand.t)
4836                 return ERR_PTR(-EINVAL);
4837
4838         local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
4839         if (str_is_empty(local_cand.name))
4840                 return ERR_PTR(-EINVAL);
4841         local_essent_len = bpf_core_essential_name_len(local_cand.name);
4842
4843         cands = calloc(1, sizeof(*cands));
4844         if (!cands)
4845                 return ERR_PTR(-ENOMEM);
4846
4847         /* Attempt to find target candidates in vmlinux BTF first */
4848         main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
4849         err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
4850         if (err)
4851                 goto err_out;
4852
4853         /* if vmlinux BTF has any candidate, don't go for module BTFs */
4854         if (cands->len)
4855                 return cands;
4856
4857         /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
4858         if (obj->btf_vmlinux_override)
4859                 return cands;
4860
4861         /* now look through module BTFs, still trying to find candidates */
4862         err = load_module_btfs(obj);
4863         if (err)
4864                 goto err_out;
4865
4866         for (i = 0; i < obj->btf_module_cnt; i++) {
4867                 err = bpf_core_add_cands(&local_cand, local_essent_len,
4868                                          obj->btf_modules[i].btf,
4869                                          obj->btf_modules[i].name,
4870                                          btf__get_nr_types(obj->btf_vmlinux) + 1,
4871                                          cands);
4872                 if (err)
4873                         goto err_out;
4874         }
4875
4876         return cands;
4877 err_out:
4878         bpf_core_free_cands(cands);
4879         return ERR_PTR(err);
4880 }
4881
4882 /* Check two types for compatibility for the purpose of field access
4883  * relocation. const/volatile/restrict and typedefs are skipped to ensure we
4884  * are relocating semantically compatible entities:
4885  *   - any two STRUCTs/UNIONs are compatible and can be mixed;
4886  *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
4887  *   - any two PTRs are always compatible;
4888  *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
4889  *     least one of the enums should be anonymous;
4890  *   - for ENUMs, sizes are ignored;
4891  *   - for INT, size and signedness are ignored;
4892  *   - for ARRAY, dimensionality is ignored, element types are checked for
4893  *     compatibility recursively;
4894  *   - everything else shouldn't be ever a target of relocation.
4895  * These rules are not set in stone and probably will be adjusted as we get
4896  * more experience with using BPF CO-RE relocations.
4897  */
4898 static int bpf_core_fields_are_compat(const struct btf *local_btf,
4899                                       __u32 local_id,
4900                                       const struct btf *targ_btf,
4901                                       __u32 targ_id)
4902 {
4903         const struct btf_type *local_type, *targ_type;
4904
4905 recur:
4906         local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
4907         targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4908         if (!local_type || !targ_type)
4909                 return -EINVAL;
4910
4911         if (btf_is_composite(local_type) && btf_is_composite(targ_type))
4912                 return 1;
4913         if (btf_kind(local_type) != btf_kind(targ_type))
4914                 return 0;
4915
4916         switch (btf_kind(local_type)) {
4917         case BTF_KIND_PTR:
4918                 return 1;
4919         case BTF_KIND_FWD:
4920         case BTF_KIND_ENUM: {
4921                 const char *local_name, *targ_name;
4922                 size_t local_len, targ_len;
4923
4924                 local_name = btf__name_by_offset(local_btf,
4925                                                  local_type->name_off);
4926                 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
4927                 local_len = bpf_core_essential_name_len(local_name);
4928                 targ_len = bpf_core_essential_name_len(targ_name);
4929                 /* one of them is anonymous or both w/ same flavor-less names */
4930                 return local_len == 0 || targ_len == 0 ||
4931                        (local_len == targ_len &&
4932                         strncmp(local_name, targ_name, local_len) == 0);
4933         }
4934         case BTF_KIND_INT:
4935                 /* just reject deprecated bitfield-like integers; all other
4936                  * integers are by default compatible with each other
4937                  */
4938                 return btf_int_offset(local_type) == 0 &&
4939                        btf_int_offset(targ_type) == 0;
4940         case BTF_KIND_ARRAY:
4941                 local_id = btf_array(local_type)->type;
4942                 targ_id = btf_array(targ_type)->type;
4943                 goto recur;
4944         default:
4945                 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
4946                         btf_kind(local_type), local_id, targ_id);
4947                 return 0;
4948         }
4949 }
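
/* For example (hypothetical types): matching a local field of type 'struct S'
 * against a target field of type 'union U' succeeds at the composite level
 * (any STRUCT/UNION mix is allowed), and a local '__u32' against a target
 * '__u64' succeeds at the INT level, since integer size and signedness are
 * ignored; only bitfield-like INTs with a non-zero bit offset are rejected.
 */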
4950
4951 /*
4952  * Given single high-level named field accessor in local type, find
4953  * corresponding high-level accessor for a target type. Along the way,
4954  * maintain low-level spec for target as well. Also keep updating target
4955  * bit offset.
4956  *
4957  * Searching is performed through recursive exhaustive enumeration of all
4958  * fields of a struct/union. If there are any anonymous (embedded)
4959  * structs/unions, they are recursively searched as well. If a field with
4960  * the desired name is found, compatibility between the local and target
4961  * types is checked before returning the result.
4962  *
4963  * 1 is returned if the field is found.
4964  * 0 is returned if no compatible field is found.
4965  * <0 is returned on error.
4966  */
4967 static int bpf_core_match_member(const struct btf *local_btf,
4968                                  const struct bpf_core_accessor *local_acc,
4969                                  const struct btf *targ_btf,
4970                                  __u32 targ_id,
4971                                  struct bpf_core_spec *spec,
4972                                  __u32 *next_targ_id)
4973 {
4974         const struct btf_type *local_type, *targ_type;
4975         const struct btf_member *local_member, *m;
4976         const char *local_name, *targ_name;
4977         __u32 local_id;
4978         int i, n, found;
4979
4980         targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4981         if (!targ_type)
4982                 return -EINVAL;
4983         if (!btf_is_composite(targ_type))
4984                 return 0;
4985
4986         local_id = local_acc->type_id;
4987         local_type = btf__type_by_id(local_btf, local_id);
4988         local_member = btf_members(local_type) + local_acc->idx;
4989         local_name = btf__name_by_offset(local_btf, local_member->name_off);
4990
4991         n = btf_vlen(targ_type);
4992         m = btf_members(targ_type);
4993         for (i = 0; i < n; i++, m++) {
4994                 __u32 bit_offset;
4995
4996                 bit_offset = btf_member_bit_offset(targ_type, i);
4997
4998                 /* too deep struct/union/array nesting */
4999                 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5000                         return -E2BIG;
5001
5002                 /* speculate this member will be the good one */
5003                 spec->bit_offset += bit_offset;
5004                 spec->raw_spec[spec->raw_len++] = i;
5005
5006                 targ_name = btf__name_by_offset(targ_btf, m->name_off);
5007                 if (str_is_empty(targ_name)) {
5008                         /* embedded struct/union, we need to go deeper */
5009                         found = bpf_core_match_member(local_btf, local_acc,
5010                                                       targ_btf, m->type,
5011                                                       spec, next_targ_id);
5012                         if (found) /* either found or error */
5013                                 return found;
5014                 } else if (strcmp(local_name, targ_name) == 0) {
5015                         /* matching named field */
5016                         struct bpf_core_accessor *targ_acc;
5017
5018                         targ_acc = &spec->spec[spec->len++];
5019                         targ_acc->type_id = targ_id;
5020                         targ_acc->idx = i;
5021                         targ_acc->name = targ_name;
5022
5023                         *next_targ_id = m->type;
5024                         found = bpf_core_fields_are_compat(local_btf,
5025                                                            local_member->type,
5026                                                            targ_btf, m->type);
5027                         if (!found)
5028                                 spec->len--; /* pop accessor */
5029                         return found;
5030                 }
5031                 /* member turned out not to be what we were looking for */
5032                 spec->bit_offset -= bit_offset;
5033                 spec->raw_len--;
5034         }
5035
5036         return 0;
5037 }
5038
5039 /* Check local and target types for compatibility. This check is used for
5040  * type-based CO-RE relocations and follows slightly different rules from
5041  * field-based relocations. This function assumes that root types were already
5042  * checked for name match. Beyond that initial root-level name check, names
5043  * are completely ignored. Compatibility rules are as follows:
5044  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5045  *     kind should match for local and target types (i.e., STRUCT is not
5046  *     compatible with UNION);
5047  *   - for ENUMs, the size is ignored;
5048  *   - for INT, size and signedness are ignored;
5049  *   - for ARRAY, dimensionality is ignored, element types are checked for
5050  *     compatibility recursively;
5051  *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
5052  *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5053  *   - FUNC_PROTOs are compatible if they have compatible signatures: the
5054  *     same number of input args and compatible return and argument types.
5055  * These rules are not set in stone and probably will be adjusted as we get
5056  * more experience with using BPF CO-RE relocations.
5057  */
5058 static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5059                                      const struct btf *targ_btf, __u32 targ_id)
5060 {
5061         const struct btf_type *local_type, *targ_type;
5062         int depth = 32; /* max recursion depth */
5063
5064         /* caller made sure that names match (ignoring flavor suffix) */
5065         local_type = btf__type_by_id(local_btf, local_id);
5066         targ_type = btf__type_by_id(targ_btf, targ_id);
5067         if (btf_kind(local_type) != btf_kind(targ_type))
5068                 return 0;
5069
5070 recur:
5071         depth--;
5072         if (depth < 0)
5073                 return -EINVAL;
5074
5075         local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5076         targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5077         if (!local_type || !targ_type)
5078                 return -EINVAL;
5079
5080         if (btf_kind(local_type) != btf_kind(targ_type))
5081                 return 0;
5082
5083         switch (btf_kind(local_type)) {
5084         case BTF_KIND_UNKN:
5085         case BTF_KIND_STRUCT:
5086         case BTF_KIND_UNION:
5087         case BTF_KIND_ENUM:
5088         case BTF_KIND_FWD:
5089                 return 1;
5090         case BTF_KIND_INT:
5091                 /* just reject deprecated bitfield-like integers; all other
5092                  * integers are by default compatible with each other
5093                  */
5094                 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
5095         case BTF_KIND_PTR:
5096                 local_id = local_type->type;
5097                 targ_id = targ_type->type;
5098                 goto recur;
5099         case BTF_KIND_ARRAY:
5100                 local_id = btf_array(local_type)->type;
5101                 targ_id = btf_array(targ_type)->type;
5102                 goto recur;
5103         case BTF_KIND_FUNC_PROTO: {
5104                 struct btf_param *local_p = btf_params(local_type);
5105                 struct btf_param *targ_p = btf_params(targ_type);
5106                 __u16 local_vlen = btf_vlen(local_type);
5107                 __u16 targ_vlen = btf_vlen(targ_type);
5108                 int i, err;
5109
5110                 if (local_vlen != targ_vlen)
5111                         return 0;
5112
5113                 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
5114                         skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
5115                         skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
5116                         err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
5117                         if (err <= 0)
5118                                 return err;
5119                 }
5120
5121                 /* tail recurse for return type check */
5122                 skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
5123                 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
5124                 goto recur;
5125         }
5126         default:
5127                 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
5128                         btf_kind_str(local_type), local_id, targ_id);
5129                 return 0;
5130         }
5131 }
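
/* A sketch of these rules in action (hypothetical typedefs): a local
 *
 *   typedef int (*handler_t)(struct ctx *);
 *
 * is compatible with a target 'typedef long (*handler_t___v2)(struct ctx___v2 *)':
 * both roots are TYPEDEFs whose essential names match, the PTR layer is
 * followed, both FUNC_PROTOs take one argument, 'struct ctx' vs
 * 'struct ctx___v2' passes the STRUCT rule (names past the root are ignored),
 * and the 'int' vs 'long' return type passes the INT rule.
 */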
5132
5133 /*
5134  * Try to match local spec to a target type and, if successful, produce full
5135  * target spec (high-level, low-level + bit offset).
5136  */
5137 static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
5138                                const struct btf *targ_btf, __u32 targ_id,
5139                                struct bpf_core_spec *targ_spec)
5140 {
5141         const struct btf_type *targ_type;
5142         const struct bpf_core_accessor *local_acc;
5143         struct bpf_core_accessor *targ_acc;
5144         int i, sz, matched;
5145
5146         memset(targ_spec, 0, sizeof(*targ_spec));
5147         targ_spec->btf = targ_btf;
5148         targ_spec->root_type_id = targ_id;
5149         targ_spec->relo_kind = local_spec->relo_kind;
5150
5151         if (core_relo_is_type_based(local_spec->relo_kind)) {
5152                 return bpf_core_types_are_compat(local_spec->btf,
5153                                                  local_spec->root_type_id,
5154                                                  targ_btf, targ_id);
5155         }
5156
5157         local_acc = &local_spec->spec[0];
5158         targ_acc = &targ_spec->spec[0];
5159
5160         if (core_relo_is_enumval_based(local_spec->relo_kind)) {
5161                 size_t local_essent_len, targ_essent_len;
5162                 const struct btf_enum *e;
5163                 const char *targ_name;
5164
5165                 /* has to resolve to an enum */
5166                 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
5167                 if (!btf_is_enum(targ_type))
5168                         return 0;
5169
5170                 local_essent_len = bpf_core_essential_name_len(local_acc->name);
5171
5172                 for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
5173                         targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
5174                         targ_essent_len = bpf_core_essential_name_len(targ_name);
5175                         if (targ_essent_len != local_essent_len)
5176                                 continue;
5177                         if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
5178                                 targ_acc->type_id = targ_id;
5179                                 targ_acc->idx = i;
5180                                 targ_acc->name = targ_name;
5181                                 targ_spec->len++;
5182                                 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5183                                 targ_spec->raw_len++;
5184                                 return 1;
5185                         }
5186                 }
5187                 return 0;
5188         }
5189
5190         if (!core_relo_is_field_based(local_spec->relo_kind))
5191                 return -EINVAL;
5192
5193         for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
5194                 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
5195                                                    &targ_id);
5196                 if (!targ_type)
5197                         return -EINVAL;
5198
5199                 if (local_acc->name) {
5200                         matched = bpf_core_match_member(local_spec->btf,
5201                                                         local_acc,
5202                                                         targ_btf, targ_id,
5203                                                         targ_spec, &targ_id);
5204                         if (matched <= 0)
5205                                 return matched;
5206                 } else {
5207                         /* for i=0, targ_id is already treated as array element
5208                          * type (because it's the original struct); for others,
5209                          * we should find the array element type first
5210                          */
5211                         if (i > 0) {
5212                                 const struct btf_array *a;
5213                                 bool flex;
5214
5215                                 if (!btf_is_array(targ_type))
5216                                         return 0;
5217
5218                                 a = btf_array(targ_type);
5219                                 flex = is_flex_arr(targ_btf, targ_acc - 1, a);
5220                                 if (!flex && local_acc->idx >= a->nelems)
5221                                         return 0;
5222                                 if (!skip_mods_and_typedefs(targ_btf, a->type,
5223                                                             &targ_id))
5224                                         return -EINVAL;
5225                         }
5226
5227                         /* too deep struct/union/array nesting */
5228                         if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5229                                 return -E2BIG;
5230
5231                         targ_acc->type_id = targ_id;
5232                         targ_acc->idx = local_acc->idx;
5233                         targ_acc->name = NULL;
5234                         targ_spec->len++;
5235                         targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5236                         targ_spec->raw_len++;
5237
5238                         sz = btf__resolve_size(targ_btf, targ_id);
5239                         if (sz < 0)
5240                                 return sz;
5241                         targ_spec->bit_offset += local_acc->idx * sz * 8;
5242                 }
5243         }
5244
5245         return 1;
5246 }
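
/* Continuing the 'struct sample' example from bpf_core_parse_spec()
 * (illustrative layout only): if in the target 'a' were a direct member at
 * index 0, with no anonymous struct around it, the local high-level spec
 * <&s[0], 'a', [3]> would still match, but with target raw spec [0, 0, 3]
 * instead of [0, 1, 2, 3] and a correspondingly recomputed bit_offset.
 */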
5247
5248 static int bpf_core_calc_field_relo(const struct bpf_program *prog,
5249                                     const struct bpf_core_relo *relo,
5250                                     const struct bpf_core_spec *spec,
5251                                     __u32 *val, __u32 *field_sz, __u32 *type_id,
5252                                     bool *validate)
5253 {
5254         const struct bpf_core_accessor *acc;
5255         const struct btf_type *t;
5256         __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
5257         const struct btf_member *m;
5258         const struct btf_type *mt;
5259         bool bitfield;
5260         __s64 sz;
5261
5262         *field_sz = 0;
5263
5264         if (relo->kind == BPF_FIELD_EXISTS) {
5265                 *val = spec ? 1 : 0;
5266                 return 0;
5267         }
5268
5269         if (!spec)
5270                 return -EUCLEAN; /* request instruction poisoning */
5271
5272         acc = &spec->spec[spec->len - 1];
5273         t = btf__type_by_id(spec->btf, acc->type_id);
5274
5275         /* a[n] accessor needs special handling */
5276         if (!acc->name) {
5277                 if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
5278                         *val = spec->bit_offset / 8;
5279                         /* remember field size for load/store mem size */
5280                         sz = btf__resolve_size(spec->btf, acc->type_id);
5281                         if (sz < 0)
5282                                 return -EINVAL;
5283                         *field_sz = sz;
5284                         *type_id = acc->type_id;
5285                 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
5286                         sz = btf__resolve_size(spec->btf, acc->type_id);
5287                         if (sz < 0)
5288                                 return -EINVAL;
5289                         *val = sz;
5290                 } else {
5291                         pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
5292                                 prog->name, relo->kind, relo->insn_off / 8);
5293                         return -EINVAL;
5294                 }
5295                 if (validate)
5296                         *validate = true;
5297                 return 0;
5298         }
5299
5300         m = btf_members(t) + acc->idx;
5301         mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
5302         bit_off = spec->bit_offset;
5303         bit_sz = btf_member_bitfield_size(t, acc->idx);
5304
5305         bitfield = bit_sz > 0;
5306         if (bitfield) {
5307                 byte_sz = mt->size;
5308                 byte_off = bit_off / 8 / byte_sz * byte_sz;
5309                 /* figure out smallest int size necessary for bitfield load */
5310                 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
5311                         if (byte_sz >= 8) {
5312                                 /* bitfield can't be read with 64-bit read */
5313                                 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
5314                                         prog->name, relo->kind, relo->insn_off / 8);
5315                                 return -E2BIG;
5316                         }
5317                         byte_sz *= 2;
5318                         byte_off = bit_off / 8 / byte_sz * byte_sz;
5319                 }
5320         } else {
5321                 sz = btf__resolve_size(spec->btf, field_type_id);
5322                 if (sz < 0)
5323                         return -EINVAL;
5324                 byte_sz = sz;
5325                 byte_off = spec->bit_offset / 8;
5326                 bit_sz = byte_sz * 8;
5327         }
5328
5329         /* for bitfields, all the relocatable aspects are ambiguous and we
5330          * might disagree with the compiler, so turn off validation of the
5331          * expected value, except for signedness
5332          */
5333         if (validate)
5334                 *validate = !bitfield;
5335
5336         switch (relo->kind) {
5337         case BPF_FIELD_BYTE_OFFSET:
5338                 *val = byte_off;
5339                 if (!bitfield) {
5340                         *field_sz = byte_sz;
5341                         *type_id = field_type_id;
5342                 }
5343                 break;
5344         case BPF_FIELD_BYTE_SIZE:
5345                 *val = byte_sz;
5346                 break;
5347         case BPF_FIELD_SIGNED:
5348                 /* enums will be assumed unsigned */
5349                 *val = btf_is_enum(mt) ||
5350                        (btf_int_encoding(mt) & BTF_INT_SIGNED);
5351                 if (validate)
5352                         *validate = true; /* signedness is never ambiguous */
5353                 break;
5354         case BPF_FIELD_LSHIFT_U64:
5355 #if __BYTE_ORDER == __LITTLE_ENDIAN
5356                 *val = 64 - (bit_off + bit_sz - byte_off * 8);
5357 #else
5358                 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
5359 #endif
5360                 break;
5361         case BPF_FIELD_RSHIFT_U64:
5362                 *val = 64 - bit_sz;
5363                 if (validate)
5364                         *validate = true; /* right shift is never ambiguous */
5365                 break;
5366         case BPF_FIELD_EXISTS:
5367         default:
5368                 return -EOPNOTSUPP;
5369         }
5370
5371         return 0;
5372 }
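
/* Worked bitfield example (hypothetical layout): for
 *
 *   struct s { __u64 pad : 60; __u64 f : 4; };
 *
 * relocating 'f' gives bit_off = 60 and bit_sz = 4; byte_sz starts at
 * sizeof(__u64) == 8, so byte_off = 0 and a single 8-byte load already
 * covers bits [60, 64). The derived values are then BYTE_OFFSET = 0,
 * BYTE_SIZE = 8, LSHIFT_U64 = 64 - (60 + 4 - 0) = 0 on little-endian,
 * and RSHIFT_U64 = 64 - 4 = 60.
 */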
5373
5374 static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
5375                                    const struct bpf_core_spec *spec,
5376                                    __u32 *val)
5377 {
5378         __s64 sz;
5379
5380         /* type-based relos return zero when target type is not found */
5381         if (!spec) {
5382                 *val = 0;
5383                 return 0;
5384         }
5385
5386         switch (relo->kind) {
5387         case BPF_TYPE_ID_TARGET:
5388                 *val = spec->root_type_id;
5389                 break;
5390         case BPF_TYPE_EXISTS:
5391                 *val = 1;
5392                 break;
5393         case BPF_TYPE_SIZE:
5394                 sz = btf__resolve_size(spec->btf, spec->root_type_id);
5395                 if (sz < 0)
5396                         return -EINVAL;
5397                 *val = sz;
5398                 break;
5399         case BPF_TYPE_ID_LOCAL:
5400         /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
5401         default:
5402                 return -EOPNOTSUPP;
5403         }
5404
5405         return 0;
5406 }
5407
5408 static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
5409                                       const struct bpf_core_spec *spec,
5410                                       __u32 *val)
5411 {
5412         const struct btf_type *t;
5413         const struct btf_enum *e;
5414
5415         switch (relo->kind) {
5416         case BPF_ENUMVAL_EXISTS:
5417                 *val = spec ? 1 : 0;
5418                 break;
5419         case BPF_ENUMVAL_VALUE:
5420                 if (!spec)
5421                         return -EUCLEAN; /* request instruction poisoning */
5422                 t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
5423                 e = btf_enum(t) + spec->spec[0].idx;
5424                 *val = e->val;
5425                 break;
5426         default:
5427                 return -EOPNOTSUPP;
5428         }
5429
5430         return 0;
5431 }
5432
5433 struct bpf_core_relo_res {
5435         /* expected value in the instruction, unless validate == false */
5436         __u32 orig_val;
5437         /* new value that needs to be patched up to */
5438         __u32 new_val;
5439         /* relocation unsuccessful, poison instruction, but don't fail load */
5440         bool poison;
5441         /* some relocations can't be validated against orig_val */
5442         bool validate;
5443         /* for field byte offset relocations of the forms:
5444          *     *(T *)(rX + <off>) = rY
5445          *     rX = *(T *)(rY + <off>),
5446          * we remember original and resolved field size to adjust direct
5447          * memory loads of pointers and integers; this is necessary for 32-bit
5448          * host kernel architectures, but it also makes it possible to
5449          * automatically relocate fields that were resized from, e.g., u32 to u64.
5450          */
5451         bool fail_memsz_adjust;
5452         __u32 orig_sz;
5453         __u32 orig_type_id;
5454         __u32 new_sz;
5455         __u32 new_type_id;
5456 };
5457
5458 /* Calculate original and target relocation values, given local and target
5459  * specs and relocation kind. These values are calculated for each candidate.
5460  * If there are multiple candidates, resulting values should all be consistent
5461  * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
5462  * If the instruction has to be poisoned, res->poison will be set to true.
5463  */
5464 static int bpf_core_calc_relo(const struct bpf_program *prog,
5465                               const struct bpf_core_relo *relo,
5466                               int relo_idx,
5467                               const struct bpf_core_spec *local_spec,
5468                               const struct bpf_core_spec *targ_spec,
5469                               struct bpf_core_relo_res *res)
5470 {
5471         int err = -EOPNOTSUPP;
5472
5473         res->orig_val = 0;
5474         res->new_val = 0;
5475         res->poison = false;
5476         res->validate = true;
5477         res->fail_memsz_adjust = false;
5478         res->orig_sz = res->new_sz = 0;
5479         res->orig_type_id = res->new_type_id = 0;
5480
5481         if (core_relo_is_field_based(relo->kind)) {
5482                 err = bpf_core_calc_field_relo(prog, relo, local_spec,
5483                                                &res->orig_val, &res->orig_sz,
5484                                                &res->orig_type_id, &res->validate);
5485                 err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
5486                                                       &res->new_val, &res->new_sz,
5487                                                       &res->new_type_id, NULL);
5488                 if (err)
5489                         goto done;
5490                 /* Validate if it's safe to adjust load/store memory size.
5491                  * Adjustments are performed only if original and new memory
5492                  * sizes differ.
5493                  */
5494                 res->fail_memsz_adjust = false;
5495                 if (res->orig_sz != res->new_sz) {
5496                         const struct btf_type *orig_t, *new_t;
5497
5498                         orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
5499                         new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
5500
5501                         /* There are two use cases in which it's safe to
5502                          * adjust load/store's mem size:
5503                          *   - reading a 32-bit kernel pointer, while on the
5504                          *   BPF side pointers are always 64-bit; in this case
5505                          *   it's safe to "downsize" instruction size due to
5506                          *   the pointer being treated as an unsigned integer
5507                          *   with zero-extended upper 32 bits;
5508                          *   - reading unsigned integers, again because
5509                          *   zero-extension preserves the value correctly.
5510                          *
5511                          * In all other cases it's incorrect to attempt to
5512                          * load/store the field because the read value will be
5513                          * incorrect, so we poison the relocated instruction.
5514                          */
5515                         if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
5516                                 goto done;
5517                         if (btf_is_int(orig_t) && btf_is_int(new_t) &&
5518                             btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
5519                             btf_int_encoding(new_t) != BTF_INT_SIGNED)
5520                                 goto done;
5521
5522                         /* mark as invalid mem size adjustment, but this will
5523                          * only be checked for LDX/STX/ST insns
5524                          */
5525                         res->fail_memsz_adjust = true;
5526                 }
5527         } else if (core_relo_is_type_based(relo->kind)) {
5528                 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
5529                 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
5530         } else if (core_relo_is_enumval_based(relo->kind)) {
5531                 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
5532                 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
5533         }
5534
5535 done:
5536         if (err == -EUCLEAN) {
5537                 /* EUCLEAN is used to signal instruction poisoning request */
5538                 res->poison = true;
5539                 err = 0;
5540         } else if (err == -EOPNOTSUPP) {
5541                 /* EOPNOTSUPP means unknown/unsupported relocation */
5542                 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
5543                         prog->name, relo_idx, core_relo_kind_str(relo->kind),
5544                         relo->kind, relo->insn_off / 8);
5545         }
5546
5547         return err;
5548 }
5549
5550 /*
5551  * Turn an instruction for which CO-RE relocation failed into an invalid one
5552  * with a distinct signature.
5553  */
5554 static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
5555                                  int insn_idx, struct bpf_insn *insn)
5556 {
5557         pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
5558                  prog->name, relo_idx, insn_idx);
5559         insn->code = BPF_JMP | BPF_CALL;
5560         insn->dst_reg = 0;
5561         insn->src_reg = 0;
5562         insn->off = 0;
5563         /* if this instruction is reachable (not dead code), the
5564          * verifier will complain with the following message:
5565          * invalid func unknown#195896080
5566          */
5567         insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
5568 }
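
/* A hedged illustration (restating the code above, not adding new behavior):
 * the poisoned instruction is equivalent to
 *
 *   struct bpf_insn poisoned = {
 *           .code = BPF_JMP | BPF_CALL,
 *           .imm  = 195896080,   <- 0xbad2310, "bad relo"
 *   };
 *
 * i.e., a call to non-existent helper #195896080, which the verifier rejects
 * with "invalid func unknown#195896080" if the instruction is reachable.
 */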
5569
5570 static bool is_ldimm64(struct bpf_insn *insn)
5571 {
5572         return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
5573 }
5574
5575 static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
5576 {
5577         switch (BPF_SIZE(insn->code)) {
5578         case BPF_DW: return 8;
5579         case BPF_W: return 4;
5580         case BPF_H: return 2;
5581         case BPF_B: return 1;
5582         default: return -1;
5583         }
5584 }
5585
5586 static int insn_bytes_to_bpf_size(__u32 sz)
5587 {
5588         switch (sz) {
5589         case 8: return BPF_DW;
5590         case 4: return BPF_W;
5591         case 2: return BPF_H;
5592         case 1: return BPF_B;
5593         default: return -1;
5594         }
5595 }
5596
5597 /*
5598  * Patch relocatable BPF instruction.
5599  *
5600  * The patched value is determined by the relocation kind and target
5601  * specification. For existence relocations, target spec will be NULL if the
5602  * field/type is not found. The expected insn->imm value is determined using
5603  * the relocation kind and local spec, and is checked before patching the
5604  * instruction. If the actual insn->imm value is wrong, bail out with an error.
5605  *
5606  * Currently supported classes of BPF instruction are:
5607  * 1. rX = <imm> (assignment with immediate operand);
5608  * 2. rX += <imm> (arithmetic operations with immediate operand);
5609  * 3. rX = <imm64> (load with 64-bit immediate value);
5610  * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
5611  * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
5612  * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
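 *
 * For example (an illustrative sketch; the offset is made up): in a
 * BTF-enabled program (e.g. fentry), a direct CO-RE field access in BPF C
 * source such as
 *
 *   pid = task->pid;   <- task's type comes from vmlinux.h
 *
 * compiles into a class-4 load like "r1 = *(u32 *)(r7 + 1256)", and a field
 * byte-offset relocation rewrites the 1256 in insn->off to match the target
 * kernel's actual struct layout.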
5613  */
5614 static int bpf_core_patch_insn(struct bpf_program *prog,
5615                                const struct bpf_core_relo *relo,
5616                                int relo_idx,
5617                                const struct bpf_core_relo_res *res)
5618 {
5619         __u32 orig_val, new_val;
5620         struct bpf_insn *insn;
5621         int insn_idx;
5622         __u8 class;
5623
5624         if (relo->insn_off % BPF_INSN_SZ)
5625                 return -EINVAL;
5626         insn_idx = relo->insn_off / BPF_INSN_SZ;
5627         /* adjust insn_idx from section frame of reference to the local
5628          * program's frame of reference; (sub-)program code is not yet
5629          * relocated, so it's enough to just subtract in-section offset
5630          */
5631         insn_idx = insn_idx - prog->sec_insn_off;
5632         insn = &prog->insns[insn_idx];
5633         class = BPF_CLASS(insn->code);
5634
5635         if (res->poison) {
5636 poison:
5637                 /* poison second part of ldimm64 to avoid confusing error from
5638                  * verifier about "unknown opcode 00"
5639                  */
5640                 if (is_ldimm64(insn))
5641                         bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
5642                 bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
5643                 return 0;
5644         }
5645
5646         orig_val = res->orig_val;
5647         new_val = res->new_val;
5648
5649         switch (class) {
5650         case BPF_ALU:
5651         case BPF_ALU64:
5652                 if (BPF_SRC(insn->code) != BPF_K)
5653                         return -EINVAL;
5654                 if (res->validate && insn->imm != orig_val) {
5655                         pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
5656                                 prog->name, relo_idx,
5657                                 insn_idx, insn->imm, orig_val, new_val);
5658                         return -EINVAL;
5659                 }
5660                 orig_val = insn->imm;
5661                 insn->imm = new_val;
5662                 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
5663                          prog->name, relo_idx, insn_idx,
5664                          orig_val, new_val);
5665                 break;
5666         case BPF_LDX:
5667         case BPF_ST:
5668         case BPF_STX:
5669                 if (res->validate && insn->off != orig_val) {
5670                         pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
5671                                 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
5672                         return -EINVAL;
5673                 }
5674                 if (new_val > SHRT_MAX) {
5675                         pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
5676                                 prog->name, relo_idx, insn_idx, new_val);
5677                         return -ERANGE;
5678                 }
5679                 if (res->fail_memsz_adjust) {
5680                         pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
5681                                 "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
5682                                 prog->name, relo_idx, insn_idx);
5683                         goto poison;
5684                 }
5685
5686                 orig_val = insn->off;
5687                 insn->off = new_val;
5688                 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
5689                          prog->name, relo_idx, insn_idx, orig_val, new_val);
5690
5691                 if (res->new_sz != res->orig_sz) {
5692                         int insn_bytes_sz, insn_bpf_sz;
5693
5694                         insn_bytes_sz = insn_bpf_size_to_bytes(insn);
5695                         if (insn_bytes_sz != res->orig_sz) {
5696                                 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
5697                                         prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
5698                                 return -EINVAL;
5699                         }
5700
5701                         insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
5702                         if (insn_bpf_sz < 0) {
5703                                 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
5704                                         prog->name, relo_idx, insn_idx, res->new_sz);
5705                                 return -EINVAL;
5706                         }
5707
5708                         insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
5709                         pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
5710                                  prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
5711                 }
5712                 break;
5713         case BPF_LD: {
5714                 __u64 imm;
5715
5716                 if (!is_ldimm64(insn) ||
5717                     insn[0].src_reg != 0 || insn[0].off != 0 ||
5718                     insn_idx + 1 >= prog->insns_cnt ||
5719                     insn[1].code != 0 || insn[1].dst_reg != 0 ||
5720                     insn[1].src_reg != 0 || insn[1].off != 0) {
5721                         pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
5722                                 prog->name, relo_idx, insn_idx);
5723                         return -EINVAL;
5724                 }
5725
5726                 imm = (__u32)insn[0].imm | ((__u64)insn[1].imm << 32);
5727                 if (res->validate && imm != orig_val) {
5728                         pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
5729                                 prog->name, relo_idx,
5730                                 insn_idx, (unsigned long long)imm,
5731                                 orig_val, new_val);
5732                         return -EINVAL;
5733                 }
5734
5735                 insn[0].imm = new_val;
5736                 insn[1].imm = 0; /* currently only 32-bit values are supported */
5737                 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
5738                          prog->name, relo_idx, insn_idx,
5739                          (unsigned long long)imm, new_val);
5740                 break;
5741         }
5742         default:
5743                 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
5744                         prog->name, relo_idx, insn_idx, insn->code,
5745                         insn->src_reg, insn->dst_reg, insn->off, insn->imm);
5746                 return -EINVAL;
5747         }
5748
5749         return 0;
5750 }
5751
5752 /* Output spec definition in the format:
5753  * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
5754  * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
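 * For example, a field-based spec might be dumped as (a made-up illustration):
 *   [13] struct sample.a[2].b (0:0:2:1 @ offset 12)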
5755  */
5756 static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
5757 {
5758         const struct btf_type *t;
5759         const struct btf_enum *e;
5760         const char *s;
5761         __u32 type_id;
5762         int i;
5763
5764         type_id = spec->root_type_id;
5765         t = btf__type_by_id(spec->btf, type_id);
5766         s = btf__name_by_offset(spec->btf, t->name_off);
5767
5768         libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
5769
5770         if (core_relo_is_type_based(spec->relo_kind))
5771                 return;
5772
5773         if (core_relo_is_enumval_based(spec->relo_kind)) {
5774                 t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
5775                 e = btf_enum(t) + spec->raw_spec[0];
5776                 s = btf__name_by_offset(spec->btf, e->name_off);
5777
5778                 libbpf_print(level, "::%s = %u", s, e->val);
5779                 return;
5780         }
5781
5782         if (core_relo_is_field_based(spec->relo_kind)) {
5783                 for (i = 0; i < spec->len; i++) {
5784                         if (spec->spec[i].name)
5785                                 libbpf_print(level, ".%s", spec->spec[i].name);
5786                         else if (i > 0 || spec->spec[i].idx > 0)
5787                                 libbpf_print(level, "[%u]", spec->spec[i].idx);
5788                 }
5789
5790                 libbpf_print(level, " (");
5791                 for (i = 0; i < spec->raw_len; i++)
5792                         libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
5793
5794                 if (spec->bit_offset % 8)
5795                         libbpf_print(level, " @ offset %u.%u)",
5796                                      spec->bit_offset / 8, spec->bit_offset % 8);
5797                 else
5798                         libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
5799                 return;
5800         }
5801 }
5802
5803 static size_t bpf_core_hash_fn(const void *key, void *ctx)
5804 {
5805         return (size_t)key;
5806 }
5807
5808 static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
5809 {
5810         return k1 == k2;
5811 }
5812
5813 static void *u32_as_hash_key(__u32 x)
5814 {
5815         return (void *)(uintptr_t)x;
5816 }
5817
5818 /*
5819  * CO-RE relocate single instruction.
5820  *
5821  * The outline and important points of the algorithm:
5822  * 1. For given local type, find corresponding candidate target types.
5823  *    Candidate type is a type with the same "essential" name, ignoring
5824  *    everything after last triple underscore (___). E.g., `sample`,
5825  *    `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
5826  *    for each other. Names with triple underscore are referred to as
5827  *    "flavors" and are useful, among other things, to allow to
5828  *    specify/support incompatible variations of the same kernel struct, which
5829  *    might differ between different kernel versions and/or build
5830  *    configurations.
5831  *
5832  *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
5833  *    converter, when deduplicated BTF of a kernel still contains more than
5834  *    one distinct type with the same name. In that case, ___2, ___3, etc.
5835  *    are appended starting from the second name conflict. But such flavors
5836  *    are also useful when defined "locally", in a BPF program, to extract
5837  *    the same data from incompatible changes between different kernel
5838  *    versions/configurations. For instance, to handle field renames between
5839  *    kernel versions, one can define two flavors of the struct with the same
5840  *    common name and use conditional relocations to extract that field,
5841  *    depending on the target kernel version (see the sketch after this comment).
5842  * 2. For each candidate type, try to match local specification to this
5843  *    candidate target type. Matching involves finding corresponding
5844  *    high-level spec accessors, meaning that all named fields should match,
5845  *    as well as all array accesses should be within the actual bounds. Also,
5846  *    types should be compatible (see bpf_core_fields_are_compat for details).
5847  * 3. It is supported and expected that there might be multiple flavors
5848  *    matching the spec. As long as all the specs resolve to the same set of
5849  *    offsets across all candidates, there is no error. If there is any
5850  *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
5851  *    imperfections of BTF deduplication, which can cause slight duplication
5852  *    of the same BTF type, if some directly or indirectly referenced (by
5853  *    pointer) type gets resolved to different actual types in different
5854  *    object files. If such a situation occurs, deduplicated BTF will end up
5855  *    with two (or more) structurally identical types, which differ only in
5856  *    the types they refer to through pointers. This should be OK in most
5857  *    cases and is not an error.
5858  * 4. Candidate type search is performed by linearly scanning through all
5859  *    types in the target BTF. It is anticipated that this is overall more
5860  *    memory-efficient and not significantly worse (if not better) CPU-wise
5861  *    compared to prebuilding a map from all local type names to a list of
5862  *    candidate type names. It's also sped up by caching the resolved list of
5863  *    matching candidates for each local "root" type ID that has at least one
5864  *    bpf_core_relo associated with it. This list is shared between multiple
5865  *    relocations for the same type ID and is updated as some of the
5866  *    candidates are pruned due to structural incompatibility.
5867  */
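/* A hedged sketch of the "flavor" technique from point 1 above (hypothetical
 * BPF-side definitions, not from this file): to cope with a field rename, a
 * BPF program could define
 *
 *   struct thread_struct___v1 { unsigned long fs; };
 *   struct thread_struct___v2 { unsigned long fsbase; };
 *
 * Both match the essential name "thread_struct", so both become candidates,
 * and the program selects between them at runtime using conditionals built
 * on bpf_core_field_exists().
 */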
5868 static int bpf_core_apply_relo(struct bpf_program *prog,
5869                                const struct bpf_core_relo *relo,
5870                                int relo_idx,
5871                                const struct btf *local_btf,
5872                                struct hashmap *cand_cache)
5873 {
5874         struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
5875         const void *type_key = u32_as_hash_key(relo->type_id);
5876         struct bpf_core_relo_res cand_res, targ_res;
5877         const struct btf_type *local_type;
5878         const char *local_name;
5879         struct core_cand_list *cands = NULL;
5880         __u32 local_id;
5881         const char *spec_str;
5882         int i, j, err;
5883
5884         local_id = relo->type_id;
5885         local_type = btf__type_by_id(local_btf, local_id);
5886         if (!local_type)
5887                 return -EINVAL;
5888
5889         local_name = btf__name_by_offset(local_btf, local_type->name_off);
5890         if (!local_name)
5891                 return -EINVAL;
5892
5893         spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
5894         if (str_is_empty(spec_str))
5895                 return -EINVAL;
5896
5897         err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
5898         if (err) {
5899                 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
5900                         prog->name, relo_idx, local_id, btf_kind_str(local_type),
5901                         str_is_empty(local_name) ? "<anon>" : local_name,
5902                         spec_str, err);
5903                 return -EINVAL;
5904         }
5905
5906         pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
5907                  relo_idx, core_relo_kind_str(relo->kind), relo->kind);
5908         bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
5909         libbpf_print(LIBBPF_DEBUG, "\n");
5910
5911         /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
5912         if (relo->kind == BPF_TYPE_ID_LOCAL) {
5913                 targ_res.validate = true;
5914                 targ_res.poison = false;
5915                 targ_res.orig_val = local_spec.root_type_id;
5916                 targ_res.new_val = local_spec.root_type_id;
5917                 goto patch_insn;
5918         }
5919
5920         /* libbpf doesn't support candidate search for anonymous types */
5921         if (str_is_empty(local_name)) {
5922                 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
5923                         prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
5924                 return -EOPNOTSUPP;
5925         }
5926
5927         if (!hashmap__find(cand_cache, type_key, (void **)&cands)) {
5928                 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5929                 if (IS_ERR(cands)) {
5930                         pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5931                                 prog->name, relo_idx, local_id, btf_kind_str(local_type),
5932                                 local_name, PTR_ERR(cands));
5933                         return PTR_ERR(cands);
5934                 }
5935                 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
5936                 if (err) {
5937                         bpf_core_free_cands(cands);
5938                         return err;
5939                 }
5940         }
5941
5942         for (i = 0, j = 0; i < cands->len; i++) {
5943                 err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
5944                                           cands->cands[i].id, &cand_spec);
5945                 if (err < 0) {
5946                         pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
5947                                 prog->name, relo_idx, i);
5948                         bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
5949                         libbpf_print(LIBBPF_WARN, ": %d\n", err);
5950                         return err;
5951                 }
5952
5953                 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
5954                          relo_idx, err == 0 ? "non-matching" : "matching", i);
5955                 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
5956                 libbpf_print(LIBBPF_DEBUG, "\n");
5957
5958                 if (err == 0)
5959                         continue;
5960
5961                 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
5962                 if (err)
5963                         return err;
5964
5965                 if (j == 0) {
5966                         targ_res = cand_res;
5967                         targ_spec = cand_spec;
5968                 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
5969                         /* if there are many field relo candidates, they
5970                          * should all resolve to the same bit offset
5971                          */
5972                         pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
5973                                 prog->name, relo_idx, cand_spec.bit_offset,
5974                                 targ_spec.bit_offset);
5975                         return -EINVAL;
5976                 } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
5977                         /* all candidates should result in the same relocation
5978                          * decision and value, otherwise it's dangerous to
5979                          * proceed due to ambiguity
5980                          */
5981                         pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
5982                                 prog->name, relo_idx,
5983                                 cand_res.poison ? "failure" : "success", cand_res.new_val,
5984                                 targ_res.poison ? "failure" : "success", targ_res.new_val);
5985                         return -EINVAL;
5986                 }
5987
5988                 cands->cands[j++] = cands->cands[i];
5989         }
5990
5991         /*
5992          * For a BPF_FIELD_EXISTS relo, or when the BPF program has field
5993          * existence checks or kernel version/config checks, it's expected
5994          * that we might not find any candidates. In this case, if the field
5995          * wasn't found in any candidate, the list of candidates shouldn't
5996          * change at all; we'll just handle the relocation appropriately,
5997          * depending on the relo's kind.
5998          */
5999         if (j > 0)
6000                 cands->len = j;
6001
6002         /*
6003          * If no candidates were found, it might be either a programmer
6004          * error or an expected case, depending on whether the instruction
6005          * with the relocation is guarded in some way that makes it
6006          * unreachable (dead code) if the relocation can't be resolved. This
6007          * is handled uniformly in bpf_core_patch_insn() by replacing that
6008          * instruction with a BPF helper call insn (using an invalid helper
6009          * ID). If that instruction is indeed unreachable, it will be ignored
6010          * and eliminated by the verifier. If it was an error, the verifier
6011          * will complain and point to a specific instruction number in its log.
6012          */
6013         if (j == 0) {
6014                 pr_debug("prog '%s': relo #%d: no matching targets found\n",
6015                          prog->name, relo_idx);
6016
6017                 /* calculate single target relo result explicitly */
6018                 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
6019                 if (err)
6020                         return err;
6021         }
6022
6023 patch_insn:
6024         /* bpf_core_patch_insn() should know how to handle missing targ_spec */
6025         err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
6026         if (err) {
6027                 pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
6028                         prog->name, relo_idx, relo->insn_off, err);
6029                 return -EINVAL;
6030         }
6031
6032         return 0;
6033 }
6034
6035 static int
6036 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
6037 {
6038         const struct btf_ext_info_sec *sec;
6039         const struct bpf_core_relo *rec;
6040         const struct btf_ext_info *seg;
6041         struct hashmap_entry *entry;
6042         struct hashmap *cand_cache = NULL;
6043         struct bpf_program *prog;
6044         const char *sec_name;
6045         int i, err = 0, insn_idx, sec_idx;
6046
6047         if (obj->btf_ext->core_relo_info.len == 0)
6048                 return 0;
6049
6050         if (targ_btf_path) {
6051                 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
6052                 if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) {
6053                         err = PTR_ERR(obj->btf_vmlinux_override);
6054                         pr_warn("failed to parse target BTF: %d\n", err);
6055                         return err;
6056                 }
6057         }
6058
6059         cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
6060         if (IS_ERR(cand_cache)) {
6061                 err = PTR_ERR(cand_cache);
6062                 goto out;
6063         }
6064
6065         seg = &obj->btf_ext->core_relo_info;
6066         for_each_btf_ext_sec(seg, sec) {
6067                 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6068                 if (str_is_empty(sec_name)) {
6069                         err = -EINVAL;
6070                         goto out;
6071                 }
6072                 /* bpf_object's ELF is gone by now so it's not easy to find
6073                  * a section index by section name, but we can find *any*
6074                  * bpf_program within the desired section and use its
6075                  * prog->sec_idx to do a proper search by section index and
6076                  * instruction offset
6077                  */
6078                 prog = NULL;
6079                 for (i = 0; i < obj->nr_programs; i++) {
6080                         prog = &obj->programs[i];
6081                         if (strcmp(prog->sec_name, sec_name) == 0)
6082                                 break;
6083                 }
6084                 if (!prog) {
6085                         pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
6086                         return -ENOENT;
6087                 }
6088                 sec_idx = prog->sec_idx;
6089
6090                 pr_debug("sec '%s': found %d CO-RE relocations\n",
6091                          sec_name, sec->num_info);
6092
6093                 for_each_btf_ext_rec(seg, sec, i, rec) {
6094                         insn_idx = rec->insn_off / BPF_INSN_SZ;
6095                         prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
6096                         if (!prog) {
6097                                 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
6098                                         sec_name, insn_idx, i);
6099                                 err = -EINVAL;
6100                                 goto out;
6101                         }
6102                         /* no need to apply CO-RE relocation if the program is
6103                          * not going to be loaded
6104                          */
6105                         if (!prog->load)
6106                                 continue;
6107
6108                         err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
6109                         if (err) {
6110                                 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
6111                                         prog->name, i, err);
6112                                 goto out;
6113                         }
6114                 }
6115         }
6116
6117 out:
6118         /* obj->btf_vmlinux and module BTFs are freed after object load */
6119         btf__free(obj->btf_vmlinux_override);
6120         obj->btf_vmlinux_override = NULL;
6121
6122         if (!IS_ERR_OR_NULL(cand_cache)) {
6123                 hashmap__for_each_entry(cand_cache, entry, i) {
6124                         bpf_core_free_cands(entry->value);
6125                 }
6126                 hashmap__free(cand_cache);
6127         }
6128         return err;
6129 }
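
/* Usage sketch (based on this era's load API; the path is hypothetical): a
 * custom target BTF, e.g. one extracted from a specific kernel image, can be
 * supplied instead of /sys/kernel/btf/vmlinux:
 *
 *   struct bpf_object_load_attr attr = {
 *           .obj = obj,
 *           .target_btf_path = "/tmp/custom_vmlinux.btf",
 *   };
 *   int err = bpf_object__load_xattr(&attr);
 *
 * The path arrives here as targ_btf_path and temporarily overrides
 * obj->btf_vmlinux via obj->btf_vmlinux_override.
 */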
6130
6131 /* Relocate data references within program code:
6132  *  - map references;
6133  *  - global variable references;
6134  *  - extern references.
6135  */
6136 static int
6137 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
6138 {
6139         int i;
6140
6141         for (i = 0; i < prog->nr_reloc; i++) {
6142                 struct reloc_desc *relo = &prog->reloc_desc[i];
6143                 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6144                 struct extern_desc *ext;
6145
6146                 switch (relo->type) {
6147                 case RELO_LD64:
6148                         insn[0].src_reg = BPF_PSEUDO_MAP_FD;
6149                         insn[0].imm = obj->maps[relo->map_idx].fd;
6150                         relo->processed = true;
6151                         break;
6152                 case RELO_DATA:
6153                         insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6154                         insn[1].imm = insn[0].imm + relo->sym_off;
6155                         insn[0].imm = obj->maps[relo->map_idx].fd;
6156                         relo->processed = true;
6157                         break;
6158                 case RELO_EXTERN:
6159                         ext = &obj->externs[relo->sym_off];
6160                         if (ext->type == EXT_KCFG) {
6161                                 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6162                                 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6163                                 insn[1].imm = ext->kcfg.data_off;
6164                         } else /* EXT_KSYM */ {
6165                                 if (ext->ksym.type_id) { /* typed ksyms */
6166                                         insn[0].src_reg = BPF_PSEUDO_BTF_ID;
6167                                         insn[0].imm = ext->ksym.kernel_btf_id;
6168                                         insn[1].imm = ext->ksym.kernel_btf_obj_fd;
6169                                 } else { /* typeless ksyms */
6170                                         insn[0].imm = (__u32)ext->ksym.addr;
6171                                         insn[1].imm = ext->ksym.addr >> 32;
6172                                 }
6173                         }
6174                         relo->processed = true;
6175                         break;
6176                 case RELO_CALL:
6177                         /* will be handled as a follow up pass */
6178                         break;
6179                 default:
6180                         pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6181                                 prog->name, i, relo->type);
6182                         return -EINVAL;
6183                 }
6184         }
6185
6186         return 0;
6187 }
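
/* A hedged sketch of the RELO_DATA case above (hypothetical program): a
 * global variable reference in BPF C source like
 *
 *   int my_counter;              <- backed by the internal .bss map
 *   ...
 *   my_counter += 1;
 *
 * compiles into a two-instruction ldimm64; after relocation,
 * insn[0].src_reg is BPF_PSEUDO_MAP_VALUE, insn[0].imm holds the .bss map's
 * FD, and insn[1].imm holds the variable's byte offset within the map value.
 */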
6188
6189 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6190                                     const struct bpf_program *prog,
6191                                     const struct btf_ext_info *ext_info,
6192                                     void **prog_info, __u32 *prog_rec_cnt,
6193                                     __u32 *prog_rec_sz)
6194 {
6195         void *copy_start = NULL, *copy_end = NULL;
6196         void *rec, *rec_end, *new_prog_info;
6197         const struct btf_ext_info_sec *sec;
6198         size_t old_sz, new_sz;
6199         const char *sec_name;
6200         int i, off_adj;
6201
6202         for_each_btf_ext_sec(ext_info, sec) {
6203                 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6204                 if (!sec_name)
6205                         return -EINVAL;
6206                 if (strcmp(sec_name, prog->sec_name) != 0)
6207                         continue;
6208
6209                 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6210                         __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6211
6212                         if (insn_off < prog->sec_insn_off)
6213                                 continue;
6214                         if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6215                                 break;
6216
6217                         if (!copy_start)
6218                                 copy_start = rec;
6219                         copy_end = rec + ext_info->rec_size;
6220                 }
6221
6222                 if (!copy_start)
6223                         return -ENOENT;
6224
6225                 /* append func/line info of a given (sub-)program to the main
6226                  * program func/line info
6227                  */
6228                 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6229                 new_sz = old_sz + (copy_end - copy_start);
6230                 new_prog_info = realloc(*prog_info, new_sz);
6231                 if (!new_prog_info)
6232                         return -ENOMEM;
6233                 *prog_info = new_prog_info;
6234                 *prog_rec_cnt = new_sz / ext_info->rec_size;
6235                 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6236
6237                 /* Kernel instruction offsets are in units of 8-byte
6238                  * instructions, while .BTF.ext instruction offsets generated
6239                  * by Clang are in units of bytes. So convert Clang offsets
6240                  * into kernel offsets and adjust offset according to program
6241                  * relocated position.
6242                  */
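                /* e.g., with illustrative numbers: a record with byte offset
                 * 24 in a subprog placed at sub_insn_off=100 and
                 * sec_insn_off=50 is rewritten to insn offset
                 * 24 / 8 + (100 - 50) = 53 in the main program's frame of
                 * reference.
                 */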
6243                 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6244                 rec = new_prog_info + old_sz;
6245                 rec_end = new_prog_info + new_sz;
6246                 for (; rec < rec_end; rec += ext_info->rec_size) {
6247                         __u32 *insn_off = rec;
6248
6249                         *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6250                 }
6251                 *prog_rec_sz = ext_info->rec_size;
6252                 return 0;
6253         }
6254
6255         return -ENOENT;
6256 }
6257
6258 static int
6259 reloc_prog_func_and_line_info(const struct bpf_object *obj,
6260                               struct bpf_program *main_prog,
6261                               const struct bpf_program *prog)
6262 {
6263         int err;
6264
6265         /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
6266          * support func/line info
6267          */
6268         if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
6269                 return 0;
6270
6271         /* only attempt func info relocation if main program's func_info
6272          * relocation was successful
6273          */
6274         if (main_prog != prog && !main_prog->func_info)
6275                 goto line_info;
6276
6277         err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6278                                        &main_prog->func_info,
6279                                        &main_prog->func_info_cnt,
6280                                        &main_prog->func_info_rec_size);
6281         if (err) {
6282                 if (err != -ENOENT) {
6283                         pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6284                                 prog->name, err);
6285                         return err;
6286                 }
6287                 if (main_prog->func_info) {
6288                         /*
6289                          * Some info has already been found, but the last
6290                          * btf_ext reloc has a problem. Must error out.
6291                          */
6292                         pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6293                         return err;
6294                 }
6295                 /* Failed to load the very first info. Ignore the rest. */
6296                 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6297                         prog->name);
6298         }
6299
6300 line_info:
6301         /* don't relocate line info if main program's relocation failed */
6302         if (main_prog != prog && !main_prog->line_info)
6303                 return 0;
6304
6305         err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6306                                        &main_prog->line_info,
6307                                        &main_prog->line_info_cnt,
6308                                        &main_prog->line_info_rec_size);
6309         if (err) {
6310                 if (err != -ENOENT) {
6311                         pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6312                                 prog->name, err);
6313                         return err;
6314                 }
6315                 if (main_prog->line_info) {
6316                         /*
6317                          * Some info has already been found, but the last
6318                          * btf_ext reloc has a problem. Must error out.
6319                          */
6320                         pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6321                         return err;
6322                 }
6323                 /* Failed to load the very first info. Ignore the rest. */
6324                 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6325                         prog->name);
6326         }
6327         return 0;
6328 }
6329
6330 static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6331 {
6332         size_t insn_idx = *(const size_t *)key;
6333         const struct reloc_desc *relo = elem;
6334
6335         if (insn_idx == relo->insn_idx)
6336                 return 0;
6337         return insn_idx < relo->insn_idx ? -1 : 1;
6338 }
6339
6340 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6341 {
6342         return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6343                        sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6344 }
6345
6346 static int
6347 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6348                        struct bpf_program *prog)
6349 {
6350         size_t sub_insn_idx, insn_idx, new_cnt;
6351         struct bpf_program *subprog;
6352         struct bpf_insn *insns, *insn;
6353         struct reloc_desc *relo;
6354         int err;
6355
6356         err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6357         if (err)
6358                 return err;
6359
6360         for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6361                 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6362                 if (!insn_is_subprog_call(insn))
6363                         continue;
6364
6365                 relo = find_prog_insn_relo(prog, insn_idx);
6366                 if (relo && relo->type != RELO_CALL) {
6367                         pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6368                                 prog->name, insn_idx, relo->type);
6369                         return -LIBBPF_ERRNO__RELOC;
6370                 }
6371                 if (relo) {
6372                         /* sub-program instruction index is a combination of
6373                          * an offset of a symbol pointed to by relocation and
6374                          * call instruction's imm field; for global functions,
6375                          * call always has imm = -1, but for static functions
6376                          * relocation is against STT_SECTION and insn->imm
6377                          * points to a start of a static function
6378                          */
6379                         sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6380                 } else {
6381                         /* if subprogram call is to a static function within
6382                          * the same ELF section, there won't be any relocation
6383                          * emitted, but it also means there is no additional
6384                          * offset necessary, insns->imm is relative to
6385                          * instruction's original position within the section
6386                          */
6387                         sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6388                 }
6389
6390                 /* we enforce that sub-programs should be in .text section */
6391                 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6392                 if (!subprog) {
6393                         pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6394                                 prog->name);
6395                         return -LIBBPF_ERRNO__RELOC;
6396                 }
6397
6398                 /* if it's the first call instruction calling into this
6399                  * subprogram (meaning this subprog hasn't been processed
6400                  * yet) within the context of the current main program:
6401                  *   - append it at the end of the main program's instructions block;
6402                  *   - process it recursively, while the current program is put on hold;
6403                  *   - if that subprogram calls some other not yet processed
6404                  *   subprogram, the same thing will happen recursively until
6405                  *   there are no more unprocessed subprograms left to append
6406                  *   and relocate.
6407                  */
6408                 if (subprog->sub_insn_off == 0) {
6409                         subprog->sub_insn_off = main_prog->insns_cnt;
6410
6411                         new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6412                         insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6413                         if (!insns) {
6414                                 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6415                                 return -ENOMEM;
6416                         }
6417                         main_prog->insns = insns;
6418                         main_prog->insns_cnt = new_cnt;
6419
6420                         memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6421                                subprog->insns_cnt * sizeof(*insns));
6422
6423                         pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6424                                  main_prog->name, subprog->insns_cnt, subprog->name);
6425
6426                         err = bpf_object__reloc_code(obj, main_prog, subprog);
6427                         if (err)
6428                                 return err;
6429                 }
6430
6431                 /* main_prog->insns memory could have been re-allocated, so
6432                  * calculate pointer again
6433                  */
6434                 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6435                 /* calculate correct instruction position within current main
6436                  * prog; each main prog can have a different set of
6437                  * subprograms appended (potentially in different order as
6438                  * well), so position of any subprog can be different for
6439                  * different main programs */
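                /* e.g., with illustrative numbers: a call sitting at
                 * main-prog position 10 targeting a subprog appended at
                 * position 100 gets imm = 100 - 10 - 1 = 89, because BPF
                 * call offsets are relative to the instruction following
                 * the call
                 */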
6440                 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6441
6442                 if (relo)
6443                         relo->processed = true;
6444
6445                 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6446                          prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6447         }
6448
6449         return 0;
6450 }
6451
6452 /*
6453  * Relocate sub-program calls.
6454  *
6455  * The algorithm operates as follows. Each entry-point BPF program (referred
6456  * to as a main prog) is processed separately. Each subprog (a non-entry
6457  * function callable from entry progs or other subprogs) gets its
6458  * sub_insn_off reset to zero. This serves as an indicator that this subprog
6459  * hasn't yet been appended and relocated within the current main prog. Once
6460  * it's relocated, sub_insn_off will point at the position within the current
6461  * main prog where the given subprog was appended, which is further used to
6462  * relocate all the call instructions jumping into this subprog.
6463  *
6464  * We start with main program and process all call instructions. If the call
6465  * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6466  * is zero), subprog instructions are appended at the end of main program's
6467  * instruction array. Then main program is "put on hold" while we recursively
6468  * process newly appended subprogram. If that subprogram calls into another
6469  * subprogram that hasn't been appended, new subprogram is appended again to
6470  * the *main* prog's instructions (subprog's instructions are always left
6471  * untouched, as they need to be in unmodified state for subsequent main progs
6472  * and subprog instructions are always sent only as part of a main prog) and
6473  * the process continues recursively. Once all the subprogs called from a main
6474  * prog or any of its subprogs are appended (and relocated), all their
6475  * positions within finalized instructions array are known, so it's easy to
6476  * rewrite call instructions with correct relative offsets, corresponding to
6477  * desired target subprog.
6478  *
6479  * It's important to realize that some subprogs might not be called from a
6480  * given main prog or any of its called/used subprogs. Those will keep their
6481  * subprog->sub_insn_off as zero at all times, won't be appended to the
6482  * current main prog, and won't be relocated within its context. They might
6483  * still be used from other main progs later.
6484  *
6485  * Visually this process can be shown as below. Suppose we have two main
6486  * programs mainA and mainB and BPF object contains three subprogs: subA,
6487  * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6488  * subC both call subB:
6489  *
6490  *        +--------+ +-------+
6491  *        |        v v       |
6492  *     +--+---+ +--+-+-+ +---+--+
6493  *     | subA | | subB | | subC |
6494  *     +--+---+ +------+ +---+--+
6495  *        ^                  ^
6496  *        |                  |
6497  *    +---+-------+   +------+----+
6498  *    |   mainA   |   |   mainB   |
6499  *    +-----------+   +-----------+
6500  *
6501  * We'll start relocating mainA, will find subA, append it and start
6502  * processing sub A recursively:
6503  *
6504  *    +-----------+------+
6505  *    |   mainA   | subA |
6506  *    +-----------+------+
6507  *
6508  * At this point we notice that subB is used from subA, so we append it and
6509  * relocate (there are no further subcalls from subB):
6510  *
6511  *    +-----------+------+------+
6512  *    |   mainA   | subA | subB |
6513  *    +-----------+------+------+
6514  *
6515  * At this point, we relocate subA calls, then go one level up and finish with
6516  * relocating mainA calls. mainA is done.
6517  *
6518  * For mainB the process is similar but results in a different order. We start
6519  * with mainB and skip subA and subB, as mainB never calls them (at least
6520  * directly), but we see subC is needed, so we append it and start processing it:
6521  *
6522  *    +-----------+------+
6523  *    |   mainB   | subC |
6524  *    +-----------+------+
6525  * Now we see that subC needs subB, so we append and relocate it as well:
6526  *
6527  *    +-----------+------+------+
6528  *    |   mainB   | subC | subB |
6529  *    +-----------+------+------+
6530  *
6531  * At this point we unwind recursion, relocate calls in subC, then in mainB.
6532  */
6533 static int
6534 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6535 {
6536         struct bpf_program *subprog;
6537         int i, j, err;
6538
6539         /* mark all subprogs as not relocated (yet) within the context of
6540          * current main program
6541          */
6542         for (i = 0; i < obj->nr_programs; i++) {
6543                 subprog = &obj->programs[i];
6544                 if (!prog_is_subprog(obj, subprog))
6545                         continue;
6546
6547                 subprog->sub_insn_off = 0;
6548                 for (j = 0; j < subprog->nr_reloc; j++)
6549                         if (subprog->reloc_desc[j].type == RELO_CALL)
6550                                 subprog->reloc_desc[j].processed = false;
6551         }
6552
6553         err = bpf_object__reloc_code(obj, prog, prog);
6554         if (err)
6555                 return err;
6556
6558         return 0;
6559 }
6560
6561 static int
6562 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6563 {
6564         struct bpf_program *prog;
6565         size_t i;
6566         int err;
6567
6568         if (obj->btf_ext) {
6569                 err = bpf_object__relocate_core(obj, targ_btf_path);
6570                 if (err) {
6571                         pr_warn("failed to perform CO-RE relocations: %d\n",
6572                                 err);
6573                         return err;
6574                 }
6575         }
6576         /* relocate data references first for all programs and sub-programs,
6577          * as they don't change relative to code locations, so subsequent
6578          * subprogram processing won't need to re-calculate any of them
6579          */
6580         for (i = 0; i < obj->nr_programs; i++) {
6581                 prog = &obj->programs[i];
6582                 err = bpf_object__relocate_data(obj, prog);
6583                 if (err) {
6584                         pr_warn("prog '%s': failed to relocate data references: %d\n",
6585                                 prog->name, err);
6586                         return err;
6587                 }
6588         }
6589         /* now relocate subprogram calls and append used subprograms to main
6590          * programs; each copy of subprogram code needs to be relocated
6591          * differently for each main program, because its code location might
6592          * have changed
6593          */
6594         for (i = 0; i < obj->nr_programs; i++) {
6595                 prog = &obj->programs[i];
6596                 /* sub-program's sub-calls are relocated within the context of
6597                  * its main program only
6598                  */
6599                 if (prog_is_subprog(obj, prog))
6600                         continue;
6601
6602                 err = bpf_object__relocate_calls(obj, prog);
6603                 if (err) {
6604                         pr_warn("prog '%s': failed to relocate calls: %d\n",
6605                                 prog->name, err);
6606                         return err;
6607                 }
6608         }
6609         /* free up relocation descriptors */
6610         for (i = 0; i < obj->nr_programs; i++) {
6611                 prog = &obj->programs[i];
6612                 zfree(&prog->reloc_desc);
6613                 prog->nr_reloc = 0;
6614         }
6615         return 0;
6616 }
6617
6618 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6619                                             GElf_Shdr *shdr, Elf_Data *data);
6620
6621 static int bpf_object__collect_map_relos(struct bpf_object *obj,
6622                                          GElf_Shdr *shdr, Elf_Data *data)
6623 {
6624         const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6625         int i, j, nrels, new_sz;
6626         const struct btf_var_secinfo *vi = NULL;
6627         const struct btf_type *sec, *var, *def;
6628         struct bpf_map *map = NULL, *targ_map;
6629         const struct btf_member *member;
6630         const char *name, *mname;
6631         Elf_Data *symbols;
6632         unsigned int moff;
6633         GElf_Sym sym;
6634         GElf_Rel rel;
6635         void *tmp;
6636
6637         if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6638                 return -EINVAL;
6639         sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6640         if (!sec)
6641                 return -EINVAL;
6642
6643         symbols = obj->efile.symbols;
6644         nrels = shdr->sh_size / shdr->sh_entsize;
6645         for (i = 0; i < nrels; i++) {
6646                 if (!gelf_getrel(data, i, &rel)) {
6647                         pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6648                         return -LIBBPF_ERRNO__FORMAT;
6649                 }
6650                 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
6651                         pr_warn(".maps relo #%d: symbol %zx not found\n",
6652                                 i, (size_t)GELF_R_SYM(rel.r_info));
6653                         return -LIBBPF_ERRNO__FORMAT;
6654                 }
6655                 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
6656                 if (sym.st_shndx != obj->efile.btf_maps_shndx) {
6657                         pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
6658                                 i, name);
6659                         return -LIBBPF_ERRNO__RELOC;
6660                 }
6661
6662                 pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
6663                          i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
6664                          (size_t)rel.r_offset, sym.st_name, name);
6665
6666                 for (j = 0; j < obj->nr_maps; j++) {
6667                         map = &obj->maps[j];
6668                         if (map->sec_idx != obj->efile.btf_maps_shndx)
6669                                 continue;
6670
6671                         vi = btf_var_secinfos(sec) + map->btf_var_idx;
6672                         if (vi->offset <= rel.r_offset &&
6673                             rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
6674                                 break;
6675                 }
6676                 if (j == obj->nr_maps) {
6677                         pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
6678                                 i, name, (size_t)rel.r_offset);
6679                         return -EINVAL;
6680                 }
6681
6682                 if (!bpf_map_type__is_map_in_map(map->def.type))
6683                         return -EINVAL;
6684                 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
6685                     map->def.key_size != sizeof(int)) {
6686                         pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
6687                                 i, map->name, sizeof(int));
6688                         return -EINVAL;
6689                 }
6690
6691                 targ_map = bpf_object__find_map_by_name(obj, name);
6692                 if (!targ_map)
6693                         return -ESRCH;
6694
6695                 var = btf__type_by_id(obj->btf, vi->type);
6696                 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6697                 if (btf_vlen(def) == 0)
6698                         return -EINVAL;
6699                 member = btf_members(def) + btf_vlen(def) - 1;
6700                 mname = btf__name_by_offset(obj->btf, member->name_off);
6701                 if (strcmp(mname, "values"))
6702                         return -EINVAL;
6703
6704                 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
6705                 if (rel.r_offset - vi->offset < moff)
6706                         return -EINVAL;
6707
6708                 moff = rel.r_offset - vi->offset - moff;
6709                 /* here we use BPF pointer size, which is always 64 bit, as we
6710                  * are parsing ELF that was built for BPF target
6711                  */
6712                 if (moff % bpf_ptr_sz)
6713                         return -EINVAL;
6714                 moff /= bpf_ptr_sz;
6715                 if (moff >= map->init_slots_sz) {
6716                         new_sz = moff + 1;
6717                         tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
6718                         if (!tmp)
6719                                 return -ENOMEM;
6720                         map->init_slots = tmp;
6721                         memset(map->init_slots + map->init_slots_sz, 0,
6722                                (new_sz - map->init_slots_sz) * host_ptr_sz);
6723                         map->init_slots_sz = new_sz;
6724                 }
6725                 map->init_slots[moff] = targ_map;
6726
6727                 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
6728                          i, map->name, moff, name);
6729         }
6730
6731         return 0;
6732 }
6733
6734 static int cmp_relocs(const void *_a, const void *_b)
6735 {
6736         const struct reloc_desc *a = _a;
6737         const struct reloc_desc *b = _b;
6738
6739         if (a->insn_idx != b->insn_idx)
6740                 return a->insn_idx < b->insn_idx ? -1 : 1;
6741
6742         /* no two relocations should have the same insn_idx, but ... */
6743         if (a->type != b->type)
6744                 return a->type < b->type ? -1 : 1;
6745
6746         return 0;
6747 }
6748
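/* Collect relocations from all relocation sections, dispatching on the
 * section they apply to: struct_ops, BTF-defined maps, or program code.
 * Each program's relocations are then sorted by instruction index so
 * they can be looked up efficiently during relocation.
 */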
6749 static int bpf_object__collect_relos(struct bpf_object *obj)
6750 {
6751         int i, err;
6752
6753         for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
6754                 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
6755                 Elf_Data *data = obj->efile.reloc_sects[i].data;
6756                 int idx = shdr->sh_info;
6757
6758                 if (shdr->sh_type != SHT_REL) {
6759                         pr_warn("internal error at %d\n", __LINE__);
6760                         return -LIBBPF_ERRNO__INTERNAL;
6761                 }
6762
6763                 if (idx == obj->efile.st_ops_shndx)
6764                         err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6765                 else if (idx == obj->efile.btf_maps_shndx)
6766                         err = bpf_object__collect_map_relos(obj, shdr, data);
6767                 else
6768                         err = bpf_object__collect_prog_relos(obj, shdr, data);
6769                 if (err)
6770                         return err;
6771         }
6772
6773         for (i = 0; i < obj->nr_programs; i++) {
6774                 struct bpf_program *p = &obj->programs[i];
6775
6776                 if (!p->nr_reloc)
6777                         continue;
6778
6779                 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6780         }
6781         return 0;
6782 }
6783
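/* A helper call is a BPF_CALL instruction of class BPF_JMP with an
 * immediate (BPF_K) operand and both registers zero; the imm field
 * holds the helper function ID.
 */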
6784 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
6785 {
6786         if (BPF_CLASS(insn->code) == BPF_JMP &&
6787             BPF_OP(insn->code) == BPF_CALL &&
6788             BPF_SRC(insn->code) == BPF_K &&
6789             insn->src_reg == 0 &&
6790             insn->dst_reg == 0) {
6791                     *func_id = insn->imm;
6792                     return true;
6793         }
6794         return false;
6795 }
6796
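/* Rewrite helper IDs that the running kernel doesn't support into older
 * equivalents, so programs built against newer UAPI headers still load
 * on older kernels.
 */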
6797 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
6798 {
6799         struct bpf_insn *insn = prog->insns;
6800         enum bpf_func_id func_id;
6801         int i;
6802
6803         for (i = 0; i < prog->insns_cnt; i++, insn++) {
6804                 if (!insn_is_helper_call(insn, &func_id))
6805                         continue;
6806
6807                 /* on kernels that don't yet support
6808                  * bpf_probe_read_{kernel,user}[_str] helpers, fall back
6809                  * to bpf_probe_read() which works well for old kernels
6810                  */
6811                 switch (func_id) {
6812                 case BPF_FUNC_probe_read_kernel:
6813                 case BPF_FUNC_probe_read_user:
6814                         if (!kernel_supports(FEAT_PROBE_READ_KERN))
6815                                 insn->imm = BPF_FUNC_probe_read;
6816                         break;
6817                 case BPF_FUNC_probe_read_kernel_str:
6818                 case BPF_FUNC_probe_read_user_str:
6819                         if (!kernel_supports(FEAT_PROBE_READ_KERN))
6820                                 insn->imm = BPF_FUNC_probe_read_str;
6821                         break;
6822                 default:
6823                         break;
6824                 }
6825         }
6826         return 0;
6827 }
6828
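/* Load a single program instance into the kernel. On failure, the load
 * is retried with a doubling verifier log buffer so the verifier's
 * complaint can be dumped; as a last resort, probe whether the program
 * would load as a kprobe to diagnose a wrong program type.
 */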
6829 static int
6830 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
6831              char *license, __u32 kern_version, int *pfd)
6832 {
6833         struct bpf_prog_load_params load_attr = {};
6834         char *cp, errmsg[STRERR_BUFSIZE];
6835         size_t log_buf_size = 0;
6836         char *log_buf = NULL;
6837         int btf_fd, ret;
6838
6839         if (prog->type == BPF_PROG_TYPE_UNSPEC) {
6840                 /*
6841                  * The program type must be set.  Most likely we couldn't find a proper
6842                  * section definition at load time, and thus we didn't infer the type.
6843                  */
6844                 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
6845                         prog->name, prog->sec_name);
6846                 return -EINVAL;
6847         }
6848
6849         if (!insns || !insns_cnt)
6850                 return -EINVAL;
6851
6852         load_attr.prog_type = prog->type;
6853         /* old kernels might not support specifying expected_attach_type */
6854         if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
6855             prog->sec_def->is_exp_attach_type_optional)
6856                 load_attr.expected_attach_type = 0;
6857         else
6858                 load_attr.expected_attach_type = prog->expected_attach_type;
6859         if (kernel_supports(FEAT_PROG_NAME))
6860                 load_attr.name = prog->name;
6861         load_attr.insns = insns;
6862         load_attr.insn_cnt = insns_cnt;
6863         load_attr.license = license;
6864         load_attr.attach_btf_id = prog->attach_btf_id;
6865         if (prog->attach_prog_fd)
6866                 load_attr.attach_prog_fd = prog->attach_prog_fd;
6867         else
6868                 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6870         load_attr.kern_version = kern_version;
6871         load_attr.prog_ifindex = prog->prog_ifindex;
6872
6873         /* specify func_info/line_info only if kernel supports them */
6874         btf_fd = bpf_object__btf_fd(prog->obj);
6875         if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
6876                 load_attr.prog_btf_fd = btf_fd;
6877                 load_attr.func_info = prog->func_info;
6878                 load_attr.func_info_rec_size = prog->func_info_rec_size;
6879                 load_attr.func_info_cnt = prog->func_info_cnt;
6880                 load_attr.line_info = prog->line_info;
6881                 load_attr.line_info_rec_size = prog->line_info_rec_size;
6882                 load_attr.line_info_cnt = prog->line_info_cnt;
6883         }
6884         load_attr.log_level = prog->log_level;
6885         load_attr.prog_flags = prog->prog_flags;
6886
6887 retry_load:
6888         if (log_buf_size) {
6889                 log_buf = malloc(log_buf_size);
6890                 if (!log_buf)
6891                         return -ENOMEM;
6892
6893                 *log_buf = 0;
6894         }
6895
6896         load_attr.log_buf = log_buf;
6897         load_attr.log_buf_sz = log_buf_size;
6898         ret = libbpf__bpf_prog_load(&load_attr);
6899
6900         if (ret >= 0) {
6901                 if (log_buf && load_attr.log_level)
6902                         pr_debug("verifier log:\n%s", log_buf);
6903
6904                 if (prog->obj->rodata_map_idx >= 0 &&
6905                     kernel_supports(FEAT_PROG_BIND_MAP)) {
6906                         struct bpf_map *rodata_map =
6907                                 &prog->obj->maps[prog->obj->rodata_map_idx];
6908
6909                         if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
6910                                 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6911                                 pr_warn("prog '%s': failed to bind .rodata map: %s\n",
6912                                         prog->name, cp);
6913                                 /* Don't fail hard if can't bind rodata. */
6914                         }
6915                 }
6916
6917                 *pfd = ret;
6918                 ret = 0;
6919                 goto out;
6920         }
6921
6922         if (!log_buf || errno == ENOSPC) {
6923                 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
6924                                    log_buf_size << 1);
6925
6926                 free(log_buf);
6927                 goto retry_load;
6928         }
6929         ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
6930         cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6931         pr_warn("load bpf program failed: %s\n", cp);
6932         pr_perm_msg(ret);
6933
6934         if (log_buf && log_buf[0] != '\0') {
6935                 ret = -LIBBPF_ERRNO__VERIFY;
6936                 pr_warn("-- BEGIN DUMP LOG --\n");
6937                 pr_warn("\n%s\n", log_buf);
6938                 pr_warn("-- END DUMP LOG --\n");
6939         } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
6940                 pr_warn("Program too large (%zu insns), at most %d insns\n",
6941                         load_attr.insn_cnt, BPF_MAXINSNS);
6942                 ret = -LIBBPF_ERRNO__PROG2BIG;
6943         } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
6944                 /* Wrong program type? */
6945                 int fd;
6946
6947                 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
6948                 load_attr.expected_attach_type = 0;
6949                 load_attr.log_buf = NULL;
6950                 load_attr.log_buf_sz = 0;
6951                 fd = libbpf__bpf_prog_load(&load_attr);
6952                 if (fd >= 0) {
6953                         close(fd);
6954                         ret = -LIBBPF_ERRNO__PROGTYPE;
6955                         goto out;
6956                 }
6957         }
6958
6959 out:
6960         free(log_buf);
6961         return ret;
6962 }
6963
6964 static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
6965
6966 int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
6967 {
6968         int err = 0, fd, i;
6969
6970         if (prog->obj->loaded) {
6971                 pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
6972                 return -EINVAL;
6973         }
6974
6975         if ((prog->type == BPF_PROG_TYPE_TRACING ||
6976              prog->type == BPF_PROG_TYPE_LSM ||
6977              prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
6978                 int btf_obj_fd = 0, btf_type_id = 0;
6979
6980                 err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
6981                 if (err)
6982                         return err;
6983
6984                 prog->attach_btf_obj_fd = btf_obj_fd;
6985                 prog->attach_btf_id = btf_type_id;
6986         }
6987
6988         if (prog->instances.nr < 0 || !prog->instances.fds) {
6989                 if (prog->preprocessor) {
6990                         pr_warn("Internal error: can't load program '%s'\n",
6991                                 prog->name);
6992                         return -LIBBPF_ERRNO__INTERNAL;
6993                 }
6994
6995                 prog->instances.fds = malloc(sizeof(int));
6996                 if (!prog->instances.fds) {
6997                         pr_warn("Not enough memory for BPF fds\n");
6998                         return -ENOMEM;
6999                 }
7000                 prog->instances.nr = 1;
7001                 prog->instances.fds[0] = -1;
7002         }
7003
7004         if (!prog->preprocessor) {
7005                 if (prog->instances.nr != 1) {
7006                         pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
7007                                 prog->name, prog->instances.nr);
7008                 }
7009                 err = load_program(prog, prog->insns, prog->insns_cnt,
7010                                    license, kern_ver, &fd);
7011                 if (!err)
7012                         prog->instances.fds[0] = fd;
7013                 goto out;
7014         }
7015
7016         for (i = 0; i < prog->instances.nr; i++) {
7017                 struct bpf_prog_prep_result result;
7018                 bpf_program_prep_t preprocessor = prog->preprocessor;
7019
7020                 memset(&result, 0, sizeof(result));
7021                 err = preprocessor(prog, i, prog->insns,
7022                                    prog->insns_cnt, &result);
7023                 if (err) {
7024                         pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
7025                                 i, prog->name);
7026                         goto out;
7027                 }
7028
7029                 if (!result.new_insn_ptr || !result.new_insn_cnt) {
7030                         pr_debug("Skip loading the %dth instance of program '%s'\n",
7031                                  i, prog->name);
7032                         prog->instances.fds[i] = -1;
7033                         if (result.pfd)
7034                                 *result.pfd = -1;
7035                         continue;
7036                 }
7037
7038                 err = load_program(prog, result.new_insn_ptr,
7039                                    result.new_insn_cnt, license, kern_ver, &fd);
7040                 if (err) {
7041                         pr_warn("Loading the %dth instance of program '%s' failed\n",
7042                                 i, prog->name);
7043                         goto out;
7044                 }
7045
7046                 if (result.pfd)
7047                         *result.pfd = fd;
7048                 prog->instances.fds[i] = fd;
7049         }
7050 out:
7051         if (err)
7052                 pr_warn("failed to load program '%s'\n", prog->name);
7053         zfree(&prog->insns);
7054         prog->insns_cnt = 0;
7055         return err;
7056 }
7057
7058 static int
7059 bpf_object__load_progs(struct bpf_object *obj, int log_level)
7060 {
7061         struct bpf_program *prog;
7062         size_t i;
7063         int err;
7064
7065         for (i = 0; i < obj->nr_programs; i++) {
7066                 prog = &obj->programs[i];
7067                 err = bpf_object__sanitize_prog(obj, prog);
7068                 if (err)
7069                         return err;
7070         }
7071
7072         for (i = 0; i < obj->nr_programs; i++) {
7073                 prog = &obj->programs[i];
7074                 if (prog_is_subprog(obj, prog))
7075                         continue;
7076                 if (!prog->load) {
7077                         pr_debug("prog '%s': skipped loading\n", prog->name);
7078                         continue;
7079                 }
7080                 prog->log_level |= log_level;
7081                 err = bpf_program__load(prog, obj->license, obj->kern_version);
7082                 if (err)
7083                         return err;
7084         }
7085         return 0;
7086 }
7087
7088 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7089
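/* Open a BPF object from a file path or an in-memory buffer: parse the
 * ELF, collect maps, programs, externs and relocations, and infer each
 * program's type from its section name. Nothing is loaded into the
 * kernel at this stage.
 */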
7090 static struct bpf_object *
7091 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7092                    const struct bpf_object_open_opts *opts)
7093 {
7094         const char *obj_name, *kconfig;
7095         struct bpf_program *prog;
7096         struct bpf_object *obj;
7097         char tmp_name[64];
7098         int err;
7099
7100         if (elf_version(EV_CURRENT) == EV_NONE) {
7101                 pr_warn("failed to init libelf for %s\n",
7102                         path ? : "(mem buf)");
7103                 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7104         }
7105
7106         if (!OPTS_VALID(opts, bpf_object_open_opts))
7107                 return ERR_PTR(-EINVAL);
7108
7109         obj_name = OPTS_GET(opts, object_name, NULL);
7110         if (obj_buf) {
7111                 if (!obj_name) {
7112                         snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7113                                  (unsigned long)obj_buf,
7114                                  (unsigned long)obj_buf_sz);
7115                         obj_name = tmp_name;
7116                 }
7117                 path = obj_name;
7118                 pr_debug("loading object '%s' from buffer\n", obj_name);
7119         }
7120
7121         obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7122         if (IS_ERR(obj))
7123                 return obj;
7124
7125         kconfig = OPTS_GET(opts, kconfig, NULL);
7126         if (kconfig) {
7127                 obj->kconfig = strdup(kconfig);
7128                 if (!obj->kconfig)
7129                         return ERR_PTR(-ENOMEM);
7130         }
7131
7132         err = bpf_object__elf_init(obj);
7133         err = err ? : bpf_object__check_endianness(obj);
7134         err = err ? : bpf_object__elf_collect(obj);
7135         err = err ? : bpf_object__collect_externs(obj);
7136         err = err ? : bpf_object__finalize_btf(obj);
7137         err = err ? : bpf_object__init_maps(obj, opts);
7138         err = err ? : bpf_object__collect_relos(obj);
7139         if (err)
7140                 goto out;
7141         bpf_object__elf_finish(obj);
7142
7143         bpf_object__for_each_program(prog, obj) {
7144                 prog->sec_def = find_sec_def(prog->sec_name);
7145                 if (!prog->sec_def) {
7146                         /* couldn't guess, but user might manually specify */
7147                         pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7148                                 prog->name, prog->sec_name);
7149                         continue;
7150                 }
7151
7152                 if (prog->sec_def->is_sleepable)
7153                         prog->prog_flags |= BPF_F_SLEEPABLE;
7154                 bpf_program__set_type(prog, prog->sec_def->prog_type);
7155                 bpf_program__set_expected_attach_type(prog,
7156                                 prog->sec_def->expected_attach_type);
7157
7158                 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
7159                     prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
7160                         prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
7161         }
7162
7163         return obj;
7164 out:
7165         bpf_object__close(obj);
7166         return ERR_PTR(err);
7167 }
7168
7169 static struct bpf_object *
7170 __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
7171 {
7172         DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7173                 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
7174         );
7175
7176         /* param validation */
7177         if (!attr->file)
7178                 return NULL;
7179
7180         pr_debug("loading %s\n", attr->file);
7181         return __bpf_object__open(attr->file, NULL, 0, &opts);
7182 }
7183
7184 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
7185 {
7186         return __bpf_object__open_xattr(attr, 0);
7187 }
7188
7189 struct bpf_object *bpf_object__open(const char *path)
7190 {
7191         struct bpf_object_open_attr attr = {
7192                 .file           = path,
7193                 .prog_type      = BPF_PROG_TYPE_UNSPEC,
7194         };
7195
7196         return bpf_object__open_xattr(&attr);
7197 }
7198
7199 struct bpf_object *
7200 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7201 {
7202         if (!path)
7203                 return ERR_PTR(-EINVAL);
7204
7205         pr_debug("loading %s\n", path);
7206
7207         return __bpf_object__open(path, NULL, 0, opts);
7208 }
7209
7210 struct bpf_object *
7211 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7212                      const struct bpf_object_open_opts *opts)
7213 {
7214         if (!obj_buf || obj_buf_sz == 0)
7215                 return ERR_PTR(-EINVAL);
7216
7217         return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
7218 }
7219
7220 struct bpf_object *
7221 bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7222                         const char *name)
7223 {
7224         DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7225                 .object_name = name,
7226                 /* wrong default, but backwards-compatible */
7227                 .relaxed_maps = true,
7228         );
7229
7230         /* returning NULL is wrong, but backwards-compatible */
7231         if (!obj_buf || obj_buf_sz == 0)
7232                 return NULL;
7233
7234         return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
7235 }
7236
7237 int bpf_object__unload(struct bpf_object *obj)
7238 {
7239         size_t i;
7240
7241         if (!obj)
7242                 return -EINVAL;
7243
7244         for (i = 0; i < obj->nr_maps; i++) {
7245                 zclose(obj->maps[i].fd);
7246                 if (obj->maps[i].st_ops)
7247                         zfree(&obj->maps[i].st_ops->kern_vdata);
7248         }
7249
7250         for (i = 0; i < obj->nr_programs; i++)
7251                 bpf_program__unload(&obj->programs[i]);
7252
7253         return 0;
7254 }
7255
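/* Adjust internal (global data) maps for the running kernel: fail if
 * global data maps aren't supported at all, and clear BPF_F_MMAPABLE
 * (always set for internal maps) when array mmap isn't available.
 */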
7256 static int bpf_object__sanitize_maps(struct bpf_object *obj)
7257 {
7258         struct bpf_map *m;
7259
7260         bpf_object__for_each_map(m, obj) {
7261                 if (!bpf_map__is_internal(m))
7262                         continue;
7263                 if (!kernel_supports(FEAT_GLOBAL_DATA)) {
7264                         pr_warn("kernel doesn't support global data\n");
7265                         return -ENOTSUP;
7266                 }
7267                 if (!kernel_supports(FEAT_ARRAY_MMAP))
7268                         m->def.map_flags ^= BPF_F_MMAPABLE;
7269         }
7270
7271         return 0;
7272 }
7273
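/* Resolve typeless ksym externs by scanning /proc/kallsyms for matching
 * symbol names and recording their addresses; conflicting duplicate
 * entries are rejected as ambiguous.
 */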
7274 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7275 {
7276         char sym_type, sym_name[500];
7277         unsigned long long sym_addr;
7278         struct extern_desc *ext;
7279         int ret, err = 0;
7280         FILE *f;
7281
7282         f = fopen("/proc/kallsyms", "r");
7283         if (!f) {
7284                 err = -errno;
7285                 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7286                 return err;
7287         }
7288
7289         while (true) {
7290                 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7291                              &sym_addr, &sym_type, sym_name);
7292                 if (ret == EOF && feof(f))
7293                         break;
7294                 if (ret != 3) {
7295                         pr_warn("failed to read kallsyms entry: %d\n", ret);
7296                         err = -EINVAL;
7297                         goto out;
7298                 }
7299
7300                 ext = find_extern_by_name(obj, sym_name);
7301                 if (!ext || ext->type != EXT_KSYM)
7302                         continue;
7303
7304                 if (ext->is_set && ext->ksym.addr != sym_addr) {
7305                         pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
7306                                 sym_name, ext->ksym.addr, sym_addr);
7307                         err = -EINVAL;
7308                         goto out;
7309                 }
7310                 if (!ext->is_set) {
7311                         ext->is_set = true;
7312                         ext->ksym.addr = sym_addr;
7313                         pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
7314                 }
7315         }
7316
7317 out:
7318         fclose(f);
7319         return err;
7320 }
7321
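/* Resolve typed ksym externs against vmlinux BTF, falling back to
 * kernel module BTFs; the kernel variable's type must be compatible
 * with the local declaration.
 */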
7322 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7323 {
7324         struct extern_desc *ext;
7325         struct btf *btf;
7326         int i, j, id, btf_fd, err;
7327
7328         for (i = 0; i < obj->nr_extern; i++) {
7329                 const struct btf_type *targ_var, *targ_type;
7330                 __u32 targ_type_id, local_type_id;
7331                 const char *targ_var_name;
7332                 int ret;
7333
7334                 ext = &obj->externs[i];
7335                 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7336                         continue;
7337
7338                 btf = obj->btf_vmlinux;
7339                 btf_fd = 0;
7340                 id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
7341                 if (id == -ENOENT) {
7342                         err = load_module_btfs(obj);
7343                         if (err)
7344                                 return err;
7345
7346                         for (j = 0; j < obj->btf_module_cnt; j++) {
7347                                 btf = obj->btf_modules[j].btf;
7348                                 /* we assume module BTF FD is always >0 */
7349                                 btf_fd = obj->btf_modules[j].fd;
7350                                 id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
7351                                 if (id != -ENOENT)
7352                                         break;
7353                         }
7354                 }
7355                 if (id <= 0) {
7356                         pr_warn("extern (ksym) '%s': failed to find BTF ID in kernel BTF(s).\n",
7357                                 ext->name);
7358                         return -ESRCH;
7359                 }
7360
7361                 /* find local type_id */
7362                 local_type_id = ext->ksym.type_id;
7363
7364                 /* find target type_id */
7365                 targ_var = btf__type_by_id(btf, id);
7366                 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
7367                 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
7368
7369                 ret = bpf_core_types_are_compat(obj->btf, local_type_id,
7370                                                 btf, targ_type_id);
7371                 if (ret <= 0) {
7372                         const struct btf_type *local_type;
7373                         const char *targ_name, *local_name;
7374
7375                         local_type = btf__type_by_id(obj->btf, local_type_id);
7376                         local_name = btf__name_by_offset(obj->btf, local_type->name_off);
7377                         targ_name = btf__name_by_offset(btf, targ_type->name_off);
7378
7379                         pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7380                                 ext->name, local_type_id,
7381                                 btf_kind_str(local_type), local_name, targ_type_id,
7382                                 btf_kind_str(targ_type), targ_name);
7383                         return -EINVAL;
7384                 }
7385
7386                 ext->is_set = true;
7387                 ext->ksym.kernel_btf_obj_fd = btf_fd;
7388                 ext->ksym.kernel_btf_id = id;
7389                 pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n",
7390                          ext->name, id, btf_kind_str(targ_var), targ_var_name);
7391         }
7392         return 0;
7393 }
7394
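/* Resolve all extern values: LINUX_KERNEL_VERSION and CONFIG_* kconfig
 * externs from the running kernel (or caller-provided extra kconfig),
 * and ksym externs via kallsyms or kernel BTF. An unresolved strong
 * extern fails the load; weak externs default to zero.
 */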
7395 static int bpf_object__resolve_externs(struct bpf_object *obj,
7396                                        const char *extra_kconfig)
7397 {
7398         bool need_config = false, need_kallsyms = false;
7399         bool need_vmlinux_btf = false;
7400         struct extern_desc *ext;
7401         void *kcfg_data = NULL;
7402         int err, i;
7403
7404         if (obj->nr_extern == 0)
7405                 return 0;
7406
7407         if (obj->kconfig_map_idx >= 0)
7408                 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7409
7410         for (i = 0; i < obj->nr_extern; i++) {
7411                 ext = &obj->externs[i];
7412
7413                 if (ext->type == EXT_KCFG &&
7414                     strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7415                         void *ext_val = kcfg_data + ext->kcfg.data_off;
7416                         __u32 kver = get_kernel_version();
7417
7418                         if (!kver) {
7419                                 pr_warn("failed to get kernel version\n");
7420                                 return -EINVAL;
7421                         }
7422                         err = set_kcfg_value_num(ext, ext_val, kver);
7423                         if (err)
7424                                 return err;
7425                         pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
7426                 } else if (ext->type == EXT_KCFG &&
7427                            strncmp(ext->name, "CONFIG_", 7) == 0) {
7428                         need_config = true;
7429                 } else if (ext->type == EXT_KSYM) {
7430                         if (ext->ksym.type_id)
7431                                 need_vmlinux_btf = true;
7432                         else
7433                                 need_kallsyms = true;
7434                 } else {
7435                         pr_warn("unrecognized extern '%s'\n", ext->name);
7436                         return -EINVAL;
7437                 }
7438         }
7439         if (need_config && extra_kconfig) {
7440                 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7441                 if (err)
7442                         return -EINVAL;
7443                 need_config = false;
7444                 for (i = 0; i < obj->nr_extern; i++) {
7445                         ext = &obj->externs[i];
7446                         if (ext->type == EXT_KCFG && !ext->is_set) {
7447                                 need_config = true;
7448                                 break;
7449                         }
7450                 }
7451         }
7452         if (need_config) {
7453                 err = bpf_object__read_kconfig_file(obj, kcfg_data);
7454                 if (err)
7455                         return -EINVAL;
7456         }
7457         if (need_kallsyms) {
7458                 err = bpf_object__read_kallsyms_file(obj);
7459                 if (err)
7460                         return -EINVAL;
7461         }
7462         if (need_vmlinux_btf) {
7463                 err = bpf_object__resolve_ksyms_btf_id(obj);
7464                 if (err)
7465                         return -EINVAL;
7466         }
7467         for (i = 0; i < obj->nr_extern; i++) {
7468                 ext = &obj->externs[i];
7469
7470                 if (!ext->is_set && !ext->is_weak) {
7471                         pr_warn("extern %s (strong) not resolved\n", ext->name);
7472                         return -ESRCH;
7473                 } else if (!ext->is_set) {
7474                         pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
7475                                  ext->name);
7476                 }
7477         }
7478
7479         return 0;
7480 }
7481
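/* Load the object into the kernel: probe BPF support, resolve externs,
 * sanitize and load BTF, create maps, relocate and load programs. The
 * object is marked loaded even on failure; on error, auto-pinned maps
 * are unpinned and everything is unloaded.
 */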
7482 int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
7483 {
7484         struct bpf_object *obj;
7485         int err, i;
7486
7487         if (!attr)
7488                 return -EINVAL;
7489         obj = attr->obj;
7490         if (!obj)
7491                 return -EINVAL;
7492
7493         if (obj->loaded) {
7494                 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7495                 return -EINVAL;
7496         }
7497
7498         err = bpf_object__probe_loading(obj);
7499         err = err ? : bpf_object__load_vmlinux_btf(obj, false);
7500         err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7501         err = err ? : bpf_object__sanitize_and_load_btf(obj);
7502         err = err ? : bpf_object__sanitize_maps(obj);
7503         err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7504         err = err ? : bpf_object__create_maps(obj);
7505         err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
7506         err = err ? : bpf_object__load_progs(obj, attr->log_level);
7507
7508         /* clean up module BTFs */
7509         for (i = 0; i < obj->btf_module_cnt; i++) {
7510                 close(obj->btf_modules[i].fd);
7511                 btf__free(obj->btf_modules[i].btf);
7512                 free(obj->btf_modules[i].name);
7513         }
7514         free(obj->btf_modules);
7515
7516         /* clean up vmlinux BTF */
7517         btf__free(obj->btf_vmlinux);
7518         obj->btf_vmlinux = NULL;
7519
7520         obj->loaded = true; /* doesn't matter if load succeeded or not */
7521
7522         if (err)
7523                 goto out;
7524
7525         return 0;
7526 out:
7527         /* unpin any maps that were auto-pinned during load */
7528         for (i = 0; i < obj->nr_maps; i++)
7529                 if (obj->maps[i].pinned && !obj->maps[i].reused)
7530                         bpf_map__unpin(&obj->maps[i], NULL);
7531
7532         bpf_object__unload(obj);
7533         pr_warn("failed to load object '%s'\n", obj->path);
7534         return err;
7535 }
7536
7537 int bpf_object__load(struct bpf_object *obj)
7538 {
7539         struct bpf_object_load_attr attr = {
7540                 .obj = obj,
7541         };
7542
7543         return bpf_object__load_xattr(&attr);
7544 }
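
/*
 * Typical usage of the open/load APIs above (an illustrative sketch
 * only; "prog.bpf.o" and "handler" are hypothetical names, and error
 * handling is abbreviated):
 *
 *	struct bpf_object *obj;
 *	struct bpf_program *prog;
 *	int prog_fd;
 *
 *	obj = bpf_object__open("prog.bpf.o");
 *	if (libbpf_get_error(obj))
 *		return -1;
 *	if (bpf_object__load(obj))
 *		goto cleanup;
 *	prog = bpf_object__find_program_by_name(obj, "handler");
 *	prog_fd = bpf_program__fd(prog);
 *	...
 * cleanup:
 *	bpf_object__close(obj);
 */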
7545
7546 static int make_parent_dir(const char *path)
7547 {
7548         char *cp, errmsg[STRERR_BUFSIZE];
7549         char *dname, *dir;
7550         int err = 0;
7551
7552         dname = strdup(path);
7553         if (dname == NULL)
7554                 return -ENOMEM;
7555
7556         dir = dirname(dname);
7557         if (mkdir(dir, 0700) && errno != EEXIST)
7558                 err = -errno;
7559
7560         free(dname);
7561         if (err) {
7562                 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7563                 pr_warn("failed to mkdir %s: %s\n", path, cp);
7564         }
7565         return err;
7566 }
7567
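/* Pin paths must live on a BPF filesystem; verify the parent
 * directory's filesystem magic before attempting to pin.
 */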
7568 static int check_path(const char *path)
7569 {
7570         char *cp, errmsg[STRERR_BUFSIZE];
7571         struct statfs st_fs;
7572         char *dname, *dir;
7573         int err = 0;
7574
7575         if (path == NULL)
7576                 return -EINVAL;
7577
7578         dname = strdup(path);
7579         if (dname == NULL)
7580                 return -ENOMEM;
7581
7582         dir = dirname(dname);
7583         if (statfs(dir, &st_fs)) {
7584                 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7585                 pr_warn("failed to statfs %s: %s\n", dir, cp);
7586                 err = -errno;
7587         }
7588         free(dname);
7589
7590         if (!err && st_fs.f_type != BPF_FS_MAGIC) {
7591                 pr_warn("specified path %s is not on BPF FS\n", path);
7592                 err = -EINVAL;
7593         }
7594
7595         return err;
7596 }
7597
7598 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
7599                               int instance)
7600 {
7601         char *cp, errmsg[STRERR_BUFSIZE];
7602         int err;
7603
7604         err = make_parent_dir(path);
7605         if (err)
7606                 return err;
7607
7608         err = check_path(path);
7609         if (err)
7610                 return err;
7611
7612         if (prog == NULL) {
7613                 pr_warn("invalid program pointer\n");
7614                 return -EINVAL;
7615         }
7616
7617         if (instance < 0 || instance >= prog->instances.nr) {
7618                 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7619                         instance, prog->name, prog->instances.nr);
7620                 return -EINVAL;
7621         }
7622
7623         if (bpf_obj_pin(prog->instances.fds[instance], path)) {
7624                 err = -errno;
7625                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
7626                 pr_warn("failed to pin program: %s\n", cp);
7627                 return err;
7628         }
7629         pr_debug("pinned program '%s'\n", path);
7630
7631         return 0;
7632 }
7633
7634 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
7635                                 int instance)
7636 {
7637         int err;
7638
7639         err = check_path(path);
7640         if (err)
7641                 return err;
7642
7643         if (prog == NULL) {
7644                 pr_warn("invalid program pointer\n");
7645                 return -EINVAL;
7646         }
7647
7648         if (instance < 0 || instance >= prog->instances.nr) {
7649                 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7650                         instance, prog->name, prog->instances.nr);
7651                 return -EINVAL;
7652         }
7653
7654         err = unlink(path);
7655         if (err != 0)
7656                 return -errno;
7657         pr_debug("unpinned program '%s'\n", path);
7658
7659         return 0;
7660 }
7661
7662 int bpf_program__pin(struct bpf_program *prog, const char *path)
7663 {
7664         int i, err;
7665
7666         err = make_parent_dir(path);
7667         if (err)
7668                 return err;
7669
7670         err = check_path(path);
7671         if (err)
7672                 return err;
7673
7674         if (prog == NULL) {
7675                 pr_warn("invalid program pointer\n");
7676                 return -EINVAL;
7677         }
7678
7679         if (prog->instances.nr <= 0) {
7680                 pr_warn("no instances of prog %s to pin\n", prog->name);
7681                 return -EINVAL;
7682         }
7683
7684         if (prog->instances.nr == 1) {
7685                 /* don't create subdirs when pinning single instance */
7686                 return bpf_program__pin_instance(prog, path, 0);
7687         }
7688
7689         for (i = 0; i < prog->instances.nr; i++) {
7690                 char buf[PATH_MAX];
7691                 int len;
7692
7693                 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7694                 if (len < 0) {
7695                         err = -EINVAL;
7696                         goto err_unpin;
7697                 } else if (len >= PATH_MAX) {
7698                         err = -ENAMETOOLONG;
7699                         goto err_unpin;
7700                 }
7701
7702                 err = bpf_program__pin_instance(prog, buf, i);
7703                 if (err)
7704                         goto err_unpin;
7705         }
7706
7707         return 0;
7708
7709 err_unpin:
7710         for (i = i - 1; i >= 0; i--) {
7711                 char buf[PATH_MAX];
7712                 int len;
7713
7714                 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7715                 if (len < 0)
7716                         continue;
7717                 else if (len >= PATH_MAX)
7718                         continue;
7719
7720                 bpf_program__unpin_instance(prog, buf, i);
7721         }
7722
7723         rmdir(path);
7724
7725         return err;
7726 }
7727
7728 int bpf_program__unpin(struct bpf_program *prog, const char *path)
7729 {
7730         int i, err;
7731
7732         err = check_path(path);
7733         if (err)
7734                 return err;
7735
7736         if (prog == NULL) {
7737                 pr_warn("invalid program pointer\n");
7738                 return -EINVAL;
7739         }
7740
7741         if (prog->instances.nr <= 0) {
7742                 pr_warn("no instances of prog %s to unpin\n", prog->name);
7743                 return -EINVAL;
7744         }
7745
7746         if (prog->instances.nr == 1) {
7747                 /* don't create subdirs when pinning single instance */
7748                 return bpf_program__unpin_instance(prog, path, 0);
7749         }
7750
7751         for (i = 0; i < prog->instances.nr; i++) {
7752                 char buf[PATH_MAX];
7753                 int len;
7754
7755                 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7756                 if (len < 0)
7757                         return -EINVAL;
7758                 else if (len >= PATH_MAX)
7759                         return -ENAMETOOLONG;
7760
7761                 err = bpf_program__unpin_instance(prog, buf, i);
7762                 if (err)
7763                         return err;
7764         }
7765
7766         err = rmdir(path);
7767         if (err)
7768                 return -errno;
7769
7770         return 0;
7771 }
7772
7773 int bpf_map__pin(struct bpf_map *map, const char *path)
7774 {
7775         char *cp, errmsg[STRERR_BUFSIZE];
7776         int err;
7777
7778         if (map == NULL) {
7779                 pr_warn("invalid map pointer\n");
7780                 return -EINVAL;
7781         }
7782
7783         if (map->pin_path) {
7784                 if (path && strcmp(path, map->pin_path)) {
7785                         pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7786                                 bpf_map__name(map), map->pin_path, path);
7787                         return -EINVAL;
7788                 } else if (map->pinned) {
7789                         pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
7790                                  bpf_map__name(map), map->pin_path);
7791                         return 0;
7792                 }
7793         } else {
7794                 if (!path) {
7795                         pr_warn("missing a path to pin map '%s' at\n",
7796                                 bpf_map__name(map));
7797                         return -EINVAL;
7798                 } else if (map->pinned) {
7799                         pr_warn("map '%s' already pinned\n", bpf_map__name(map));
7800                         return -EEXIST;
7801                 }
7802
7803                 map->pin_path = strdup(path);
7804                 if (!map->pin_path) {
7805                         err = -errno;
7806                         goto out_err;
7807                 }
7808         }
7809
7810         err = make_parent_dir(map->pin_path);
7811         if (err)
7812                 return err;
7813
7814         err = check_path(map->pin_path);
7815         if (err)
7816                 return err;
7817
7818         if (bpf_obj_pin(map->fd, map->pin_path)) {
7819                 err = -errno;
7820                 goto out_err;
7821         }
7822
7823         map->pinned = true;
7824         pr_debug("pinned map '%s'\n", map->pin_path);
7825
7826         return 0;
7827
7828 out_err:
7829         cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7830         pr_warn("failed to pin map: %s\n", cp);
7831         return err;
7832 }
7833
7834 int bpf_map__unpin(struct bpf_map *map, const char *path)
7835 {
7836         int err;
7837
7838         if (map == NULL) {
7839                 pr_warn("invalid map pointer\n");
7840                 return -EINVAL;
7841         }
7842
7843         if (map->pin_path) {
7844                 if (path && strcmp(path, map->pin_path)) {
7845                         pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7846                                 bpf_map__name(map), map->pin_path, path);
7847                         return -EINVAL;
7848                 }
7849                 path = map->pin_path;
7850         } else if (!path) {
7851                 pr_warn("no path to unpin map '%s' from\n",
7852                         bpf_map__name(map));
7853                 return -EINVAL;
7854         }
7855
7856         err = check_path(path);
7857         if (err)
7858                 return err;
7859
7860         err = unlink(path);
7861         if (err != 0)
7862                 return -errno;
7863
7864         map->pinned = false;
7865         pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
7866
7867         return 0;
7868 }
7869
7870 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
7871 {
7872         char *new = NULL;
7873
7874         if (path) {
7875                 new = strdup(path);
7876                 if (!new)
7877                         return -errno;
7878         }
7879
7880         free(map->pin_path);
7881         map->pin_path = new;
7882         return 0;
7883 }
7884
7885 const char *bpf_map__get_pin_path(const struct bpf_map *map)
7886 {
7887         return map->pin_path;
7888 }
7889
7890 bool bpf_map__is_pinned(const struct bpf_map *map)
7891 {
7892         return map->pinned;
7893 }
7894
7895 static void sanitize_pin_path(char *s)
7896 {
7897         /* bpffs disallows periods in path names */
7898         while (*s) {
7899                 if (*s == '.')
7900                         *s = '_';
7901                 s++;
7902         }
7903 }
7904
7905 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
7906 {
7907         struct bpf_map *map;
7908         int err;
7909
7910         if (!obj)
7911                 return -ENOENT;
7912
7913         if (!obj->loaded) {
7914                 pr_warn("object not yet loaded; load it first\n");
7915                 return -ENOENT;
7916         }
7917
7918         bpf_object__for_each_map(map, obj) {
7919                 char *pin_path = NULL;
7920                 char buf[PATH_MAX];
7921
7922                 if (path) {
7923                         int len;
7924
7925                         len = snprintf(buf, PATH_MAX, "%s/%s", path,
7926                                        bpf_map__name(map));
7927                         if (len < 0) {
7928                                 err = -EINVAL;
7929                                 goto err_unpin_maps;
7930                         } else if (len >= PATH_MAX) {
7931                                 err = -ENAMETOOLONG;
7932                                 goto err_unpin_maps;
7933                         }
7934                         sanitize_pin_path(buf);
7935                         pin_path = buf;
7936                 } else if (!map->pin_path) {
7937                         continue;
7938                 }
7939
7940                 err = bpf_map__pin(map, pin_path);
7941                 if (err)
7942                         goto err_unpin_maps;
7943         }
7944
7945         return 0;
7946
7947 err_unpin_maps:
7948         while ((map = bpf_map__prev(map, obj))) {
7949                 if (!map->pin_path)
7950                         continue;
7951
7952                 bpf_map__unpin(map, NULL);
7953         }
7954
7955         return err;
7956 }
7957
7958 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
7959 {
7960         struct bpf_map *map;
7961         int err;
7962
7963         if (!obj)
7964                 return -ENOENT;
7965
7966         bpf_object__for_each_map(map, obj) {
7967                 char *pin_path = NULL;
7968                 char buf[PATH_MAX];
7969
7970                 if (path) {
7971                         int len;
7972
7973                         len = snprintf(buf, PATH_MAX, "%s/%s", path,
7974                                        bpf_map__name(map));
7975                         if (len < 0)
7976                                 return -EINVAL;
7977                         else if (len >= PATH_MAX)
7978                                 return -ENAMETOOLONG;
7979                         sanitize_pin_path(buf);
7980                         pin_path = buf;
7981                 } else if (!map->pin_path) {
7982                         continue;
7983                 }
7984
7985                 err = bpf_map__unpin(map, pin_path);
7986                 if (err)
7987                         return err;
7988         }
7989
7990         return 0;
7991 }
7992
7993 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
7994 {
7995         struct bpf_program *prog;
7996         int err;
7997
7998         if (!obj)
7999                 return -ENOENT;
8000
8001         if (!obj->loaded) {
8002                 pr_warn("object not yet loaded; load it first\n");
8003                 return -ENOENT;
8004         }
8005
8006         bpf_object__for_each_program(prog, obj) {
8007                 char buf[PATH_MAX];
8008                 int len;
8009
8010                 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8011                                prog->pin_name);
8012                 if (len < 0) {
8013                         err = -EINVAL;
8014                         goto err_unpin_programs;
8015                 } else if (len >= PATH_MAX) {
8016                         err = -ENAMETOOLONG;
8017                         goto err_unpin_programs;
8018                 }
8019
8020                 err = bpf_program__pin(prog, buf);
8021                 if (err)
8022                         goto err_unpin_programs;
8023         }
8024
8025         return 0;
8026
8027 err_unpin_programs:
8028         while ((prog = bpf_program__prev(prog, obj))) {
8029                 char buf[PATH_MAX];
8030                 int len;
8031
8032                 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8033                                prog->pin_name);
8034                 if (len < 0)
8035                         continue;
8036                 else if (len >= PATH_MAX)
8037                         continue;
8038
8039                 bpf_program__unpin(prog, buf);
8040         }
8041
8042         return err;
8043 }
8044
8045 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8046 {
8047         struct bpf_program *prog;
8048         int err;
8049
8050         if (!obj)
8051                 return -ENOENT;
8052
8053         bpf_object__for_each_program(prog, obj) {
8054                 char buf[PATH_MAX];
8055                 int len;
8056
8057                 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8058                                prog->pin_name);
8059                 if (len < 0)
8060                         return -EINVAL;
8061                 else if (len >= PATH_MAX)
8062                         return -ENAMETOOLONG;
8063
8064                 err = bpf_program__unpin(prog, buf);
8065                 if (err)
8066                         return err;
8067         }
8068
8069         return 0;
8070 }
8071
8072 int bpf_object__pin(struct bpf_object *obj, const char *path)
8073 {
8074         int err;
8075
8076         err = bpf_object__pin_maps(obj, path);
8077         if (err)
8078                 return err;
8079
8080         err = bpf_object__pin_programs(obj, path);
8081         if (err) {
8082                 bpf_object__unpin_maps(obj, path);
8083                 return err;
8084         }
8085
8086         return 0;
8087 }
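
/*
 * Pinning usage (illustrative sketch; assumes a bpffs mount at
 * /sys/fs/bpf and a previously loaded object, and "myobj" is a
 * hypothetical name):
 *
 *	if (bpf_object__pin(obj, "/sys/fs/bpf/myobj"))
 *		return -1;
 *	...
 *	bpf_object__unpin_programs(obj, "/sys/fs/bpf/myobj");
 *	bpf_object__unpin_maps(obj, "/sys/fs/bpf/myobj");
 */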
8088
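/* Release all resources owned by a map: user private data, the inner
 * map template, init slots, mmap-ed data, struct_ops state, name, pin
 * path, and the map FD.
 */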
8089 static void bpf_map__destroy(struct bpf_map *map)
8090 {
8091         if (map->clear_priv)
8092                 map->clear_priv(map, map->priv);
8093         map->priv = NULL;
8094         map->clear_priv = NULL;
8095
8096         if (map->inner_map) {
8097                 bpf_map__destroy(map->inner_map);
8098                 zfree(&map->inner_map);
8099         }
8100
8101         zfree(&map->init_slots);
8102         map->init_slots_sz = 0;
8103
8104         if (map->mmaped) {
8105                 munmap(map->mmaped, bpf_map_mmap_sz(map));
8106                 map->mmaped = NULL;
8107         }
8108
8109         if (map->st_ops) {
8110                 zfree(&map->st_ops->data);
8111                 zfree(&map->st_ops->progs);
8112                 zfree(&map->st_ops->kern_func_off);
8113                 zfree(&map->st_ops);
8114         }
8115
8116         zfree(&map->name);
8117         zfree(&map->pin_path);
8118
8119         if (map->fd >= 0)
8120                 zclose(map->fd);
8121 }
8122
8123 void bpf_object__close(struct bpf_object *obj)
8124 {
8125         size_t i;
8126
8127         if (IS_ERR_OR_NULL(obj))
8128                 return;
8129
8130         if (obj->clear_priv)
8131                 obj->clear_priv(obj, obj->priv);
8132
8133         bpf_object__elf_finish(obj);
8134         bpf_object__unload(obj);
8135         btf__free(obj->btf);
8136         btf_ext__free(obj->btf_ext);
8137
8138         for (i = 0; i < obj->nr_maps; i++)
8139                 bpf_map__destroy(&obj->maps[i]);
8140
8141         zfree(&obj->kconfig);
8142         zfree(&obj->externs);
8143         obj->nr_extern = 0;
8144
8145         zfree(&obj->maps);
8146         obj->nr_maps = 0;
8147
8148         if (obj->programs && obj->nr_programs) {
8149                 for (i = 0; i < obj->nr_programs; i++)
8150                         bpf_program__exit(&obj->programs[i]);
8151         }
8152         zfree(&obj->programs);
8153
8154         list_del(&obj->list);
8155         free(obj);
8156 }
8157
8158 struct bpf_object *
8159 bpf_object__next(struct bpf_object *prev)
8160 {
8161         struct bpf_object *next;
8162
8163         if (!prev)
8164                 next = list_first_entry(&bpf_objects_list,
8165                                         struct bpf_object,
8166                                         list);
8167         else
8168                 next = list_next_entry(prev, list);
8169
8170         /* An empty list is detected here, so no check is needed on entry. */
8171         if (&next->list == &bpf_objects_list)
8172                 return NULL;
8173
8174         return next;
8175 }
8176
8177 const char *bpf_object__name(const struct bpf_object *obj)
8178 {
8179         return obj ? obj->name : ERR_PTR(-EINVAL);
8180 }
8181
8182 unsigned int bpf_object__kversion(const struct bpf_object *obj)
8183 {
8184         return obj ? obj->kern_version : 0;
8185 }
8186
8187 struct btf *bpf_object__btf(const struct bpf_object *obj)
8188 {
8189         return obj ? obj->btf : NULL;
8190 }
8191
8192 int bpf_object__btf_fd(const struct bpf_object *obj)
8193 {
8194         return obj->btf ? btf__fd(obj->btf) : -1;
8195 }
8196
8197 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
8198                          bpf_object_clear_priv_t clear_priv)
8199 {
8200         if (obj->priv && obj->clear_priv)
8201                 obj->clear_priv(obj, obj->priv);
8202
8203         obj->priv = priv;
8204         obj->clear_priv = clear_priv;
8205         return 0;
8206 }
8207
8208 void *bpf_object__priv(const struct bpf_object *obj)
8209 {
8210         return obj ? obj->priv : ERR_PTR(-EINVAL);
8211 }
8212
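/* Iterate over an object's programs in either direction; a NULL
 * position starts from the first (or last) program, and NULL is
 * returned past the end.
 */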
8213 static struct bpf_program *
8214 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8215                     bool forward)
8216 {
8217         size_t nr_programs = obj->nr_programs;
8218         ssize_t idx;
8219
8220         if (!nr_programs)
8221                 return NULL;
8222
8223         if (!p)
8224                 /* Iter from the beginning */
8225                 return forward ? &obj->programs[0] :
8226                         &obj->programs[nr_programs - 1];
8227
8228         if (p->obj != obj) {
8229                 pr_warn("error: program handle doesn't match object\n");
8230                 return NULL;
8231         }
8232
8233         idx = (p - obj->programs) + (forward ? 1 : -1);
8234         if (idx < 0 || (size_t)idx >= obj->nr_programs)
8235                 return NULL;
8236         return &obj->programs[idx];
8237 }
8238
8239 struct bpf_program *
8240 bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8241 {
8242         struct bpf_program *prog = prev;
8243
8244         do {
8245                 prog = __bpf_program__iter(prog, obj, true);
8246         } while (prog && prog_is_subprog(obj, prog));
8247
8248         return prog;
8249 }
8250
8251 struct bpf_program *
8252 bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8253 {
8254         struct bpf_program *prog = next;
8255
8256         do {
8257                 prog = __bpf_program__iter(prog, obj, false);
8258         } while (prog && prog_is_subprog(obj, prog));
8259
8260         return prog;
8261 }
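
/* Usage sketch (hypothetical caller): both iterators return only entry-point
 * programs, since subprograms are filtered out via prog_is_subprog(). Forward
 * iteration starts from a NULL "previous" program:
 *
 *	struct bpf_program *prog = NULL;
 *
 *	while ((prog = bpf_program__next(prog, obj)))
 *		printf("%s: section '%s'\n", bpf_program__name(prog),
 *		       bpf_program__section_name(prog));
 */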
8262
8263 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
8264                           bpf_program_clear_priv_t clear_priv)
8265 {
8266         if (prog->priv && prog->clear_priv)
8267                 prog->clear_priv(prog, prog->priv);
8268
8269         prog->priv = priv;
8270         prog->clear_priv = clear_priv;
8271         return 0;
8272 }
8273
8274 void *bpf_program__priv(const struct bpf_program *prog)
8275 {
8276         return prog ? prog->priv : ERR_PTR(-EINVAL);
8277 }
8278
8279 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8280 {
8281         prog->prog_ifindex = ifindex;
8282 }
8283
8284 const char *bpf_program__name(const struct bpf_program *prog)
8285 {
8286         return prog->name;
8287 }
8288
8289 const char *bpf_program__section_name(const struct bpf_program *prog)
8290 {
8291         return prog->sec_name;
8292 }
8293
8294 const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8295 {
8296         const char *title;
8297
8298         title = prog->sec_name;
8299         if (needs_copy) {
8300                 title = strdup(title);
8301                 if (!title) {
8302                         pr_warn("failed to strdup program title\n");
8303                         return ERR_PTR(-ENOMEM);
8304                 }
8305         }
8306
8307         return title;
8308 }
8309
8310 bool bpf_program__autoload(const struct bpf_program *prog)
8311 {
8312         return prog->load;
8313 }
8314
8315 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8316 {
8317         if (prog->obj->loaded)
8318                 return -EINVAL;
8319
8320         prog->load = autoload;
8321         return 0;
8322 }
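
/* Usage sketch (hypothetical caller): autoload can only be changed between
 * bpf_object__open() and bpf_object__load(); clearing it keeps the program
 * in the object without loading it into the kernel:
 *
 *	if (bpf_program__set_autoload(prog, false))
 *		fprintf(stderr, "object was already loaded\n");
 */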
8323
8324 int bpf_program__fd(const struct bpf_program *prog)
8325 {
8326         return bpf_program__nth_fd(prog, 0);
8327 }
8328
8329 size_t bpf_program__size(const struct bpf_program *prog)
8330 {
8331         return prog->insns_cnt * BPF_INSN_SZ;
8332 }
8333
8334 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8335                           bpf_program_prep_t prep)
8336 {
8337         int *instances_fds;
8338
8339         if (nr_instances <= 0 || !prep)
8340                 return -EINVAL;
8341
8342         if (prog->instances.nr > 0 || prog->instances.fds) {
8343                 pr_warn("Can't set pre-processor after loading\n");
8344                 return -EINVAL;
8345         }
8346
8347         instances_fds = malloc(sizeof(int) * nr_instances);
8348         if (!instances_fds) {
8349                 pr_warn("failed to allocate memory for fds\n");
8350                 return -ENOMEM;
8351         }
8352
8353         /* fill all fds with -1 */
8354         memset(instances_fds, -1, sizeof(int) * nr_instances);
8355
8356         prog->instances.nr = nr_instances;
8357         prog->instances.fds = instances_fds;
8358         prog->preprocessor = prep;
8359         return 0;
8360 }
8361
8362 int bpf_program__nth_fd(const struct bpf_program *prog, int n)
8363 {
8364         int fd;
8365
8366         if (!prog)
8367                 return -EINVAL;
8368
8369         if (n >= prog->instances.nr || n < 0) {
8370                 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
8371                         n, prog->name, prog->instances.nr);
8372                 return -EINVAL;
8373         }
8374
8375         fd = prog->instances.fds[n];
8376         if (fd < 0) {
8377                 pr_warn("%dth instance of program '%s' is invalid\n",
8378                         n, prog->name);
8379                 return -ENOENT;
8380         }
8381
8382         return fd;
8383 }
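
/* Usage sketch (hypothetical caller): once a preprocessor is installed with
 * bpf_program__set_prep(), each of the program's instances gets its own FD
 * after load, fetched by index:
 *
 *	int n, fd;
 *
 *	for (n = 0; n < nr_instances; n++) {
 *		fd = bpf_program__nth_fd(prog, n);
 *		if (fd < 0)
 *			break;
 *	}
 *
 * where nr_instances matches the value passed to bpf_program__set_prep().
 */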
8384
8385 enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
8386 {
8387         return prog->type;
8388 }
8389
8390 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8391 {
8392         prog->type = type;
8393 }
8394
8395 static bool bpf_program__is_type(const struct bpf_program *prog,
8396                                  enum bpf_prog_type type)
8397 {
8398         return prog ? (prog->type == type) : false;
8399 }
8400
8401 #define BPF_PROG_TYPE_FNS(NAME, TYPE)                           \
8402 int bpf_program__set_##NAME(struct bpf_program *prog)           \
8403 {                                                               \
8404         if (!prog)                                              \
8405                 return -EINVAL;                                 \
8406         bpf_program__set_type(prog, TYPE);                      \
8407         return 0;                                               \
8408 }                                                               \
8409                                                                 \
8410 bool bpf_program__is_##NAME(const struct bpf_program *prog)     \
8411 {                                                               \
8412         return bpf_program__is_type(prog, TYPE);                \
8413 }                                                               \
8414
8415 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
8416 BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
8417 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
8418 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
8419 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
8420 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
8421 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
8422 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
8423 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
8424 BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
8425 BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
8426 BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
8427 BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
8428
8429 enum bpf_attach_type
8430 bpf_program__get_expected_attach_type(struct bpf_program *prog)
8431 {
8432         return prog->expected_attach_type;
8433 }
8434
8435 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
8436                                            enum bpf_attach_type type)
8437 {
8438         prog->expected_attach_type = type;
8439 }
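
/* Usage sketch (hypothetical caller): when the ELF section name alone isn't
 * enough to pick an attach point, both the program type and the expected
 * attach type can be forced before load:
 *
 *	bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
 *	bpf_program__set_expected_attach_type(prog, BPF_CGROUP_INET4_CONNECT);
 */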
8440
8441 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional,           \
8442                           attachable, attach_btf)                           \
8443         {                                                                   \
8444                 .sec = string,                                              \
8445                 .len = sizeof(string) - 1,                                  \
8446                 .prog_type = ptype,                                         \
8447                 .expected_attach_type = eatype,                             \
8448                 .is_exp_attach_type_optional = eatype_optional,             \
8449                 .is_attachable = attachable,                                \
8450                 .is_attach_btf = attach_btf,                                \
8451         }
8452
8453 /* Programs that can NOT be attached. */
8454 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
8455
8456 /* Programs that can be attached. */
8457 #define BPF_APROG_SEC(string, ptype, atype) \
8458         BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
8459
8460 /* Programs that must specify expected attach type at load time. */
8461 #define BPF_EAPROG_SEC(string, ptype, eatype) \
8462         BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
8463
8464 /* Programs that use BTF to identify attach point */
8465 #define BPF_PROG_BTF(string, ptype, eatype) \
8466         BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
8467
8468 /* Programs that can be attached but attach type can't be identified by section
8469  * name. Kept for backward compatibility.
8470  */
8471 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
8472
8473 #define SEC_DEF(sec_pfx, ptype, ...) {                                      \
8474         .sec = sec_pfx,                                                     \
8475         .len = sizeof(sec_pfx) - 1,                                         \
8476         .prog_type = BPF_PROG_TYPE_##ptype,                                 \
8477         __VA_ARGS__                                                         \
8478 }
8479
8480 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
8481                                       struct bpf_program *prog);
8482 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
8483                                   struct bpf_program *prog);
8484 static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
8485                                       struct bpf_program *prog);
8486 static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
8487                                      struct bpf_program *prog);
8488 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
8489                                    struct bpf_program *prog);
8490 static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
8491                                     struct bpf_program *prog);
8492
8493 static const struct bpf_sec_def section_defs[] = {
8494         BPF_PROG_SEC("socket",                  BPF_PROG_TYPE_SOCKET_FILTER),
8495         BPF_PROG_SEC("sk_reuseport",            BPF_PROG_TYPE_SK_REUSEPORT),
8496         SEC_DEF("kprobe/", KPROBE,
8497                 .attach_fn = attach_kprobe),
8498         BPF_PROG_SEC("uprobe/",                 BPF_PROG_TYPE_KPROBE),
8499         SEC_DEF("kretprobe/", KPROBE,
8500                 .attach_fn = attach_kprobe),
8501         BPF_PROG_SEC("uretprobe/",              BPF_PROG_TYPE_KPROBE),
8502         BPF_PROG_SEC("classifier",              BPF_PROG_TYPE_SCHED_CLS),
8503         BPF_PROG_SEC("action",                  BPF_PROG_TYPE_SCHED_ACT),
8504         SEC_DEF("tracepoint/", TRACEPOINT,
8505                 .attach_fn = attach_tp),
8506         SEC_DEF("tp/", TRACEPOINT,
8507                 .attach_fn = attach_tp),
8508         SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
8509                 .attach_fn = attach_raw_tp),
8510         SEC_DEF("raw_tp/", RAW_TRACEPOINT,
8511                 .attach_fn = attach_raw_tp),
8512         SEC_DEF("tp_btf/", TRACING,
8513                 .expected_attach_type = BPF_TRACE_RAW_TP,
8514                 .is_attach_btf = true,
8515                 .attach_fn = attach_trace),
8516         SEC_DEF("fentry/", TRACING,
8517                 .expected_attach_type = BPF_TRACE_FENTRY,
8518                 .is_attach_btf = true,
8519                 .attach_fn = attach_trace),
8520         SEC_DEF("fmod_ret/", TRACING,
8521                 .expected_attach_type = BPF_MODIFY_RETURN,
8522                 .is_attach_btf = true,
8523                 .attach_fn = attach_trace),
8524         SEC_DEF("fexit/", TRACING,
8525                 .expected_attach_type = BPF_TRACE_FEXIT,
8526                 .is_attach_btf = true,
8527                 .attach_fn = attach_trace),
8528         SEC_DEF("fentry.s/", TRACING,
8529                 .expected_attach_type = BPF_TRACE_FENTRY,
8530                 .is_attach_btf = true,
8531                 .is_sleepable = true,
8532                 .attach_fn = attach_trace),
8533         SEC_DEF("fmod_ret.s/", TRACING,
8534                 .expected_attach_type = BPF_MODIFY_RETURN,
8535                 .is_attach_btf = true,
8536                 .is_sleepable = true,
8537                 .attach_fn = attach_trace),
8538         SEC_DEF("fexit.s/", TRACING,
8539                 .expected_attach_type = BPF_TRACE_FEXIT,
8540                 .is_attach_btf = true,
8541                 .is_sleepable = true,
8542                 .attach_fn = attach_trace),
8543         SEC_DEF("freplace/", EXT,
8544                 .is_attach_btf = true,
8545                 .attach_fn = attach_trace),
8546         SEC_DEF("lsm/", LSM,
8547                 .is_attach_btf = true,
8548                 .expected_attach_type = BPF_LSM_MAC,
8549                 .attach_fn = attach_lsm),
8550         SEC_DEF("lsm.s/", LSM,
8551                 .is_attach_btf = true,
8552                 .is_sleepable = true,
8553                 .expected_attach_type = BPF_LSM_MAC,
8554                 .attach_fn = attach_lsm),
8555         SEC_DEF("iter/", TRACING,
8556                 .expected_attach_type = BPF_TRACE_ITER,
8557                 .is_attach_btf = true,
8558                 .attach_fn = attach_iter),
8559         BPF_EAPROG_SEC("xdp_devmap/",           BPF_PROG_TYPE_XDP,
8560                                                 BPF_XDP_DEVMAP),
8561         BPF_EAPROG_SEC("xdp_cpumap/",           BPF_PROG_TYPE_XDP,
8562                                                 BPF_XDP_CPUMAP),
8563         BPF_APROG_SEC("xdp",                    BPF_PROG_TYPE_XDP,
8564                                                 BPF_XDP),
8565         BPF_PROG_SEC("perf_event",              BPF_PROG_TYPE_PERF_EVENT),
8566         BPF_PROG_SEC("lwt_in",                  BPF_PROG_TYPE_LWT_IN),
8567         BPF_PROG_SEC("lwt_out",                 BPF_PROG_TYPE_LWT_OUT),
8568         BPF_PROG_SEC("lwt_xmit",                BPF_PROG_TYPE_LWT_XMIT),
8569         BPF_PROG_SEC("lwt_seg6local",           BPF_PROG_TYPE_LWT_SEG6LOCAL),
8570         BPF_APROG_SEC("cgroup_skb/ingress",     BPF_PROG_TYPE_CGROUP_SKB,
8571                                                 BPF_CGROUP_INET_INGRESS),
8572         BPF_APROG_SEC("cgroup_skb/egress",      BPF_PROG_TYPE_CGROUP_SKB,
8573                                                 BPF_CGROUP_INET_EGRESS),
8574         BPF_APROG_COMPAT("cgroup/skb",          BPF_PROG_TYPE_CGROUP_SKB),
8575         BPF_EAPROG_SEC("cgroup/sock_create",    BPF_PROG_TYPE_CGROUP_SOCK,
8576                                                 BPF_CGROUP_INET_SOCK_CREATE),
8577         BPF_EAPROG_SEC("cgroup/sock_release",   BPF_PROG_TYPE_CGROUP_SOCK,
8578                                                 BPF_CGROUP_INET_SOCK_RELEASE),
8579         BPF_APROG_SEC("cgroup/sock",            BPF_PROG_TYPE_CGROUP_SOCK,
8580                                                 BPF_CGROUP_INET_SOCK_CREATE),
8581         BPF_EAPROG_SEC("cgroup/post_bind4",     BPF_PROG_TYPE_CGROUP_SOCK,
8582                                                 BPF_CGROUP_INET4_POST_BIND),
8583         BPF_EAPROG_SEC("cgroup/post_bind6",     BPF_PROG_TYPE_CGROUP_SOCK,
8584                                                 BPF_CGROUP_INET6_POST_BIND),
8585         BPF_APROG_SEC("cgroup/dev",             BPF_PROG_TYPE_CGROUP_DEVICE,
8586                                                 BPF_CGROUP_DEVICE),
8587         BPF_APROG_SEC("sockops",                BPF_PROG_TYPE_SOCK_OPS,
8588                                                 BPF_CGROUP_SOCK_OPS),
8589         BPF_APROG_SEC("sk_skb/stream_parser",   BPF_PROG_TYPE_SK_SKB,
8590                                                 BPF_SK_SKB_STREAM_PARSER),
8591         BPF_APROG_SEC("sk_skb/stream_verdict",  BPF_PROG_TYPE_SK_SKB,
8592                                                 BPF_SK_SKB_STREAM_VERDICT),
8593         BPF_APROG_COMPAT("sk_skb",              BPF_PROG_TYPE_SK_SKB),
8594         BPF_APROG_SEC("sk_msg",                 BPF_PROG_TYPE_SK_MSG,
8595                                                 BPF_SK_MSG_VERDICT),
8596         BPF_APROG_SEC("lirc_mode2",             BPF_PROG_TYPE_LIRC_MODE2,
8597                                                 BPF_LIRC_MODE2),
8598         BPF_APROG_SEC("flow_dissector",         BPF_PROG_TYPE_FLOW_DISSECTOR,
8599                                                 BPF_FLOW_DISSECTOR),
8600         BPF_EAPROG_SEC("cgroup/bind4",          BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8601                                                 BPF_CGROUP_INET4_BIND),
8602         BPF_EAPROG_SEC("cgroup/bind6",          BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8603                                                 BPF_CGROUP_INET6_BIND),
8604         BPF_EAPROG_SEC("cgroup/connect4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8605                                                 BPF_CGROUP_INET4_CONNECT),
8606         BPF_EAPROG_SEC("cgroup/connect6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8607                                                 BPF_CGROUP_INET6_CONNECT),
8608         BPF_EAPROG_SEC("cgroup/sendmsg4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8609                                                 BPF_CGROUP_UDP4_SENDMSG),
8610         BPF_EAPROG_SEC("cgroup/sendmsg6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8611                                                 BPF_CGROUP_UDP6_SENDMSG),
8612         BPF_EAPROG_SEC("cgroup/recvmsg4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8613                                                 BPF_CGROUP_UDP4_RECVMSG),
8614         BPF_EAPROG_SEC("cgroup/recvmsg6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8615                                                 BPF_CGROUP_UDP6_RECVMSG),
8616         BPF_EAPROG_SEC("cgroup/getpeername4",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8617                                                 BPF_CGROUP_INET4_GETPEERNAME),
8618         BPF_EAPROG_SEC("cgroup/getpeername6",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8619                                                 BPF_CGROUP_INET6_GETPEERNAME),
8620         BPF_EAPROG_SEC("cgroup/getsockname4",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8621                                                 BPF_CGROUP_INET4_GETSOCKNAME),
8622         BPF_EAPROG_SEC("cgroup/getsockname6",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8623                                                 BPF_CGROUP_INET6_GETSOCKNAME),
8624         BPF_EAPROG_SEC("cgroup/sysctl",         BPF_PROG_TYPE_CGROUP_SYSCTL,
8625                                                 BPF_CGROUP_SYSCTL),
8626         BPF_EAPROG_SEC("cgroup/getsockopt",     BPF_PROG_TYPE_CGROUP_SOCKOPT,
8627                                                 BPF_CGROUP_GETSOCKOPT),
8628         BPF_EAPROG_SEC("cgroup/setsockopt",     BPF_PROG_TYPE_CGROUP_SOCKOPT,
8629                                                 BPF_CGROUP_SETSOCKOPT),
8630         BPF_PROG_SEC("struct_ops",              BPF_PROG_TYPE_STRUCT_OPS),
8631         BPF_EAPROG_SEC("sk_lookup/",            BPF_PROG_TYPE_SK_LOOKUP,
8632                                                 BPF_SK_LOOKUP),
8633 };
8634
8635 #undef BPF_PROG_SEC_IMPL
8636 #undef BPF_PROG_SEC
8637 #undef BPF_APROG_SEC
8638 #undef BPF_EAPROG_SEC
8639 #undef BPF_APROG_COMPAT
8640 #undef SEC_DEF
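
/* Illustration (hypothetical BPF-side source, not part of this file): the
 * table above is matched by prefix against the ELF section name that the
 * SEC() macro assigns to a program, so e.g.:
 *
 *	SEC("kprobe/do_sys_open")
 *	int trace_open(struct pt_regs *ctx) { ... }
 *
 * matches the "kprobe/" entry and gets BPF_PROG_TYPE_KPROBE plus the
 * attach_kprobe() auto-attach handler.
 */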
8641
8642 #define MAX_TYPE_NAME_SIZE 32
8643
8644 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
8645 {
8646         int i, n = ARRAY_SIZE(section_defs);
8647
8648         for (i = 0; i < n; i++) {
8649                 if (strncmp(sec_name,
8650                             section_defs[i].sec, section_defs[i].len))
8651                         continue;
8652                 return &section_defs[i];
8653         }
8654         return NULL;
8655 }
8656
8657 static char *libbpf_get_type_names(bool attach_type)
8658 {
8659         int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
8660         char *buf;
8661
8662         buf = malloc(len);
8663         if (!buf)
8664                 return NULL;
8665
8666         buf[0] = '\0';
8667         /* Build string buf with all available names */
8668         for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8669                 if (attach_type && !section_defs[i].is_attachable)
8670                         continue;
8671
8672                 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
8673                         free(buf);
8674                         return NULL;
8675                 }
8676                 strcat(buf, " ");
8677                 strcat(buf, section_defs[i].sec);
8678         }
8679
8680         return buf;
8681 }
8682
8683 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
8684                              enum bpf_attach_type *expected_attach_type)
8685 {
8686         const struct bpf_sec_def *sec_def;
8687         char *type_names;
8688
8689         if (!name)
8690                 return -EINVAL;
8691
8692         sec_def = find_sec_def(name);
8693         if (sec_def) {
8694                 *prog_type = sec_def->prog_type;
8695                 *expected_attach_type = sec_def->expected_attach_type;
8696                 return 0;
8697         }
8698
8699         pr_debug("failed to guess program type from ELF section '%s'\n", name);
8700         type_names = libbpf_get_type_names(false);
8701         if (type_names != NULL) {
8702                 pr_debug("supported section(type) names are:%s\n", type_names);
8703                 free(type_names);
8704         }
8705
8706         return -ESRCH;
8707 }
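
/* Usage sketch (hypothetical caller): resolve a section name to its types
 * the same way bpf_object__open() does when guessing:
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	if (!libbpf_prog_type_by_name("tracepoint/sched/sched_switch",
 *				      &prog_type, &attach_type))
 *		... prog_type is now BPF_PROG_TYPE_TRACEPOINT ...
 */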
8708
8709 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
8710                                                      size_t offset)
8711 {
8712         struct bpf_map *map;
8713         size_t i;
8714
8715         for (i = 0; i < obj->nr_maps; i++) {
8716                 map = &obj->maps[i];
8717                 if (!bpf_map__is_struct_ops(map))
8718                         continue;
8719                 if (map->sec_offset <= offset &&
8720                     offset - map->sec_offset < map->def.value_size)
8721                         return map;
8722         }
8723
8724         return NULL;
8725 }
8726
8727 /* Collect relocations from the ELF section and populate st_ops->progs[] */
8728 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
8729                                             GElf_Shdr *shdr, Elf_Data *data)
8730 {
8731         const struct btf_member *member;
8732         struct bpf_struct_ops *st_ops;
8733         struct bpf_program *prog;
8734         unsigned int shdr_idx;
8735         const struct btf *btf;
8736         struct bpf_map *map;
8737         Elf_Data *symbols;
8738         unsigned int moff, insn_idx;
8739         const char *name;
8740         __u32 member_idx;
8741         GElf_Sym sym;
8742         GElf_Rel rel;
8743         int i, nrels;
8744
8745         symbols = obj->efile.symbols;
8746         btf = obj->btf;
8747         nrels = shdr->sh_size / shdr->sh_entsize;
8748         for (i = 0; i < nrels; i++) {
8749                 if (!gelf_getrel(data, i, &rel)) {
8750                         pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
8751                         return -LIBBPF_ERRNO__FORMAT;
8752                 }
8753
8754                 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
8755                         pr_warn("struct_ops reloc: symbol %zx not found\n",
8756                                 (size_t)GELF_R_SYM(rel.r_info));
8757                         return -LIBBPF_ERRNO__FORMAT;
8758                 }
8759
8760                 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
8761                 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
8762                 if (!map) {
8763                         pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
8764                                 (size_t)rel.r_offset);
8765                         return -EINVAL;
8766                 }
8767
8768                 moff = rel.r_offset - map->sec_offset;
8769                 shdr_idx = sym.st_shndx;
8770                 st_ops = map->st_ops;
8771                 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
8772                          map->name,
8773                          (long long)(rel.r_info >> 32),
8774                          (long long)sym.st_value,
8775                          shdr_idx, (size_t)rel.r_offset,
8776                          map->sec_offset, sym.st_name, name);
8777
8778                 if (shdr_idx >= SHN_LORESERVE) {
8779                         pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
8780                                 map->name, (size_t)rel.r_offset, shdr_idx);
8781                         return -LIBBPF_ERRNO__RELOC;
8782                 }
8783                 if (sym.st_value % BPF_INSN_SZ) {
8784                         pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
8785                                 map->name, (unsigned long long)sym.st_value);
8786                         return -LIBBPF_ERRNO__FORMAT;
8787                 }
8788                 insn_idx = sym.st_value / BPF_INSN_SZ;
8789
8790                 member = find_member_by_offset(st_ops->type, moff * 8);
8791                 if (!member) {
8792                         pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
8793                                 map->name, moff);
8794                         return -EINVAL;
8795                 }
8796                 member_idx = member - btf_members(st_ops->type);
8797                 name = btf__name_by_offset(btf, member->name_off);
8798
8799                 if (!resolve_func_ptr(btf, member->type, NULL)) {
8800                         pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
8801                                 map->name, name);
8802                         return -EINVAL;
8803                 }
8804
8805                 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
8806                 if (!prog) {
8807                         pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
8808                                 map->name, shdr_idx, name);
8809                         return -EINVAL;
8810                 }
8811
8812                 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
8813                         const struct bpf_sec_def *sec_def;
8814
8815                         sec_def = find_sec_def(prog->sec_name);
8816                         if (sec_def &&
8817                             sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
8818                                 /* for pr_warn */
8819                                 prog->type = sec_def->prog_type;
8820                                 goto invalid_prog;
8821                         }
8822
8823                         prog->type = BPF_PROG_TYPE_STRUCT_OPS;
8824                         prog->attach_btf_id = st_ops->type_id;
8825                         prog->expected_attach_type = member_idx;
8826                 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
8827                            prog->attach_btf_id != st_ops->type_id ||
8828                            prog->expected_attach_type != member_idx) {
8829                         goto invalid_prog;
8830                 }
8831                 st_ops->progs[member_idx] = prog;
8832         }
8833
8834         return 0;
8835
8836 invalid_prog:
8837         pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
8838                 map->name, prog->name, prog->sec_name, prog->type,
8839                 prog->attach_btf_id, prog->expected_attach_type, name);
8840         return -EINVAL;
8841 }
8842
8843 #define BTF_TRACE_PREFIX "btf_trace_"
8844 #define BTF_LSM_PREFIX "bpf_lsm_"
8845 #define BTF_ITER_PREFIX "bpf_iter_"
8846 #define BTF_MAX_NAME_SIZE 128
8847
8848 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
8849                                    const char *name, __u32 kind)
8850 {
8851         char btf_type_name[BTF_MAX_NAME_SIZE];
8852         int ret;
8853
8854         ret = snprintf(btf_type_name, sizeof(btf_type_name),
8855                        "%s%s", prefix, name);
8856         /* snprintf returns the number of characters written excluding
8857          * the terminating null. So, if >= BTF_MAX_NAME_SIZE characters are
8858          * written, it indicates truncation.
8859          */
8860         if (ret < 0 || ret >= sizeof(btf_type_name))
8861                 return -ENAMETOOLONG;
8862         return btf__find_by_name_kind(btf, btf_type_name, kind);
8863 }
8864
8865 static inline int find_attach_btf_id(struct btf *btf, const char *name,
8866                                      enum bpf_attach_type attach_type)
8867 {
8868         int err;
8869
8870         if (attach_type == BPF_TRACE_RAW_TP)
8871                 err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
8872                                               BTF_KIND_TYPEDEF);
8873         else if (attach_type == BPF_LSM_MAC)
8874                 err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
8875                                               BTF_KIND_FUNC);
8876         else if (attach_type == BPF_TRACE_ITER)
8877                 err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
8878                                               BTF_KIND_FUNC);
8879         else
8880                 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8881
8882         return err;
8883 }
8884
8885 int libbpf_find_vmlinux_btf_id(const char *name,
8886                                enum bpf_attach_type attach_type)
8887 {
8888         struct btf *btf;
8889         int err;
8890
8891         btf = libbpf_find_kernel_btf();
8892         if (IS_ERR(btf)) {
8893                 pr_warn("vmlinux BTF is not found\n");
8894                 return -EINVAL;
8895         }
8896
8897         err = find_attach_btf_id(btf, name, attach_type);
8898         if (err <= 0)
8899                 pr_warn("%s is not found in vmlinux BTF\n", name);
8900
8901         btf__free(btf);
8902         return err;
8903 }
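
/* Usage sketch (hypothetical caller): find the vmlinux BTF ID that, say, an
 * fentry program targeting tcp_v4_connect() would attach to:
 *
 *	int btf_id;
 *
 *	btf_id = libbpf_find_vmlinux_btf_id("tcp_v4_connect",
 *					    BPF_TRACE_FENTRY);
 *	if (btf_id < 0)
 *		... vmlinux BTF missing or function not found ...
 */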
8904
8905 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
8906 {
8907         struct bpf_prog_info_linear *info_linear;
8908         struct bpf_prog_info *info;
8909         struct btf *btf = NULL;
8910         int err = -EINVAL;
8911
8912         info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
8913         if (IS_ERR_OR_NULL(info_linear)) {
8914                 pr_warn("failed get_prog_info_linear for FD %d\n",
8915                         attach_prog_fd);
8916                 return -EINVAL;
8917         }
8918         info = &info_linear->info;
8919         if (!info->btf_id) {
8920                 pr_warn("The target program doesn't have BTF\n");
8921                 goto out;
8922         }
8923         if (btf__get_from_id(info->btf_id, &btf)) {
8924                 pr_warn("Failed to get BTF of the program\n");
8925                 goto out;
8926         }
8927         err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8928         btf__free(btf);
8929         if (err <= 0) {
8930                 pr_warn("%s is not found in prog's BTF\n", name);
8931                 goto out;
8932         }
8933 out:
8934         free(info_linear);
8935         return err;
8936 }
8937
8938 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
8939                               enum bpf_attach_type attach_type,
8940                               int *btf_obj_fd, int *btf_type_id)
8941 {
8942         int ret, i;
8943
8944         ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
8945         if (ret > 0) {
8946                 *btf_obj_fd = 0; /* vmlinux BTF */
8947                 *btf_type_id = ret;
8948                 return 0;
8949         }
8950         if (ret != -ENOENT)
8951                 return ret;
8952
8953         ret = load_module_btfs(obj);
8954         if (ret)
8955                 return ret;
8956
8957         for (i = 0; i < obj->btf_module_cnt; i++) {
8958                 const struct module_btf *mod = &obj->btf_modules[i];
8959
8960                 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
8961                 if (ret > 0) {
8962                         *btf_obj_fd = mod->fd;
8963                         *btf_type_id = ret;
8964                         return 0;
8965                 }
8966                 if (ret == -ENOENT)
8967                         continue;
8968
8969                 return ret;
8970         }
8971
8972         return -ESRCH;
8973 }
8974
8975 static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
8976 {
8977         enum bpf_attach_type attach_type = prog->expected_attach_type;
8978         __u32 attach_prog_fd = prog->attach_prog_fd;
8979         const char *name = prog->sec_name, *attach_name;
8980         const struct bpf_sec_def *sec = NULL;
8981         int i, err;
8982
8983         if (!name)
8984                 return -EINVAL;
8985
8986         for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8987                 if (!section_defs[i].is_attach_btf)
8988                         continue;
8989                 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
8990                         continue;
8991
8992                 sec = &section_defs[i];
8993                 break;
8994         }
8995
8996         if (!sec) {
8997                 pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
8998                 return -ESRCH;
8999         }
9000         attach_name = name + sec->len;
9001
9002         /* BPF program's BTF ID */
9003         if (attach_prog_fd) {
9004                 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9005                 if (err < 0) {
9006                         pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9007                                  attach_prog_fd, attach_name, err);
9008                         return err;
9009                 }
9010                 *btf_obj_fd = 0;
9011                 *btf_type_id = err;
9012                 return 0;
9013         }
9014
9015         /* kernel/module BTF ID */
9016         err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9017         if (err) {
9018                 pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
9019                 return err;
9020         }
9021         return 0;
9022 }
9023
9024 int libbpf_attach_type_by_name(const char *name,
9025                                enum bpf_attach_type *attach_type)
9026 {
9027         char *type_names;
9028         int i;
9029
9030         if (!name)
9031                 return -EINVAL;
9032
9033         for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9034                 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
9035                         continue;
9036                 if (!section_defs[i].is_attachable)
9037                         return -EINVAL;
9038                 *attach_type = section_defs[i].expected_attach_type;
9039                 return 0;
9040         }
9041         pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9042         type_names = libbpf_get_type_names(true);
9043         if (type_names != NULL) {
9044                 pr_debug("attachable section(type) names are:%s\n", type_names);
9045                 free(type_names);
9046         }
9047
9048         return -EINVAL;
9049 }
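
/* Usage sketch (hypothetical caller): map an attachable section name to the
 * attach type needed by bpf_prog_attach()-style APIs:
 *
 *	enum bpf_attach_type atype;
 *
 *	if (!libbpf_attach_type_by_name("cgroup/connect4", &atype))
 *		... atype is now BPF_CGROUP_INET4_CONNECT ...
 */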
9050
9051 int bpf_map__fd(const struct bpf_map *map)
9052 {
9053         return map ? map->fd : -EINVAL;
9054 }
9055
9056 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
9057 {
9058         return map ? &map->def : ERR_PTR(-EINVAL);
9059 }
9060
9061 const char *bpf_map__name(const struct bpf_map *map)
9062 {
9063         return map ? map->name : NULL;
9064 }
9065
9066 enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9067 {
9068         return map->def.type;
9069 }
9070
9071 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9072 {
9073         if (map->fd >= 0)
9074                 return -EBUSY;
9075         map->def.type = type;
9076         return 0;
9077 }
9078
9079 __u32 bpf_map__map_flags(const struct bpf_map *map)
9080 {
9081         return map->def.map_flags;
9082 }
9083
9084 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9085 {
9086         if (map->fd >= 0)
9087                 return -EBUSY;
9088         map->def.map_flags = flags;
9089         return 0;
9090 }
9091
9092 __u32 bpf_map__numa_node(const struct bpf_map *map)
9093 {
9094         return map->numa_node;
9095 }
9096
9097 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9098 {
9099         if (map->fd >= 0)
9100                 return -EBUSY;
9101         map->numa_node = numa_node;
9102         return 0;
9103 }
9104
9105 __u32 bpf_map__key_size(const struct bpf_map *map)
9106 {
9107         return map->def.key_size;
9108 }
9109
9110 int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9111 {
9112         if (map->fd >= 0)
9113                 return -EBUSY;
9114         map->def.key_size = size;
9115         return 0;
9116 }
9117
9118 __u32 bpf_map__value_size(const struct bpf_map *map)
9119 {
9120         return map->def.value_size;
9121 }
9122
9123 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
9124 {
9125         if (map->fd >= 0)
9126                 return -EBUSY;
9127         map->def.value_size = size;
9128         return 0;
9129 }
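
/* Usage sketch (hypothetical caller): all of these setters succeed only
 * before the map is created in the kernel (map->fd < 0), i.e. between
 * bpf_object__open() and bpf_object__load():
 *
 *	bpf_map__set_type(map, BPF_MAP_TYPE_LRU_HASH);
 *	bpf_map__set_key_size(map, sizeof(__u32));
 *	bpf_map__set_value_size(map, sizeof(struct my_value));
 *
 * where struct my_value is a hypothetical value type of the caller.
 */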
9130
9131 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
9132 {
9133         return map ? map->btf_key_type_id : 0;
9134 }
9135
9136 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
9137 {
9138         return map ? map->btf_value_type_id : 0;
9139 }
9140
9141 int bpf_map__set_priv(struct bpf_map *map, void *priv,
9142                      bpf_map_clear_priv_t clear_priv)
9143 {
9144         if (!map)
9145                 return -EINVAL;
9146
9147         if (map->priv) {
9148                 if (map->clear_priv)
9149                         map->clear_priv(map, map->priv);
9150         }
9151
9152         map->priv = priv;
9153         map->clear_priv = clear_priv;
9154         return 0;
9155 }
9156
9157 void *bpf_map__priv(const struct bpf_map *map)
9158 {
9159         return map ? map->priv : ERR_PTR(-EINVAL);
9160 }
9161
9162 int bpf_map__set_initial_value(struct bpf_map *map,
9163                                const void *data, size_t size)
9164 {
9165         if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
9166             size != map->def.value_size || map->fd >= 0)
9167                 return -EINVAL;
9168
9169         memcpy(map->mmaped, data, size);
9170         return 0;
9171 }
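
/* Usage sketch (hypothetical caller): pre-seed an internal map (e.g. .data
 * or .rodata) before load; size must match the map's value size exactly:
 *
 *	struct my_config cfg = { .verbose = 1 };
 *
 *	err = bpf_map__set_initial_value(map, &cfg, sizeof(cfg));
 *
 * where struct my_config is a hypothetical layout mirroring the BPF-side
 * global data.
 */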
9172
9173 bool bpf_map__is_offload_neutral(const struct bpf_map *map)
9174 {
9175         return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
9176 }
9177
9178 bool bpf_map__is_internal(const struct bpf_map *map)
9179 {
9180         return map->libbpf_type != LIBBPF_MAP_UNSPEC;
9181 }
9182
9183 __u32 bpf_map__ifindex(const struct bpf_map *map)
9184 {
9185         return map->map_ifindex;
9186 }
9187
9188 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9189 {
9190         if (map->fd >= 0)
9191                 return -EBUSY;
9192         map->map_ifindex = ifindex;
9193         return 0;
9194 }
9195
9196 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
9197 {
9198         if (!bpf_map_type__is_map_in_map(map->def.type)) {
9199                 pr_warn("error: unsupported map type\n");
9200                 return -EINVAL;
9201         }
9202         if (map->inner_map_fd != -1) {
9203                 pr_warn("error: inner_map_fd already specified\n");
9204                 return -EINVAL;
9205         }
9206         map->inner_map_fd = fd;
9207         return 0;
9208 }
9209
9210 static struct bpf_map *
9211 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9212 {
9213         ssize_t idx;
9214         struct bpf_map *s, *e;
9215
9216         if (!obj || !obj->maps)
9217                 return NULL;
9218
9219         s = obj->maps;
9220         e = obj->maps + obj->nr_maps;
9221
9222         if ((m < s) || (m >= e)) {
9223                 pr_warn("error in %s: map handler doesn't belong to object\n",
9224                          __func__);
9225                 return NULL;
9226         }
9227
9228         idx = (m - obj->maps) + i;
9229         if (idx >= obj->nr_maps || idx < 0)
9230                 return NULL;
9231         return &obj->maps[idx];
9232 }
9233
9234 struct bpf_map *
9235 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
9236 {
9237         if (prev == NULL)
9238                 return obj->maps;
9239
9240         return __bpf_map__iter(prev, obj, 1);
9241 }
9242
9243 struct bpf_map *
9244 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
9245 {
9246         if (next == NULL) {
9247                 if (!obj->nr_maps)
9248                         return NULL;
9249                 return obj->maps + obj->nr_maps - 1;
9250         }
9251
9252         return __bpf_map__iter(next, obj, -1);
9253 }
9254
9255 struct bpf_map *
9256 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
9257 {
9258         struct bpf_map *pos;
9259
9260         bpf_object__for_each_map(pos, obj) {
9261                 if (pos->name && !strcmp(pos->name, name))
9262                         return pos;
9263         }
9264         return NULL;
9265 }
9266
9267 int
9268 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
9269 {
9270         return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
9271 }
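
/* Usage sketch (hypothetical caller): look up a map by the name it was
 * declared with on the BPF side and get its FD in one step:
 *
 *	int map_fd;
 *
 *	map_fd = bpf_object__find_map_fd_by_name(obj, "events");
 *	if (map_fd < 0)
 *		... no map named "events" in this object ...
 */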
9272
9273 struct bpf_map *
9274 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
9275 {
9276         return ERR_PTR(-ENOTSUP);
9277 }
9278
9279 long libbpf_get_error(const void *ptr)
9280 {
9281         return PTR_ERR_OR_ZERO(ptr);
9282 }
9283
9284 int bpf_prog_load(const char *file, enum bpf_prog_type type,
9285                   struct bpf_object **pobj, int *prog_fd)
9286 {
9287         struct bpf_prog_load_attr attr;
9288
9289         memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
9290         attr.file = file;
9291         attr.prog_type = type;
9292         attr.expected_attach_type = 0;
9293
9294         return bpf_prog_load_xattr(&attr, pobj, prog_fd);
9295 }
9296
9297 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
9298                         struct bpf_object **pobj, int *prog_fd)
9299 {
9300         struct bpf_object_open_attr open_attr = {};
9301         struct bpf_program *prog, *first_prog = NULL;
9302         struct bpf_object *obj;
9303         struct bpf_map *map;
9304         int err;
9305
9306         if (!attr)
9307                 return -EINVAL;
9308         if (!attr->file)
9309                 return -EINVAL;
9310
9311         open_attr.file = attr->file;
9312         open_attr.prog_type = attr->prog_type;
9313
9314         obj = bpf_object__open_xattr(&open_attr);
9315         if (IS_ERR_OR_NULL(obj))
9316                 return -ENOENT;
9317
9318         bpf_object__for_each_program(prog, obj) {
9319                 enum bpf_attach_type attach_type = attr->expected_attach_type;
9320                 /*
9321                  * to preserve backwards compatibility, bpf_prog_load treats
9322                  * attr->prog_type, if specified, as an override to whatever
9323                  * bpf_object__open guessed
9324                  */
9325                 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
9326                         bpf_program__set_type(prog, attr->prog_type);
9327                         bpf_program__set_expected_attach_type(prog,
9328                                                               attach_type);
9329                 }
9330                 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
9331                         /*
9332                          * we haven't guessed from section name and user
9333                          * didn't provide a fallback type, too bad...
9334                          */
9335                         bpf_object__close(obj);
9336                         return -EINVAL;
9337                 }
9338
9339                 prog->prog_ifindex = attr->ifindex;
9340                 prog->log_level = attr->log_level;
9341                 prog->prog_flags |= attr->prog_flags;
9342                 if (!first_prog)
9343                         first_prog = prog;
9344         }
9345
9346         bpf_object__for_each_map(map, obj) {
9347                 if (!bpf_map__is_offload_neutral(map))
9348                         map->map_ifindex = attr->ifindex;
9349         }
9350
9351         if (!first_prog) {
9352                 pr_warn("object file doesn't contain bpf program\n");
9353                 bpf_object__close(obj);
9354                 return -ENOENT;
9355         }
9356
9357         err = bpf_object__load(obj);
9358         if (err) {
9359                 bpf_object__close(obj);
9360                 return err;
9361         }
9362
9363         *pobj = obj;
9364         *prog_fd = bpf_program__fd(first_prog);
9365         return 0;
9366 }
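
/* Usage sketch (hypothetical caller): the simplest open+load combination;
 * prog_fd receives the FD of the first program in the object:
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (bpf_prog_load("prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *		... open or load failed ...
 *
 * where "prog.o" is a hypothetical compiled BPF object file.
 */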
9367
9368 struct bpf_link {
9369         int (*detach)(struct bpf_link *link);
9370         int (*destroy)(struct bpf_link *link);
9371         char *pin_path;         /* NULL, if not pinned */
9372         int fd;                 /* hook FD, -1 if not applicable */
9373         bool disconnected;
9374 };
9375
9376 /* Replace link's underlying BPF program with the new one */
9377 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
9378 {
9379         return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
9380 }
9381
9382 /* Release "ownership" of the underlying BPF resource (typically, a BPF
9383  * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
9384  * disconnected link, when destroyed through a bpf_link__destroy() call,
9385  * won't attempt to detach/unregister that BPF resource. This is useful in
9386  * situations where, say, the attached BPF program has to outlive the
9387  * userspace program that attached it. Depending on the type of BPF program,
9388  * though, additional steps (like pinning the BPF program in BPF FS) might be
9389  * necessary to ensure that exit of the userspace program doesn't trigger
9390  * automatic detachment and cleanup inside the kernel.
9391  */
9392 void bpf_link__disconnect(struct bpf_link *link)
9393 {
9394         link->disconnected = true;
9395 }
9396
9397 int bpf_link__destroy(struct bpf_link *link)
9398 {
9399         int err = 0;
9400
9401         if (IS_ERR_OR_NULL(link))
9402                 return 0;
9403
9404         if (!link->disconnected && link->detach)
9405                 err = link->detach(link);
9406         if (link->destroy)
9407                 link->destroy(link);
9408         if (link->pin_path)
9409                 free(link->pin_path);
9410         free(link);
9411
9412         return err;
9413 }
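
/* Usage sketch (hypothetical caller): keep an attachment alive past process
 * exit by pinning the link and disconnecting it before freeing the handle.
 * This assumes a link backed by a kernel bpf_link FD (e.g. one returned by
 * bpf_program__attach_iter()), since only those can be pinned:
 *
 *	bpf_link__pin(link, "/sys/fs/bpf/my_link");
 *	bpf_link__disconnect(link);
 *	bpf_link__destroy(link);
 *
 * after which bpf_link__destroy() only frees the handle without detaching
 * anything in the kernel.
 */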
9414
9415 int bpf_link__fd(const struct bpf_link *link)
9416 {
9417         return link->fd;
9418 }
9419
9420 const char *bpf_link__pin_path(const struct bpf_link *link)
9421 {
9422         return link->pin_path;
9423 }
9424
9425 static int bpf_link__detach_fd(struct bpf_link *link)
9426 {
9427         return close(link->fd);
9428 }
9429
9430 struct bpf_link *bpf_link__open(const char *path)
9431 {
9432         struct bpf_link *link;
9433         int fd;
9434
9435         fd = bpf_obj_get(path);
9436         if (fd < 0) {
9437                 fd = -errno;
9438                 pr_warn("failed to open link at %s: %d\n", path, fd);
9439                 return ERR_PTR(fd);
9440         }
9441
9442         link = calloc(1, sizeof(*link));
9443         if (!link) {
9444                 close(fd);
9445                 return ERR_PTR(-ENOMEM);
9446         }
9447         link->detach = &bpf_link__detach_fd;
9448         link->fd = fd;
9449
9450         link->pin_path = strdup(path);
9451         if (!link->pin_path) {
9452                 bpf_link__destroy(link);
9453                 return ERR_PTR(-ENOMEM);
9454         }
9455
9456         return link;
9457 }
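
/* Usage sketch (hypothetical caller): re-take ownership of a previously
 * pinned link from BPF FS:
 *
 *	link = bpf_link__open("/sys/fs/bpf/my_link");
 *	if (libbpf_get_error(link))
 *		... nothing pinned there, or no access ...
 */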
9458
9459 int bpf_link__detach(struct bpf_link *link)
9460 {
9461         return bpf_link_detach(link->fd) ? -errno : 0;
9462 }
9463
9464 int bpf_link__pin(struct bpf_link *link, const char *path)
9465 {
9466         int err;
9467
9468         if (link->pin_path)
9469                 return -EBUSY;
9470         err = make_parent_dir(path);
9471         if (err)
9472                 return err;
9473         err = check_path(path);
9474         if (err)
9475                 return err;
9476
9477         link->pin_path = strdup(path);
9478         if (!link->pin_path)
9479                 return -ENOMEM;
9480
9481         if (bpf_obj_pin(link->fd, link->pin_path)) {
9482                 err = -errno;
9483                 zfree(&link->pin_path);
9484                 return err;
9485         }
9486
9487         pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
9488         return 0;
9489 }
9490
9491 int bpf_link__unpin(struct bpf_link *link)
9492 {
9493         int err;
9494
9495         if (!link->pin_path)
9496                 return -EINVAL;
9497
9498         err = unlink(link->pin_path);
9499         if (err != 0)
9500                 return -errno;
9501
9502         pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
9503         zfree(&link->pin_path);
9504         return 0;
9505 }
9506
9507 static int bpf_link__detach_perf_event(struct bpf_link *link)
9508 {
9509         int err;
9510
9511         err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
9512         if (err)
9513                 err = -errno;
9514
9515         close(link->fd);
9516         return err;
9517 }
9518
9519 struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
9520                                                 int pfd)
9521 {
9522         char errmsg[STRERR_BUFSIZE];
9523         struct bpf_link *link;
9524         int prog_fd, err;
9525
9526         if (pfd < 0) {
9527                 pr_warn("prog '%s': invalid perf event FD %d\n",
9528                         prog->name, pfd);
9529                 return ERR_PTR(-EINVAL);
9530         }
9531         prog_fd = bpf_program__fd(prog);
9532         if (prog_fd < 0) {
9533                 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
9534                         prog->name);
9535                 return ERR_PTR(-EINVAL);
9536         }
9537
9538         link = calloc(1, sizeof(*link));
9539         if (!link)
9540                 return ERR_PTR(-ENOMEM);
9541         link->detach = &bpf_link__detach_perf_event;
9542         link->fd = pfd;
9543
9544         if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
9545                 err = -errno;
9546                 free(link);
9547                 pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
9548                         prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9549                 if (err == -EPROTO)
9550                         pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
9551                                 prog->name, pfd);
9552                 return ERR_PTR(err);
9553         }
9554         if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
9555                 err = -errno;
9556                 free(link);
9557                 pr_warn("prog '%s': failed to enable pfd %d: %s\n",
9558                         prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9559                 return ERR_PTR(err);
9560         }
9561         return link;
9562 }
9563
9564 /*
9565  * This function is expected to parse an integer in the range of [0, 2^31-1]
9566  * from the given file using scanf format string fmt. If the actual parsed
9567  * value is negative, the result might be indistinguishable from an error.
9568  */
9569 static int parse_uint_from_file(const char *file, const char *fmt)
9570 {
9571         char buf[STRERR_BUFSIZE];
9572         int err, ret;
9573         FILE *f;
9574
9575         f = fopen(file, "r");
9576         if (!f) {
9577                 err = -errno;
9578                 pr_debug("failed to open '%s': %s\n", file,
9579                          libbpf_strerror_r(err, buf, sizeof(buf)));
9580                 return err;
9581         }
9582         err = fscanf(f, fmt, &ret);
9583         if (err != 1) {
9584                 err = err == EOF ? -EIO : -errno;
9585                 pr_debug("failed to parse '%s': %s\n", file,
9586                         libbpf_strerror_r(err, buf, sizeof(buf)));
9587                 fclose(f);
9588                 return err;
9589         }
9590         fclose(f);
9591         return ret;
9592 }
9593
9594 static int determine_kprobe_perf_type(void)
9595 {
9596         const char *file = "/sys/bus/event_source/devices/kprobe/type";
9597
9598         return parse_uint_from_file(file, "%d\n");
9599 }
9600
9601 static int determine_uprobe_perf_type(void)
9602 {
9603         const char *file = "/sys/bus/event_source/devices/uprobe/type";
9604
9605         return parse_uint_from_file(file, "%d\n");
9606 }
9607
9608 static int determine_kprobe_retprobe_bit(void)
9609 {
9610         const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
9611
9612         return parse_uint_from_file(file, "config:%d\n");
9613 }
9614
9615 static int determine_uprobe_retprobe_bit(void)
9616 {
9617         const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
9618
9619         return parse_uint_from_file(file, "config:%d\n");
9620 }
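
/* For illustration, on a typical kernel these sysfs files look like:
 *
 *	$ cat /sys/bus/event_source/devices/kprobe/type
 *	6
 *	$ cat /sys/bus/event_source/devices/kprobe/format/retprobe
 *	config:0
 *
 * (exact values vary between kernels, which is why they are always read at
 * runtime rather than hard-coded)
 */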
9621
9622 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
9623                                  uint64_t offset, int pid)
9624 {
9625         struct perf_event_attr attr = {};
9626         char errmsg[STRERR_BUFSIZE];
9627         int type, pfd, err;
9628
9629         type = uprobe ? determine_uprobe_perf_type()
9630                       : determine_kprobe_perf_type();
9631         if (type < 0) {
9632                 pr_warn("failed to determine %s perf type: %s\n",
9633                         uprobe ? "uprobe" : "kprobe",
9634                         libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
9635                 return type;
9636         }
9637         if (retprobe) {
9638                 int bit = uprobe ? determine_uprobe_retprobe_bit()
9639                                  : determine_kprobe_retprobe_bit();
9640
9641                 if (bit < 0) {
9642                         pr_warn("failed to determine %s retprobe bit: %s\n",
9643                                 uprobe ? "uprobe" : "kprobe",
9644                                 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
9645                         return bit;
9646                 }
9647                 attr.config |= 1 << bit;
9648         }
9649         attr.size = sizeof(attr);
9650         attr.type = type;
9651         attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
9652         attr.config2 = offset;           /* kprobe_addr or probe_offset */
9653
9654         /* pid filter is meaningful only for uprobes */
9655         pfd = syscall(__NR_perf_event_open, &attr,
9656                       pid < 0 ? -1 : pid /* pid */,
9657                       pid == -1 ? 0 : -1 /* cpu */,
9658                       -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9659         if (pfd < 0) {
9660                 err = -errno;
9661                 pr_warn("%s perf_event_open() failed: %s\n",
9662                         uprobe ? "uprobe" : "kprobe",
9663                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9664                 return err;
9665         }
9666         return pfd;
9667 }
9668
9669 struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
9670                                             bool retprobe,
9671                                             const char *func_name)
9672 {
9673         char errmsg[STRERR_BUFSIZE];
9674         struct bpf_link *link;
9675         int pfd, err;
9676
9677         pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
9678                                     0 /* offset */, -1 /* pid */);
9679         if (pfd < 0) {
9680                 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
9681                         prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
9682                         libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9683                 return ERR_PTR(pfd);
9684         }
9685         link = bpf_program__attach_perf_event(prog, pfd);
9686         if (IS_ERR(link)) {
9687                 close(pfd);
9688                 err = PTR_ERR(link);
9689                 pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
9690                         prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
9691                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9692                 return link;
9693         }
9694         return link;
9695 }
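
/* Usage sketch (illustrative, not part of libbpf): attach an already
 * loaded program to a kernel function entry (retprobe == false). The
 * "obj" handle and "handle_exec" program name are hypothetical.
 *
 *	struct bpf_program *prog;
 *	struct bpf_link *link;
 *	long err;
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_exec");
 *	link = bpf_program__attach_kprobe(prog, false, "do_execve");
 *	err = libbpf_get_error(link);
 *	if (err)
 *		return err;
 */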
9696
9697 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
9698                                       struct bpf_program *prog)
9699 {
9700         const char *func_name;
9701         bool retprobe;
9702
9703         func_name = prog->sec_name + sec->len;
9704         retprobe = strcmp(sec->sec, "kretprobe/") == 0;
9705
9706         return bpf_program__attach_kprobe(prog, retprobe, func_name);
9707 }
9708
9709 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
9710                                             bool retprobe, pid_t pid,
9711                                             const char *binary_path,
9712                                             size_t func_offset)
9713 {
9714         char errmsg[STRERR_BUFSIZE];
9715         struct bpf_link *link;
9716         int pfd, err;
9717
9718         pfd = perf_event_open_probe(true /* uprobe */, retprobe,
9719                                     binary_path, func_offset, pid);
9720         if (pfd < 0) {
9721                 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
9722                         prog->name, retprobe ? "uretprobe" : "uprobe",
9723                         binary_path, func_offset,
9724                         libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9725                 return ERR_PTR(pfd);
9726         }
9727         link = bpf_program__attach_perf_event(prog, pfd);
9728         if (IS_ERR(link)) {
9729                 close(pfd);
9730                 err = PTR_ERR(link);
9731                 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
9732                         prog->name, retprobe ? "uretprobe" : "uprobe",
9733                         binary_path, func_offset,
9734                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9735                 return link;
9736         }
9737         return link;
9738 }
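
/* Usage sketch (illustrative): attach to a user-space function by file
 * offset within a binary; the path and 0x1234 offset are placeholders,
 * and pid == -1 means "all processes", as encoded by
 * perf_event_open_probe() above.
 *
 *	link = bpf_program__attach_uprobe(prog, false, -1,
 *					  "/usr/lib/libc.so.6", 0x1234);
 *	if (libbpf_get_error(link))
 *		return libbpf_get_error(link);
 */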
9739
9740 static int determine_tracepoint_id(const char *tp_category,
9741                                    const char *tp_name)
9742 {
9743         char file[PATH_MAX];
9744         int ret;
9745
9746         ret = snprintf(file, sizeof(file),
9747                        "/sys/kernel/debug/tracing/events/%s/%s/id",
9748                        tp_category, tp_name);
9749         if (ret < 0)
9750                 return -errno;
9751         if (ret >= sizeof(file)) {
9752                 pr_debug("tracepoint %s/%s path is too long\n",
9753                          tp_category, tp_name);
9754                 return -E2BIG;
9755         }
9756         return parse_uint_from_file(file, "%d\n");
9757 }
9758
9759 static int perf_event_open_tracepoint(const char *tp_category,
9760                                       const char *tp_name)
9761 {
9762         struct perf_event_attr attr = {};
9763         char errmsg[STRERR_BUFSIZE];
9764         int tp_id, pfd, err;
9765
9766         tp_id = determine_tracepoint_id(tp_category, tp_name);
9767         if (tp_id < 0) {
9768                 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
9769                         tp_category, tp_name,
9770                         libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
9771                 return tp_id;
9772         }
9773
9774         attr.type = PERF_TYPE_TRACEPOINT;
9775         attr.size = sizeof(attr);
9776         attr.config = tp_id;
9777
9778         pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
9779                       -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9780         if (pfd < 0) {
9781                 err = -errno;
9782                 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
9783                         tp_category, tp_name,
9784                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9785                 return err;
9786         }
9787         return pfd;
9788 }
9789
9790 struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
9791                                                 const char *tp_category,
9792                                                 const char *tp_name)
9793 {
9794         char errmsg[STRERR_BUFSIZE];
9795         struct bpf_link *link;
9796         int pfd, err;
9797
9798         pfd = perf_event_open_tracepoint(tp_category, tp_name);
9799         if (pfd < 0) {
9800                 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
9801                         prog->name, tp_category, tp_name,
9802                         libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9803                 return ERR_PTR(pfd);
9804         }
9805         link = bpf_program__attach_perf_event(prog, pfd);
9806         if (IS_ERR(link)) {
9807                 close(pfd);
9808                 err = PTR_ERR(link);
9809                 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
9810                         prog->name, tp_category, tp_name,
9811                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9812                 return link;
9813         }
9814         return link;
9815 }
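
/* Usage sketch (illustrative): category and name mirror the
 * /sys/kernel/debug/tracing/events/<category>/<name> layout consulted
 * by determine_tracepoint_id() above.
 *
 *	link = bpf_program__attach_tracepoint(prog, "sched",
 *					      "sched_process_exec");
 *	if (libbpf_get_error(link))
 *		return libbpf_get_error(link);
 */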
9816
9817 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
9818                                   struct bpf_program *prog)
9819 {
9820         char *sec_name, *tp_cat, *tp_name;
9821         struct bpf_link *link;
9822
9823         sec_name = strdup(prog->sec_name);
9824         if (!sec_name)
9825                 return ERR_PTR(-ENOMEM);
9826
9827         /* extract "tp/<category>/<name>" */
9828         tp_cat = sec_name + sec->len;
9829         tp_name = strchr(tp_cat, '/');
9830         if (!tp_name) {
9831                 link = ERR_PTR(-EINVAL);
9832                 goto out;
9833         }
9834         *tp_name = '\0';
9835         tp_name++;
9836
9837         link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
9838 out:
9839         free(sec_name);
9840         return link;
9841 }
9842
9843 struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
9844                                                     const char *tp_name)
9845 {
9846         char errmsg[STRERR_BUFSIZE];
9847         struct bpf_link *link;
9848         int prog_fd, pfd;
9849
9850         prog_fd = bpf_program__fd(prog);
9851         if (prog_fd < 0) {
9852                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9853                 return ERR_PTR(-EINVAL);
9854         }
9855
9856         link = calloc(1, sizeof(*link));
9857         if (!link)
9858                 return ERR_PTR(-ENOMEM);
9859         link->detach = &bpf_link__detach_fd;
9860
9861         pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
9862         if (pfd < 0) {
9863                 pfd = -errno;
9864                 free(link);
9865                 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
9866                         prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9867                 return ERR_PTR(pfd);
9868         }
9869         link->fd = pfd;
9870         return link;
9871 }
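
/* Usage sketch (illustrative): raw tracepoints are opened by name via
 * bpf_raw_tracepoint_open(), bypassing the perf event layer entirely.
 *
 *	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
 *	if (libbpf_get_error(link))
 *		return libbpf_get_error(link);
 */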
9872
9873 static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
9874                                       struct bpf_program *prog)
9875 {
9876         const char *tp_name = prog->sec_name + sec->len;
9877
9878         return bpf_program__attach_raw_tracepoint(prog, tp_name);
9879 }
9880
9881 /* Common logic for all BPF program types that attach to a btf_id */
9882 static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
9883 {
9884         char errmsg[STRERR_BUFSIZE];
9885         struct bpf_link *link;
9886         int prog_fd, pfd;
9887
9888         prog_fd = bpf_program__fd(prog);
9889         if (prog_fd < 0) {
9890                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9891                 return ERR_PTR(-EINVAL);
9892         }
9893
9894         link = calloc(1, sizeof(*link));
9895         if (!link)
9896                 return ERR_PTR(-ENOMEM);
9897         link->detach = &bpf_link__detach_fd;
9898
9899         pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
9900         if (pfd < 0) {
9901                 pfd = -errno;
9902                 free(link);
9903                 pr_warn("prog '%s': failed to attach: %s\n",
9904                         prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9905                 return ERR_PTR(pfd);
9906         }
9907         link->fd = pfd;
9908         return link;
9909 }
9910
9911 struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
9912 {
9913         return bpf_program__attach_btf_id(prog);
9914 }
9915
9916 struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
9917 {
9918         return bpf_program__attach_btf_id(prog);
9919 }
9920
9921 static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
9922                                      struct bpf_program *prog)
9923 {
9924         return bpf_program__attach_trace(prog);
9925 }
9926
9927 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
9928                                    struct bpf_program *prog)
9929 {
9930         return bpf_program__attach_lsm(prog);
9931 }
9932
9933 static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
9934                                     struct bpf_program *prog)
9935 {
9936         return bpf_program__attach_iter(prog, NULL);
9937 }
9938
9939 static struct bpf_link *
9940 bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
9941                        const char *target_name)
9942 {
9943         DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
9944                             .target_btf_id = btf_id);
9945         enum bpf_attach_type attach_type;
9946         char errmsg[STRERR_BUFSIZE];
9947         struct bpf_link *link;
9948         int prog_fd, link_fd;
9949
9950         prog_fd = bpf_program__fd(prog);
9951         if (prog_fd < 0) {
9952                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9953                 return ERR_PTR(-EINVAL);
9954         }
9955
9956         link = calloc(1, sizeof(*link));
9957         if (!link)
9958                 return ERR_PTR(-ENOMEM);
9959         link->detach = &bpf_link__detach_fd;
9960
9961         attach_type = bpf_program__get_expected_attach_type(prog);
9962         link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
9963         if (link_fd < 0) {
9964                 link_fd = -errno;
9965                 free(link);
9966                 pr_warn("prog '%s': failed to attach to %s: %s\n",
9967                         prog->name, target_name,
9968                         libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
9969                 return ERR_PTR(link_fd);
9970         }
9971         link->fd = link_fd;
9972         return link;
9973 }
9974
9975 struct bpf_link *
9976 bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
9977 {
9978         return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
9979 }
9980
9981 struct bpf_link *
9982 bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
9983 {
9984         return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
9985 }
9986
9987 struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
9988 {
9989         /* target_fd/target_ifindex use the same field in LINK_CREATE */
9990         return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
9991 }
9992
9993 struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
9994                                               int target_fd,
9995                                               const char *attach_func_name)
9996 {
9997         int btf_id;
9998
9999         if (!!target_fd != !!attach_func_name) {
10000                 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
10001                         prog->name);
10002                 return ERR_PTR(-EINVAL);
10003         }
10004
10005         if (prog->type != BPF_PROG_TYPE_EXT) {
10006                 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
10007                         prog->name);
10008                 return ERR_PTR(-EINVAL);
10009         }
10010
10011         if (target_fd) {
10012                 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
10013                 if (btf_id < 0)
10014                         return ERR_PTR(btf_id);
10015
10016                 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
10017         } else {
10018                 /* no target, so use raw_tracepoint_open for compatibility
10019                  * with old kernels
10020                  */
10021                 return bpf_program__attach_trace(prog);
10022         }
10023 }
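
/* Usage sketch (illustrative): replace function "xdp_dispatch" inside an
 * already loaded BPF program whose FD is target_fd; both names are
 * placeholders. Passing target_fd == 0 and a NULL name instead reuses
 * the attach target resolved at load time, per the fallback above.
 *
 *	link = bpf_program__attach_freplace(prog, target_fd, "xdp_dispatch");
 *	if (libbpf_get_error(link))
 *		return libbpf_get_error(link);
 */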
10024
10025 struct bpf_link *
10026 bpf_program__attach_iter(struct bpf_program *prog,
10027                          const struct bpf_iter_attach_opts *opts)
10028 {
10029         DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
10030         char errmsg[STRERR_BUFSIZE];
10031         struct bpf_link *link;
10032         int prog_fd, link_fd;
10033         __u32 target_fd = 0;
10034
10035         if (!OPTS_VALID(opts, bpf_iter_attach_opts))
10036                 return ERR_PTR(-EINVAL);
10037
10038         link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
10039         link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
10040
10041         prog_fd = bpf_program__fd(prog);
10042         if (prog_fd < 0) {
10043                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10044                 return ERR_PTR(-EINVAL);
10045         }
10046
10047         link = calloc(1, sizeof(*link));
10048         if (!link)
10049                 return ERR_PTR(-ENOMEM);
10050         link->detach = &bpf_link__detach_fd;
10051
10052         link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
10053                                   &link_create_opts);
10054         if (link_fd < 0) {
10055                 link_fd = -errno;
10056                 free(link);
10057                 pr_warn("prog '%s': failed to attach to iterator: %s\n",
10058                         prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10059                 return ERR_PTR(link_fd);
10060         }
10061         link->fd = link_fd;
10062         return link;
10063 }
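
/* Usage sketch (illustrative): with opts == NULL the iterator attaches
 * with no extra parameters; the resulting link FD can be handed to
 * bpf_iter_create() to obtain a readable iterator FD.
 *
 *	link = bpf_program__attach_iter(prog, NULL);
 *	if (libbpf_get_error(link))
 *		return libbpf_get_error(link);
 *	iter_fd = bpf_iter_create(bpf_link__fd(link));
 */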
10064
10065 struct bpf_link *bpf_program__attach(struct bpf_program *prog)
10066 {
10067         const struct bpf_sec_def *sec_def;
10068
10069         sec_def = find_sec_def(prog->sec_name);
10070         if (!sec_def || !sec_def->attach_fn)
10071                 return ERR_PTR(-ESRCH);
10072
10073         return sec_def->attach_fn(sec_def, prog);
10074 }
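
/* Usage sketch (illustrative): the generic attach resolves the attach
 * method from the program's SEC() name, so e.g. a program declared with
 * SEC("kprobe/do_execve") needs no type-specific call:
 *
 *	link = bpf_program__attach(prog);
 *	if (libbpf_get_error(link))
 *		return libbpf_get_error(link);
 */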
10075
10076 static int bpf_link__detach_struct_ops(struct bpf_link *link)
10077 {
10078         __u32 zero = 0;
10079
10080         if (bpf_map_delete_elem(link->fd, &zero))
10081                 return -errno;
10082
10083         return 0;
10084 }
10085
10086 struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
10087 {
10088         struct bpf_struct_ops *st_ops;
10089         struct bpf_link *link;
10090         __u32 i, zero = 0;
10091         int err;
10092
10093         if (!bpf_map__is_struct_ops(map) || map->fd == -1)
10094                 return ERR_PTR(-EINVAL);
10095
10096         link = calloc(1, sizeof(*link));
10097         if (!link)
10098                 return ERR_PTR(-ENOMEM);
10099
10100         st_ops = map->st_ops;
10101         for (i = 0; i < btf_vlen(st_ops->type); i++) {
10102                 struct bpf_program *prog = st_ops->progs[i];
10103                 void *kern_data;
10104                 int prog_fd;
10105
10106                 if (!prog)
10107                         continue;
10108
10109                 prog_fd = bpf_program__fd(prog);
10110                 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
10111                 *(unsigned long *)kern_data = prog_fd;
10112         }
10113
10114         err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
10115         if (err) {
10116                 err = -errno;
10117                 free(link);
10118                 return ERR_PTR(err);
10119         }
10120
10121         link->detach = bpf_link__detach_struct_ops;
10122         link->fd = map->fd;
10123
10124         return link;
10125 }
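
/* Usage sketch (illustrative): struct_ops attachment is map-based, so a
 * map, not a program, is attached; "tcp_cong" is a placeholder map name.
 *
 *	map = bpf_object__find_map_by_name(obj, "tcp_cong");
 *	link = bpf_map__attach_struct_ops(map);
 *	if (libbpf_get_error(link))
 *		return libbpf_get_error(link);
 */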
10126
10127 enum bpf_perf_event_ret
10128 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
10129                            void **copy_mem, size_t *copy_size,
10130                            bpf_perf_event_print_t fn, void *private_data)
10131 {
10132         struct perf_event_mmap_page *header = mmap_mem;
10133         __u64 data_head = ring_buffer_read_head(header);
10134         __u64 data_tail = header->data_tail;
10135         void *base = ((__u8 *)header) + page_size;
10136         int ret = LIBBPF_PERF_EVENT_CONT;
10137         struct perf_event_header *ehdr;
10138         size_t ehdr_size;
10139
10140         while (data_head != data_tail) {
10141                 ehdr = base + (data_tail & (mmap_size - 1));
10142                 ehdr_size = ehdr->size;
10143
10144                 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
10145                         void *copy_start = ehdr;
10146                         size_t len_first = base + mmap_size - copy_start;
10147                         size_t len_secnd = ehdr_size - len_first;
10148
10149                         if (*copy_size < ehdr_size) {
10150                                 free(*copy_mem);
10151                                 *copy_mem = malloc(ehdr_size);
10152                                 if (!*copy_mem) {
10153                                         *copy_size = 0;
10154                                         ret = LIBBPF_PERF_EVENT_ERROR;
10155                                         break;
10156                                 }
10157                                 *copy_size = ehdr_size;
10158                         }
10159
10160                         memcpy(*copy_mem, copy_start, len_first);
10161                         memcpy(*copy_mem + len_first, base, len_secnd);
10162                         ehdr = *copy_mem;
10163                 }
10164
10165                 ret = fn(ehdr, private_data);
10166                 data_tail += ehdr_size;
10167                 if (ret != LIBBPF_PERF_EVENT_CONT)
10168                         break;
10169         }
10170
10171         ring_buffer_write_tail(header, data_tail);
10172         return ret;
10173 }
10174
10175 struct perf_buffer;
10176
10177 struct perf_buffer_params {
10178         struct perf_event_attr *attr;
10179         /* if event_cb is specified, it takes precedence */
10180         perf_buffer_event_fn event_cb;
10181         /* sample_cb and lost_cb are higher-level common-case callbacks */
10182         perf_buffer_sample_fn sample_cb;
10183         perf_buffer_lost_fn lost_cb;
10184         void *ctx;
10185         int cpu_cnt;
10186         int *cpus;
10187         int *map_keys;
10188 };
10189
10190 struct perf_cpu_buf {
10191         struct perf_buffer *pb;
10192         void *base; /* mmap()'ed memory */
10193         void *buf; /* for reconstructing segmented data */
10194         size_t buf_size;
10195         int fd;
10196         int cpu;
10197         int map_key;
10198 };
10199
10200 struct perf_buffer {
10201         perf_buffer_event_fn event_cb;
10202         perf_buffer_sample_fn sample_cb;
10203         perf_buffer_lost_fn lost_cb;
10204         void *ctx; /* passed into callbacks */
10205
10206         size_t page_size;
10207         size_t mmap_size;
10208         struct perf_cpu_buf **cpu_bufs;
10209         struct epoll_event *events;
10210         int cpu_cnt; /* number of allocated CPU buffers */
10211         int epoll_fd; /* epoll instance FD */
10212         int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
10213 };
10214
10215 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
10216                                       struct perf_cpu_buf *cpu_buf)
10217 {
10218         if (!cpu_buf)
10219                 return;
10220         if (cpu_buf->base &&
10221             munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
10222                 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
10223         if (cpu_buf->fd >= 0) {
10224                 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
10225                 close(cpu_buf->fd);
10226         }
10227         free(cpu_buf->buf);
10228         free(cpu_buf);
10229 }
10230
10231 void perf_buffer__free(struct perf_buffer *pb)
10232 {
10233         int i;
10234
10235         if (IS_ERR_OR_NULL(pb))
10236                 return;
10237         if (pb->cpu_bufs) {
10238                 for (i = 0; i < pb->cpu_cnt; i++) {
10239                         struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10240
10241                         if (!cpu_buf)
10242                                 continue;
10243
10244                         bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
10245                         perf_buffer__free_cpu_buf(pb, cpu_buf);
10246                 }
10247                 free(pb->cpu_bufs);
10248         }
10249         if (pb->epoll_fd >= 0)
10250                 close(pb->epoll_fd);
10251         free(pb->events);
10252         free(pb);
10253 }
10254
10255 static struct perf_cpu_buf *
10256 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
10257                           int cpu, int map_key)
10258 {
10259         struct perf_cpu_buf *cpu_buf;
10260         char msg[STRERR_BUFSIZE];
10261         int err;
10262
10263         cpu_buf = calloc(1, sizeof(*cpu_buf));
10264         if (!cpu_buf)
10265                 return ERR_PTR(-ENOMEM);
10266
10267         cpu_buf->pb = pb;
10268         cpu_buf->cpu = cpu;
10269         cpu_buf->map_key = map_key;
10270
10271         cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
10272                               -1, PERF_FLAG_FD_CLOEXEC);
10273         if (cpu_buf->fd < 0) {
10274                 err = -errno;
10275                 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
10276                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10277                 goto error;
10278         }
10279
10280         cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
10281                              PROT_READ | PROT_WRITE, MAP_SHARED,
10282                              cpu_buf->fd, 0);
10283         if (cpu_buf->base == MAP_FAILED) {
10284                 cpu_buf->base = NULL;
10285                 err = -errno;
10286                 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
10287                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10288                 goto error;
10289         }
10290
10291         if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10292                 err = -errno;
10293                 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
10294                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10295                 goto error;
10296         }
10297
10298         return cpu_buf;
10299
10300 error:
10301         perf_buffer__free_cpu_buf(pb, cpu_buf);
10302         return (struct perf_cpu_buf *)ERR_PTR(err);
10303 }
10304
10305 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10306                                               struct perf_buffer_params *p);
10307
10308 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
10309                                      const struct perf_buffer_opts *opts)
10310 {
10311         struct perf_buffer_params p = {};
10312         struct perf_event_attr attr = {};
10313
10314         attr.config = PERF_COUNT_SW_BPF_OUTPUT;
10315         attr.type = PERF_TYPE_SOFTWARE;
10316         attr.sample_type = PERF_SAMPLE_RAW;
10317         attr.sample_period = 1;
10318         attr.wakeup_events = 1;
10319
10320         p.attr = &attr;
10321         p.sample_cb = opts ? opts->sample_cb : NULL;
10322         p.lost_cb = opts ? opts->lost_cb : NULL;
10323         p.ctx = opts ? opts->ctx : NULL;
10324
10325         return __perf_buffer__new(map_fd, page_cnt, &p);
10326 }
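
/* Usage sketch (illustrative): create an 8-pages-per-CPU buffer on top
 * of a BPF_MAP_TYPE_PERF_EVENT_ARRAY map FD; handle_sample is a
 * hypothetical perf_buffer_sample_fn callback.
 *
 *	struct perf_buffer_opts pb_opts = { .sample_cb = handle_sample };
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(map_fd, 8, &pb_opts);
 *	if (libbpf_get_error(pb))
 *		return libbpf_get_error(pb);
 */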
10327
10328 struct perf_buffer *
10329 perf_buffer__new_raw(int map_fd, size_t page_cnt,
10330                      const struct perf_buffer_raw_opts *opts)
10331 {
10332         struct perf_buffer_params p = {};
10333
10334         p.attr = opts->attr;
10335         p.event_cb = opts->event_cb;
10336         p.ctx = opts->ctx;
10337         p.cpu_cnt = opts->cpu_cnt;
10338         p.cpus = opts->cpus;
10339         p.map_keys = opts->map_keys;
10340
10341         return __perf_buffer__new(map_fd, page_cnt, &p);
10342 }
10343
10344 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10345                                               struct perf_buffer_params *p)
10346 {
10347         const char *online_cpus_file = "/sys/devices/system/cpu/online";
10348         struct bpf_map_info map;
10349         char msg[STRERR_BUFSIZE];
10350         struct perf_buffer *pb;
10351         bool *online = NULL;
10352         __u32 map_info_len;
10353         int err, i, j, n;
10354
10355         if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
10356                 pr_warn("page count should be power of two, but is %zu\n",
10357                         page_cnt);
10358                 return ERR_PTR(-EINVAL);
10359         }
10360
10361         /* best-effort sanity checks */
10362         memset(&map, 0, sizeof(map));
10363         map_info_len = sizeof(map);
10364         err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
10365         if (err) {
10366                 err = -errno;
10367                 /* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
10368                  * -EBADFD, -EFAULT, or -E2BIG on a real error
10369                  */
10370                 if (err != -EINVAL) {
10371                         pr_warn("failed to get map info for map FD %d: %s\n",
10372                                 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
10373                         return ERR_PTR(err);
10374                 }
10375                 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
10376                          map_fd);
10377         } else {
10378                 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
10379                         pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
10380                                 map.name);
10381                         return ERR_PTR(-EINVAL);
10382                 }
10383         }
10384
10385         pb = calloc(1, sizeof(*pb));
10386         if (!pb)
10387                 return ERR_PTR(-ENOMEM);
10388
10389         pb->event_cb = p->event_cb;
10390         pb->sample_cb = p->sample_cb;
10391         pb->lost_cb = p->lost_cb;
10392         pb->ctx = p->ctx;
10393
10394         pb->page_size = getpagesize();
10395         pb->mmap_size = pb->page_size * page_cnt;
10396         pb->map_fd = map_fd;
10397
10398         pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
10399         if (pb->epoll_fd < 0) {
10400                 err = -errno;
10401                 pr_warn("failed to create epoll instance: %s\n",
10402                         libbpf_strerror_r(err, msg, sizeof(msg)));
10403                 goto error;
10404         }
10405
10406         if (p->cpu_cnt > 0) {
10407                 pb->cpu_cnt = p->cpu_cnt;
10408         } else {
10409                 pb->cpu_cnt = libbpf_num_possible_cpus();
10410                 if (pb->cpu_cnt < 0) {
10411                         err = pb->cpu_cnt;
10412                         goto error;
10413                 }
10414                 if (map.max_entries && map.max_entries < pb->cpu_cnt)
10415                         pb->cpu_cnt = map.max_entries;
10416         }
10417
10418         pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
10419         if (!pb->events) {
10420                 err = -ENOMEM;
10421                 pr_warn("failed to allocate events: out of memory\n");
10422                 goto error;
10423         }
10424         pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
10425         if (!pb->cpu_bufs) {
10426                 err = -ENOMEM;
10427                 pr_warn("failed to allocate buffers: out of memory\n");
10428                 goto error;
10429         }
10430
10431         err = parse_cpu_mask_file(online_cpus_file, &online, &n);
10432         if (err) {
10433                 pr_warn("failed to get online CPU mask: %d\n", err);
10434                 goto error;
10435         }
10436
10437         for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
10438                 struct perf_cpu_buf *cpu_buf;
10439                 int cpu, map_key;
10440
10441                 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
10442                 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
10443
10444                 /* in case the user didn't explicitly request particular
10445                  * CPUs to attach to, skip offline/not-present CPUs
10446                  */
10447                 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
10448                         continue;
10449
10450                 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
10451                 if (IS_ERR(cpu_buf)) {
10452                         err = PTR_ERR(cpu_buf);
10453                         goto error;
10454                 }
10455
10456                 pb->cpu_bufs[j] = cpu_buf;
10457
10458                 err = bpf_map_update_elem(pb->map_fd, &map_key,
10459                                           &cpu_buf->fd, 0);
10460                 if (err) {
10461                         err = -errno;
10462                         pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
10463                                 cpu, map_key, cpu_buf->fd,
10464                                 libbpf_strerror_r(err, msg, sizeof(msg)));
10465                         goto error;
10466                 }
10467
10468                 pb->events[j].events = EPOLLIN;
10469                 pb->events[j].data.ptr = cpu_buf;
10470                 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
10471                               &pb->events[j]) < 0) {
10472                         err = -errno;
10473                         pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
10474                                 cpu, cpu_buf->fd,
10475                                 libbpf_strerror_r(err, msg, sizeof(msg)));
10476                         goto error;
10477                 }
10478                 j++;
10479         }
10480         pb->cpu_cnt = j;
10481         free(online);
10482
10483         return pb;
10484
10485 error:
10486         free(online);
10487         if (pb)
10488                 perf_buffer__free(pb);
10489         return ERR_PTR(err);
10490 }
10491
10492 struct perf_sample_raw {
10493         struct perf_event_header header;
10494         uint32_t size;
10495         char data[];
10496 };
10497
10498 struct perf_sample_lost {
10499         struct perf_event_header header;
10500         uint64_t id;
10501         uint64_t lost;
10502         uint64_t sample_id;
10503 };
10504
10505 static enum bpf_perf_event_ret
10506 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
10507 {
10508         struct perf_cpu_buf *cpu_buf = ctx;
10509         struct perf_buffer *pb = cpu_buf->pb;
10510         void *data = e;
10511
10512         /* user wants full control over parsing perf event */
10513         if (pb->event_cb)
10514                 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
10515
10516         switch (e->type) {
10517         case PERF_RECORD_SAMPLE: {
10518                 struct perf_sample_raw *s = data;
10519
10520                 if (pb->sample_cb)
10521                         pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
10522                 break;
10523         }
10524         case PERF_RECORD_LOST: {
10525                 struct perf_sample_lost *s = data;
10526
10527                 if (pb->lost_cb)
10528                         pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
10529                 break;
10530         }
10531         default:
10532                 pr_warn("unknown perf sample type %d\n", e->type);
10533                 return LIBBPF_PERF_EVENT_ERROR;
10534         }
10535         return LIBBPF_PERF_EVENT_CONT;
10536 }
10537
10538 static int perf_buffer__process_records(struct perf_buffer *pb,
10539                                         struct perf_cpu_buf *cpu_buf)
10540 {
10541         enum bpf_perf_event_ret ret;
10542
10543         ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
10544                                          pb->page_size, &cpu_buf->buf,
10545                                          &cpu_buf->buf_size,
10546                                          perf_buffer__process_record, cpu_buf);
10547         if (ret != LIBBPF_PERF_EVENT_CONT)
10548                 return ret;
10549         return 0;
10550 }
10551
10552 int perf_buffer__epoll_fd(const struct perf_buffer *pb)
10553 {
10554         return pb->epoll_fd;
10555 }
10556
10557 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
10558 {
10559         int i, cnt, err;
10560
10561         cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
10562         for (i = 0; i < cnt; i++) {
10563                 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
10564
10565                 err = perf_buffer__process_records(pb, cpu_buf);
10566                 if (err) {
10567                         pr_warn("error while processing records: %d\n", err);
10568                         return err;
10569                 }
10570         }
10571         return cnt < 0 ? -errno : cnt;
10572 }
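
/* Usage sketch (illustrative): a typical consumer loop with a 100 ms
 * timeout; a negative return is -errno from epoll_wait(), and -EINTR
 * usually just means a signal arrived.
 *
 *	while (!exiting) {
 *		err = perf_buffer__poll(pb, 100);
 *		if (err < 0 && err != -EINTR)
 *			break;
 *	}
 */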
10573
10574 /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
10575  * manager.
10576  */
10577 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
10578 {
10579         return pb->cpu_cnt;
10580 }
10581
10582 /*
10583  * Return perf_event FD of a ring buffer in *buf_idx* slot of
10584  * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
10585  * select()/poll()/epoll_wait() Linux syscalls.
10586  */
10587 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
10588 {
10589         struct perf_cpu_buf *cpu_buf;
10590
10591         if (buf_idx >= pb->cpu_cnt)
10592                 return -EINVAL;
10593
10594         cpu_buf = pb->cpu_bufs[buf_idx];
10595         if (!cpu_buf)
10596                 return -ENOENT;
10597
10598         return cpu_buf->fd;
10599 }
10600
10601 /*
10602  * Consume data from perf ring buffer corresponding to slot *buf_idx* in
10603  * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
10604  * consume, do nothing and return success.
10605  * Returns:
10606  *   - 0 on success;
10607  *   - <0 on failure.
10608  */
10609 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
10610 {
10611         struct perf_cpu_buf *cpu_buf;
10612
10613         if (buf_idx >= pb->cpu_cnt)
10614                 return -EINVAL;
10615
10616         cpu_buf = pb->cpu_bufs[buf_idx];
10617         if (!cpu_buf)
10618                 return -ENOENT;
10619
10620         return perf_buffer__process_records(pb, cpu_buf);
10621 }
10622
10623 int perf_buffer__consume(struct perf_buffer *pb)
10624 {
10625         int i, err;
10626
10627         for (i = 0; i < pb->cpu_cnt; i++) {
10628                 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10629
10630                 if (!cpu_buf)
10631                         continue;
10632
10633                 err = perf_buffer__process_records(pb, cpu_buf);
10634                 if (err) {
10635                         pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
10636                         return err;
10637                 }
10638         }
10639         return 0;
10640 }
10641
10642 struct bpf_prog_info_array_desc {
10643         int     array_offset;   /* e.g. offset of jited_prog_insns */
10644         int     count_offset;   /* e.g. offset of jited_prog_len */
10645         int     size_offset;    /* > 0: offset of rec size,
10646                                  * < 0: fixed size of -size_offset
10647                                  */
10648 };
10649
10650 static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
10651         [BPF_PROG_INFO_JITED_INSNS] = {
10652                 offsetof(struct bpf_prog_info, jited_prog_insns),
10653                 offsetof(struct bpf_prog_info, jited_prog_len),
10654                 -1,
10655         },
10656         [BPF_PROG_INFO_XLATED_INSNS] = {
10657                 offsetof(struct bpf_prog_info, xlated_prog_insns),
10658                 offsetof(struct bpf_prog_info, xlated_prog_len),
10659                 -1,
10660         },
10661         [BPF_PROG_INFO_MAP_IDS] = {
10662                 offsetof(struct bpf_prog_info, map_ids),
10663                 offsetof(struct bpf_prog_info, nr_map_ids),
10664                 -(int)sizeof(__u32),
10665         },
10666         [BPF_PROG_INFO_JITED_KSYMS] = {
10667                 offsetof(struct bpf_prog_info, jited_ksyms),
10668                 offsetof(struct bpf_prog_info, nr_jited_ksyms),
10669                 -(int)sizeof(__u64),
10670         },
10671         [BPF_PROG_INFO_JITED_FUNC_LENS] = {
10672                 offsetof(struct bpf_prog_info, jited_func_lens),
10673                 offsetof(struct bpf_prog_info, nr_jited_func_lens),
10674                 -(int)sizeof(__u32),
10675         },
10676         [BPF_PROG_INFO_FUNC_INFO] = {
10677                 offsetof(struct bpf_prog_info, func_info),
10678                 offsetof(struct bpf_prog_info, nr_func_info),
10679                 offsetof(struct bpf_prog_info, func_info_rec_size),
10680         },
10681         [BPF_PROG_INFO_LINE_INFO] = {
10682                 offsetof(struct bpf_prog_info, line_info),
10683                 offsetof(struct bpf_prog_info, nr_line_info),
10684                 offsetof(struct bpf_prog_info, line_info_rec_size),
10685         },
10686         [BPF_PROG_INFO_JITED_LINE_INFO] = {
10687                 offsetof(struct bpf_prog_info, jited_line_info),
10688                 offsetof(struct bpf_prog_info, nr_jited_line_info),
10689                 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
10690         },
10691         [BPF_PROG_INFO_PROG_TAGS] = {
10692                 offsetof(struct bpf_prog_info, prog_tags),
10693                 offsetof(struct bpf_prog_info, nr_prog_tags),
10694                 -(int)sizeof(__u8) * BPF_TAG_SIZE,
10695         },
10696
10697 };
10698
10699 static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
10700                                            int offset)
10701 {
10702         __u32 *array = (__u32 *)info;
10703
10704         if (offset >= 0)
10705                 return array[offset / sizeof(__u32)];
10706         return -(int)offset;
10707 }
10708
10709 static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
10710                                            int offset)
10711 {
10712         __u64 *array = (__u64 *)info;
10713
10714         if (offset >= 0)
10715                 return array[offset / sizeof(__u64)];
10716         return -(int)offset;
10717 }
10718
10719 static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
10720                                          __u32 val)
10721 {
10722         __u32 *array = (__u32 *)info;
10723
10724         if (offset >= 0)
10725                 array[offset / sizeof(__u32)] = val;
10726 }
10727
10728 static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
10729                                          __u64 val)
10730 {
10731         __u64 *array = (__u64 *)info;
10732
10733         if (offset >= 0)
10734                 array[offset / sizeof(__u64)] = val;
10735 }
10736
10737 struct bpf_prog_info_linear *
10738 bpf_program__get_prog_info_linear(int fd, __u64 arrays)
10739 {
10740         struct bpf_prog_info_linear *info_linear;
10741         struct bpf_prog_info info = {};
10742         __u32 info_len = sizeof(info);
10743         __u32 data_len = 0;
10744         int i, err;
10745         void *ptr;
10746
10747         if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
10748                 return ERR_PTR(-EINVAL);
10749
10750         /* step 1: get array dimensions */
10751         err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
10752         if (err) {
10753                 pr_debug("can't get prog info: %s", strerror(errno));
10754                 return ERR_PTR(-EFAULT);
10755         }
10756
10757         /* step 2: calculate total size of all arrays */
10758         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10759                 bool include_array = (arrays & (1UL << i)) > 0;
10760                 struct bpf_prog_info_array_desc *desc;
10761                 __u32 count, size;
10762
10763                 desc = bpf_prog_info_array_desc + i;
10764
10765                 /* kernel is too old to support this field */
10766                 if (info_len < desc->array_offset + sizeof(__u32) ||
10767                     info_len < desc->count_offset + sizeof(__u32) ||
10768                     (desc->size_offset > 0 && info_len < desc->size_offset))
10769                         include_array = false;
10770
10771                 if (!include_array) {
10772                         arrays &= ~(1UL << i);  /* clear the bit */
10773                         continue;
10774                 }
10775
10776                 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10777                 size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10778
10779                 data_len += count * size;
10780         }
10781
10782         /* step 3: allocate contiguous memory */
10783         data_len = roundup(data_len, sizeof(__u64));
10784         info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
10785         if (!info_linear)
10786                 return ERR_PTR(-ENOMEM);
10787
10788         /* step 4: fill data to info_linear->info */
10789         info_linear->arrays = arrays;
10790         memset(&info_linear->info, 0, sizeof(info));
10791         ptr = info_linear->data;
10792
10793         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10794                 struct bpf_prog_info_array_desc *desc;
10795                 __u32 count, size;
10796
10797                 if ((arrays & (1UL << i)) == 0)
10798                         continue;
10799
10800                 desc  = bpf_prog_info_array_desc + i;
10801                 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10802                 size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10803                 bpf_prog_info_set_offset_u32(&info_linear->info,
10804                                              desc->count_offset, count);
10805                 bpf_prog_info_set_offset_u32(&info_linear->info,
10806                                              desc->size_offset, size);
10807                 bpf_prog_info_set_offset_u64(&info_linear->info,
10808                                              desc->array_offset,
10809                                              ptr_to_u64(ptr));
10810                 ptr += count * size;
10811         }
10812
10813         /* step 5: call syscall again to get required arrays */
10814         err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
10815         if (err) {
10816                 pr_debug("can't get prog info: %s", strerror(errno));
10817                 free(info_linear);
10818                 return ERR_PTR(-EFAULT);
10819         }
10820
10821         /* step 6: verify the data */
10822         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10823                 struct bpf_prog_info_array_desc *desc;
10824                 __u32 v1, v2;
10825
10826                 if ((arrays & (1UL << i)) == 0)
10827                         continue;
10828
10829                 desc = bpf_prog_info_array_desc + i;
10830                 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10831                 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
10832                                                    desc->count_offset);
10833                 if (v1 != v2)
10834                         pr_warn("%s: mismatch in element count\n", __func__);
10835
10836                 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10837                 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
10838                                                    desc->size_offset);
10839                 if (v1 != v2)
10840                         pr_warn("%s: mismatch in rec size\n", __func__);
10841         }
10842
10843         /* step 7: update info_len and data_len */
10844         info_linear->info_len = sizeof(struct bpf_prog_info);
10845         info_linear->data_len = data_len;
10846
10847         return info_linear;
10848 }
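
/* Usage sketch (illustrative): fetch translated instructions and map IDs
 * in a single linear allocation; the result is released with plain
 * free().
 *
 *	info = bpf_program__get_prog_info_linear(prog_fd,
 *			(1UL << BPF_PROG_INFO_XLATED_INSNS) |
 *			(1UL << BPF_PROG_INFO_MAP_IDS));
 *	if (libbpf_get_error(info))
 *		return libbpf_get_error(info);
 *	...
 *	free(info);
 */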
10849
10850 void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
10851 {
10852         int i;
10853
10854         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10855                 struct bpf_prog_info_array_desc *desc;
10856                 __u64 addr, offs;
10857
10858                 if ((info_linear->arrays & (1UL << i)) == 0)
10859                         continue;
10860
10861                 desc = bpf_prog_info_array_desc + i;
10862                 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
10863                                                      desc->array_offset);
10864                 offs = addr - ptr_to_u64(info_linear->data);
10865                 bpf_prog_info_set_offset_u64(&info_linear->info,
10866                                              desc->array_offset, offs);
10867         }
10868 }
10869
10870 void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
10871 {
10872         int i;
10873
10874         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10875                 struct bpf_prog_info_array_desc *desc;
10876                 __u64 addr, offs;
10877
10878                 if ((info_linear->arrays & (1UL << i)) == 0)
10879                         continue;
10880
10881                 desc = bpf_prog_info_array_desc + i;
10882                 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
10883                                                      desc->array_offset);
10884                 addr = offs + ptr_to_u64(info_linear->data);
10885                 bpf_prog_info_set_offset_u64(&info_linear->info,
10886                                              desc->array_offset, addr);
10887         }
10888 }
10889
10890 int bpf_program__set_attach_target(struct bpf_program *prog,
10891                                    int attach_prog_fd,
10892                                    const char *attach_func_name)
10893 {
10894         int btf_obj_fd = 0, btf_id = 0, err;
10895
10896         if (!prog || attach_prog_fd < 0 || !attach_func_name)
10897                 return -EINVAL;
10898
10899         if (prog->obj->loaded)
10900                 return -EINVAL;
10901
10902         if (attach_prog_fd) {
10903                 btf_id = libbpf_find_prog_btf_id(attach_func_name,
10904                                                  attach_prog_fd);
10905                 if (btf_id < 0)
10906                         return btf_id;
10907         } else {
10908                 /* load btf_vmlinux, if not yet */
10909                 err = bpf_object__load_vmlinux_btf(prog->obj, true);
10910                 if (err)
10911                         return err;
10912                 err = find_kernel_btf_id(prog->obj, attach_func_name,
10913                                          prog->expected_attach_type,
10914                                          &btf_obj_fd, &btf_id);
10915                 if (err)
10916                         return err;
10917         }
10918
10919         prog->attach_btf_id = btf_id;
10920         prog->attach_btf_obj_fd = btf_obj_fd;
10921         prog->attach_prog_fd = attach_prog_fd;
10922         return 0;
10923 }
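
/* Usage sketch (illustrative): re-point a fentry/fexit program at a
 * different kernel function before the object is loaded;
 * attach_prog_fd == 0 resolves the name against vmlinux BTF, per the
 * logic above.
 *
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 *	if (err)
 *		return err;
 */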
10924
10925 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
10926 {
10927         int err = 0, n, len, start, end = -1;
10928         bool *tmp;
10929
10930         *mask = NULL;
10931         *mask_sz = 0;
10932
10933         /* Each substring separated by ',' has the format \d+-\d+ or \d+ */
10934         while (*s) {
10935                 if (*s == ',' || *s == '\n') {
10936                         s++;
10937                         continue;
10938                 }
10939                 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
10940                 if (n <= 0 || n > 2) {
10941                         pr_warn("Failed to get CPU range %s: %d\n", s, n);
10942                         err = -EINVAL;
10943                         goto cleanup;
10944                 } else if (n == 1) {
10945                         end = start;
10946                 }
10947                 if (start < 0 || start > end) {
10948                         pr_warn("Invalid CPU range [%d,%d] in %s\n",
10949                                 start, end, s);
10950                         err = -EINVAL;
10951                         goto cleanup;
10952                 }
10953                 tmp = realloc(*mask, end + 1);
10954                 if (!tmp) {
10955                         err = -ENOMEM;
10956                         goto cleanup;
10957                 }
10958                 *mask = tmp;
10959                 memset(tmp + *mask_sz, 0, start - *mask_sz);
10960                 memset(tmp + start, 1, end - start + 1);
10961                 *mask_sz = end + 1;
10962                 s += len;
10963         }
10964         if (!*mask_sz) {
10965                 pr_warn("Empty CPU range\n");
10966                 return -EINVAL;
10967         }
10968         return 0;
10969 cleanup:
10970         free(*mask);
10971         *mask = NULL;
10972         return err;
10973 }
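
/* Example (illustrative): the kernel's cpulist format, as found in the
 * sysfs files parsed below. For s == "0-2,5\n" this yields
 * *mask_sz == 6 and *mask == { true, true, true, false, false, true }.
 */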
10974
10975 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
10976 {
10977         int fd, err = 0, len;
10978         char buf[128];
10979
10980         fd = open(fcpu, O_RDONLY);
10981         if (fd < 0) {
10982                 err = -errno;
10983                 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
10984                 return err;
10985         }
10986         len = read(fd, buf, sizeof(buf));
10987         close(fd);
10988         if (len <= 0) {
10989                 err = len ? -errno : -EINVAL;
10990                 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
10991                 return err;
10992         }
10993         if (len >= sizeof(buf)) {
10994                 pr_warn("CPU mask is too big in file %s\n", fcpu);
10995                 return -E2BIG;
10996         }
10997         buf[len] = '\0';
10998
10999         return parse_cpu_mask_str(buf, mask, mask_sz);
11000 }
11001
11002 int libbpf_num_possible_cpus(void)
11003 {
11004         static const char *fcpu = "/sys/devices/system/cpu/possible";
11005         static int cpus;
11006         int err, n, i, tmp_cpus;
11007         bool *mask;
11008
11009         tmp_cpus = READ_ONCE(cpus);
11010         if (tmp_cpus > 0)
11011                 return tmp_cpus;
11012
11013         err = parse_cpu_mask_file(fcpu, &mask, &n);
11014         if (err)
11015                 return err;
11016
11017         tmp_cpus = 0;
11018         for (i = 0; i < n; i++) {
11019                 if (mask[i])
11020                         tmp_cpus++;
11021         }
11022         free(mask);
11023
11024         WRITE_ONCE(cpus, tmp_cpus);
11025         return tmp_cpus;
11026 }
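
/* Usage sketch (illustrative): size a per-CPU value buffer before
 * looking up a per-CPU map element.
 *
 *	int n = libbpf_num_possible_cpus();
 *
 *	if (n < 0)
 *		return n;
 *	values = calloc(n, sizeof(__u64));
 */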
11027
11028 int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
11029                               const struct bpf_object_open_opts *opts)
11030 {
11031         DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
11032                 .object_name = s->name,
11033         );
11034         struct bpf_object *obj;
11035         int i;
11036
11037         /* Attempt to preserve opts->object_name, unless overridden by the
11038          * user explicitly. Overwriting the object name for skeletons is
11039          * discouraged, as it breaks global data maps: their map names are
11040          * prefixed with the object name. When a skeleton is generated,
11041          * bpftool assumes this name will stay the same.
11042          */
11043         if (opts) {
11044                 memcpy(&skel_opts, opts, sizeof(*opts));
11045                 if (!opts->object_name)
11046                         skel_opts.object_name = s->name;
11047         }
11048
11049         obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
11050         if (IS_ERR(obj)) {
11051                 pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
11052                         s->name, PTR_ERR(obj));
11053                 return PTR_ERR(obj);
11054         }
11055
11056         *s->obj = obj;
11057
11058         for (i = 0; i < s->map_cnt; i++) {
11059                 struct bpf_map **map = s->maps[i].map;
11060                 const char *name = s->maps[i].name;
11061                 void **mmaped = s->maps[i].mmaped;
11062
11063                 *map = bpf_object__find_map_by_name(obj, name);
11064                 if (!*map) {
11065                         pr_warn("failed to find skeleton map '%s'\n", name);
11066                         return -ESRCH;
11067                 }
11068
11069                 /* externs shouldn't be pre-setup from user code */
11070                 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
11071                         *mmaped = (*map)->mmaped;
11072         }
11073
11074         for (i = 0; i < s->prog_cnt; i++) {
11075                 struct bpf_program **prog = s->progs[i].prog;
11076                 const char *name = s->progs[i].name;
11077
11078                 *prog = bpf_object__find_program_by_name(obj, name);
11079                 if (!*prog) {
11080                         pr_warn("failed to find skeleton program '%s'\n", name);
11081                         return -ESRCH;
11082                 }
11083         }
11084
11085         return 0;
11086 }
11087
11088 int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
11089 {
11090         int i, err;
11091
11092         err = bpf_object__load(*s->obj);
11093         if (err) {
11094                 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
11095                 return err;
11096         }
11097
11098         for (i = 0; i < s->map_cnt; i++) {
11099                 struct bpf_map *map = *s->maps[i].map;
11100                 size_t mmap_sz = bpf_map_mmap_sz(map);
11101                 int prot, map_fd = bpf_map__fd(map);
11102                 void **mmaped = s->maps[i].mmaped;
11103
11104                 if (!mmaped)
11105                         continue;
11106
11107                 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
11108                         *mmaped = NULL;
11109                         continue;
11110                 }
11111
11112                 if (map->def.map_flags & BPF_F_RDONLY_PROG)
11113                         prot = PROT_READ;
11114                 else
11115                         prot = PROT_READ | PROT_WRITE;
11116
11117                 /* Remap the anonymous mmap()-ed "map initialization image"
11118                  * as BPF map-backed mmap()-ed memory, preserving the same
11119                  * memory address. This causes the kernel to change the
11120                  * process's page tables to point at a different piece of
11121                  * kernel memory, but from the userspace point of view the
11122                  * memory address (and its contents, identical at this
11123                  * point) stays the same. This mapping is released by
11124                  * bpf_object__close() as part of normal cleanup, so the
11125                  * skeleton's cleanup doesn't need to worry about it.
11126                  */
11127                 *mmaped = mmap(map->mmaped, mmap_sz, prot,
11128                                 MAP_SHARED | MAP_FIXED, map_fd, 0);
11129                 if (*mmaped == MAP_FAILED) {
11130                         err = -errno;
11131                         *mmaped = NULL;
11132                         pr_warn("failed to re-mmap() map '%s': %d\n",
11133                                  bpf_map__name(map), err);
11134                         return err;
11135                 }
11136         }
11137
11138         return 0;
11139 }
11140
11141 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
11142 {
11143         int i;
11144
11145         for (i = 0; i < s->prog_cnt; i++) {
11146                 struct bpf_program *prog = *s->progs[i].prog;
11147                 struct bpf_link **link = s->progs[i].link;
11148                 const struct bpf_sec_def *sec_def;
11149
11150                 if (!prog->load)
11151                         continue;
11152
11153                 sec_def = find_sec_def(prog->sec_name);
11154                 if (!sec_def || !sec_def->attach_fn)
11155                         continue;
11156
11157                 *link = sec_def->attach_fn(sec_def, prog);
11158                 if (IS_ERR(*link)) {
11159                         pr_warn("failed to auto-attach program '%s': %ld\n",
11160                                 bpf_program__name(prog), PTR_ERR(*link));
11161                         return PTR_ERR(*link);
11162                 }
11163         }
11164
11165         return 0;
11166 }
11167
11168 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
11169 {
11170         int i;
11171
11172         for (i = 0; i < s->prog_cnt; i++) {
11173                 struct bpf_link **link = s->progs[i].link;
11174
11175                 bpf_link__destroy(*link);
11176                 *link = NULL;
11177         }
11178 }
11179
11180 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
11181 {
11182         if (s->progs)
11183                 bpf_object__detach_skeleton(s);
11184         if (s->obj)
11185                 bpf_object__close(*s->obj);
11186         free(s->maps);
11187         free(s->progs);
11188         free(s);
11189 }
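
/* Usage sketch (illustrative): typical lifecycle of a skeleton generated
 * by "bpftool gen skeleton"; "myprog" is a placeholder name. Each
 * generated wrapper delegates to the corresponding function above:
 *
 *	struct myprog_bpf *skel;
 *
 *	skel = myprog_bpf__open();
 *	err = myprog_bpf__load(skel);
 *	err = myprog_bpf__attach(skel);
 *	...
 *	myprog_bpf__destroy(skel);
 */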