1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
12 #include <sys/utsname.h>
13 #include <sys/param.h>
15 #include <linux/kernel.h>
16 #include <linux/err.h>
17 #include <linux/btf.h>
22 #include "libbpf_internal.h"
26 #define BTF_MAX_NR_TYPES 0x7fffffffU
27 #define BTF_MAX_STR_OFFSET 0x7fffffffU
29 static struct btf_type btf_void;
32 /* raw BTF data in native endianness */
34 /* raw BTF data in non-native endianness */
35 void *raw_data_swapped;
37 /* whether target endianness differs from the native one */
41 * When BTF is loaded from an ELF or raw memory it is stored
42 * in a contiguous memory block. The hdr, types_data, and strs_data
43 * point inside that memory region to their respective parts of BTF
46 * +--------------------------------+
47 * | Header | Types | Strings |
48 * +--------------------------------+
53 * strs_data------------+
55 * If BTF data is later modified, e.g., due to types added or
56 * removed, BTF deduplication performed, etc., this contiguous
57 * representation is broken up into three independently allocated
58 * memory regions to be able to modify them independently.
59 * raw_data is nulled out at that point, but can be later allocated
60 * and cached again if the user calls btf__get_raw_data(), at which point
61 * raw_data will contain a contiguous copy of header, types, and
64 * +----------+ +---------+ +-----------+
65 * | Header | | Types | | Strings |
66 * +----------+ +---------+ +-----------+
71 * strset__data(strs_set)-----+
73 * +----------+---------+-----------+
74 * | Header | Types | Strings |
75 * raw_data----->+----------+---------+-----------+
77 struct btf_header *hdr;
80 size_t types_data_cap; /* used size stored in hdr->type_len */
82 /* type ID to `struct btf_type *` lookup index
83 * type_offs[0] corresponds to the first non-VOID type:
84 * - for base BTF it's type [1];
85 * - for split BTF it's the first non-base BTF type.
89 /* number of types in this BTF instance:
90 * - doesn't include special [0] void type;
91 * - for split BTF counts number of types added on top of base BTF.
94 /* if not NULL, points to the base BTF on top of which the current
98 /* BTF type ID of the first type in this BTF instance:
99 * - for base BTF it's equal to 1;
100 * - for split BTF it's equal to biggest type ID of base BTF plus 1.
103 /* logical string offset of this BTF instance:
104 * - for base BTF it's equal to 0;
105 * - for split BTF it's equal to the size of base BTF's string section.
109 /* only one of strs_data or strs_set can be non-NULL, depending on
110 * whether BTF is in a modifiable state (strs_set is used) or not
111 * (strs_data points inside raw_data)
114 /* a set of unique strings */
115 struct strset *strs_set;
116 /* whether strings are already deduplicated */
119 /* BTF object FD, if loaded into kernel */
122 /* Pointer size (in bytes) for a target architecture of this BTF */
126 static inline __u64 ptr_to_u64(const void *ptr)
128 return (__u64) (unsigned long) ptr;
131 /* Ensure given dynamically allocated memory region pointed to by *data* with
132 * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
133 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
134 * are already used. At most *max_cnt* elements can ever be allocated.
135 * If necessary, memory is reallocated and all existing data is copied over,
136 * the new pointer to the memory region is stored at *data, and the new region's
137 * capacity (in number of elements) is stored in *cap_cnt.
138 * On success, a pointer to the beginning of the unused portion of memory is returned.
139 * On error, NULL is returned.
141 void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
142 size_t cur_cnt, size_t max_cnt, size_t add_cnt)
147 if (cur_cnt + add_cnt <= *cap_cnt)
148 return *data + cur_cnt * elem_sz;
150 /* requested more than the set limit */
151 if (cur_cnt + add_cnt > max_cnt)
155 new_cnt += new_cnt / 4; /* expand by 25% */
156 if (new_cnt < 16) /* but at least 16 elements */
158 if (new_cnt > max_cnt) /* but not exceeding a set limit */
160 if (new_cnt < cur_cnt + add_cnt) /* also ensure we have enough memory */
161 new_cnt = cur_cnt + add_cnt;
163 new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
167 /* zero out newly allocated portion of memory */
168 memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);
172 return new_data + cur_cnt * elem_sz;
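
/* Illustrative sketch (not part of libbpf): a hypothetical helper showing how
 * libbpf_add_mem() is typically used. It returns a pointer to the slot(s) for
 * the new element(s), so the caller only stores the value and bumps its count.
 */
static int example_append_u32(__u32 **arr, size_t *cap_cnt, size_t *cnt, __u32 val)
{
	__u32 *slot;

	slot = libbpf_add_mem((void **)arr, cap_cnt, sizeof(__u32),
			      *cnt, SIZE_MAX, 1);
	if (!slot)
		return -ENOMEM;
	*slot = val;
	(*cnt)++;
	return 0;
}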
175 /* Ensure given dynamically allocated memory region has enough allocated space
176 * to accommodate *need_cnt* elements of size *elem_sz* bytes each
178 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
182 if (need_cnt <= *cap_cnt)
185 p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
192 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
196 p = libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
197 btf->nr_types, BTF_MAX_NR_TYPES, 1);
205 static void btf_bswap_hdr(struct btf_header *h)
207 h->magic = bswap_16(h->magic);
208 h->hdr_len = bswap_32(h->hdr_len);
209 h->type_off = bswap_32(h->type_off);
210 h->type_len = bswap_32(h->type_len);
211 h->str_off = bswap_32(h->str_off);
212 h->str_len = bswap_32(h->str_len);
215 static int btf_parse_hdr(struct btf *btf)
217 struct btf_header *hdr = btf->hdr;
220 if (btf->raw_size < sizeof(struct btf_header)) {
221 pr_debug("BTF header not found\n");
225 if (hdr->magic == bswap_16(BTF_MAGIC)) {
226 btf->swapped_endian = true;
227 if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
228 pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
229 bswap_32(hdr->hdr_len));
233 } else if (hdr->magic != BTF_MAGIC) {
234 pr_debug("Invalid BTF magic:%x\n", hdr->magic);
238 meta_left = btf->raw_size - sizeof(*hdr);
239 if (meta_left < hdr->str_off + hdr->str_len) {
240 pr_debug("Invalid BTF total size:%u\n", btf->raw_size);
244 if (hdr->type_off + hdr->type_len > hdr->str_off) {
245 pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
246 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
250 if (hdr->type_off % 4) {
251 pr_debug("BTF type section is not aligned to 4 bytes\n");
258 static int btf_parse_str_sec(struct btf *btf)
260 const struct btf_header *hdr = btf->hdr;
261 const char *start = btf->strs_data;
262 const char *end = start + btf->hdr->str_len;
264 if (btf->base_btf && hdr->str_len == 0)
266 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
267 pr_debug("Invalid BTF string section\n");
270 if (!btf->base_btf && start[0]) {
271 pr_debug("Invalid BTF string section\n");
277 static int btf_type_size(const struct btf_type *t)
279 const int base_size = sizeof(struct btf_type);
280 __u16 vlen = btf_vlen(t);
282 switch (btf_kind(t)) {
285 case BTF_KIND_VOLATILE:
286 case BTF_KIND_RESTRICT:
288 case BTF_KIND_TYPEDEF:
293 return base_size + sizeof(__u32);
295 return base_size + vlen * sizeof(struct btf_enum);
297 return base_size + sizeof(struct btf_array);
298 case BTF_KIND_STRUCT:
300 return base_size + vlen * sizeof(struct btf_member);
301 case BTF_KIND_FUNC_PROTO:
302 return base_size + vlen * sizeof(struct btf_param);
304 return base_size + sizeof(struct btf_var);
305 case BTF_KIND_DATASEC:
306 return base_size + vlen * sizeof(struct btf_var_secinfo);
308 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
313 static void btf_bswap_type_base(struct btf_type *t)
315 t->name_off = bswap_32(t->name_off);
316 t->info = bswap_32(t->info);
317 t->type = bswap_32(t->type);
320 static int btf_bswap_type_rest(struct btf_type *t)
322 struct btf_var_secinfo *v;
323 struct btf_member *m;
327 __u16 vlen = btf_vlen(t);
330 switch (btf_kind(t)) {
333 case BTF_KIND_VOLATILE:
334 case BTF_KIND_RESTRICT:
336 case BTF_KIND_TYPEDEF:
341 *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
344 for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
345 e->name_off = bswap_32(e->name_off);
346 e->val = bswap_32(e->val);
351 a->type = bswap_32(a->type);
352 a->index_type = bswap_32(a->index_type);
353 a->nelems = bswap_32(a->nelems);
355 case BTF_KIND_STRUCT:
357 for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
358 m->name_off = bswap_32(m->name_off);
359 m->type = bswap_32(m->type);
360 m->offset = bswap_32(m->offset);
363 case BTF_KIND_FUNC_PROTO:
364 for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
365 p->name_off = bswap_32(p->name_off);
366 p->type = bswap_32(p->type);
370 btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
372 case BTF_KIND_DATASEC:
373 for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
374 v->type = bswap_32(v->type);
375 v->offset = bswap_32(v->offset);
376 v->size = bswap_32(v->size);
380 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
385 static int btf_parse_type_sec(struct btf *btf)
387 struct btf_header *hdr = btf->hdr;
388 void *next_type = btf->types_data;
389 void *end_type = next_type + hdr->type_len;
392 while (next_type + sizeof(struct btf_type) <= end_type) {
393 if (btf->swapped_endian)
394 btf_bswap_type_base(next_type);
396 type_size = btf_type_size(next_type);
399 if (next_type + type_size > end_type) {
400 pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
404 if (btf->swapped_endian && btf_bswap_type_rest(next_type))
407 err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
411 next_type += type_size;
415 if (next_type != end_type) {
416 pr_warn("BTF types data is malformed\n");
423 __u32 btf__get_nr_types(const struct btf *btf)
425 return btf->start_id + btf->nr_types - 1;
428 const struct btf *btf__base_btf(const struct btf *btf)
430 return btf->base_btf;
433 /* internal helper returning non-const pointer to a type */
434 struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
438 if (type_id < btf->start_id)
439 return btf_type_by_id(btf->base_btf, type_id);
440 return btf->types_data + btf->type_offs[type_id - btf->start_id];
443 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
445 if (type_id >= btf->start_id + btf->nr_types)
446 return errno = EINVAL, NULL;
447 return btf_type_by_id((struct btf *)btf, type_id);
450 static int determine_ptr_size(const struct btf *btf)
452 const struct btf_type *t;
456 if (btf->base_btf && btf->base_btf->ptr_sz > 0)
457 return btf->base_btf->ptr_sz;
459 n = btf__get_nr_types(btf);
460 for (i = 1; i <= n; i++) {
461 t = btf__type_by_id(btf, i);
465 name = btf__name_by_offset(btf, t->name_off);
469 if (strcmp(name, "long int") == 0 ||
470 strcmp(name, "long unsigned int") == 0) {
471 if (t->size != 4 && t->size != 8)
480 static size_t btf_ptr_sz(const struct btf *btf)
483 ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
484 return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
487 /* Return pointer size this BTF instance assumes. The size is heuristically
488 * determined by looking for 'long' or 'unsigned long' integer type and
489 * recording its size in bytes. If BTF type information doesn't have any such
490 * type, this function returns 0. In the latter case, native architecture's
491 * pointer size is assumed, so it will be either 4 or 8, depending on the
492 * architecture that libbpf was compiled for. It's possible to override the
493 * guessed value using the btf__set_pointer_size() API.
495 size_t btf__pointer_size(const struct btf *btf)
498 ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
501 /* not enough BTF type info to guess */
507 /* Override or set pointer size in bytes. Only values of 4 and 8 are
510 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
512 if (ptr_sz != 4 && ptr_sz != 8)
513 return libbpf_err(-EINVAL);
514 btf->ptr_sz = ptr_sz;
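
/* Usage sketch (illustrative, not part of libbpf): when preparing BTF for a
 * 32-bit target on a 64-bit host, the heuristic above can be overridden
 * explicitly; example_force_ptr_sz() is a hypothetical helper.
 */
static int example_force_ptr_sz(struct btf *btf, size_t target_ptr_sz)
{
	/* 0 means pointer size couldn't be guessed from BTF itself */
	if (btf__pointer_size(btf) == 0)
		return btf__set_pointer_size(btf, target_ptr_sz);
	return 0;
}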
518 static bool is_host_big_endian(void)
520 #if __BYTE_ORDER == __LITTLE_ENDIAN
522 #elif __BYTE_ORDER == __BIG_ENDIAN
525 # error "Unrecognized __BYTE_ORDER"
529 enum btf_endianness btf__endianness(const struct btf *btf)
531 if (is_host_big_endian())
532 return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
534 return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
537 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
539 if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
540 return libbpf_err(-EINVAL);
542 btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
543 if (!btf->swapped_endian) {
544 free(btf->raw_data_swapped);
545 btf->raw_data_swapped = NULL;
550 static bool btf_type_is_void(const struct btf_type *t)
552 return t == &btf_void || btf_is_fwd(t);
555 static bool btf_type_is_void_or_null(const struct btf_type *t)
557 return !t || btf_type_is_void(t);
560 #define MAX_RESOLVE_DEPTH 32
562 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
564 const struct btf_array *array;
565 const struct btf_type *t;
570 t = btf__type_by_id(btf, type_id);
571 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
572 switch (btf_kind(t)) {
574 case BTF_KIND_STRUCT:
577 case BTF_KIND_DATASEC:
582 size = btf_ptr_sz(btf);
584 case BTF_KIND_TYPEDEF:
585 case BTF_KIND_VOLATILE:
587 case BTF_KIND_RESTRICT:
592 array = btf_array(t);
593 if (nelems && array->nelems > UINT32_MAX / nelems)
594 return libbpf_err(-E2BIG);
595 nelems *= array->nelems;
596 type_id = array->type;
599 return libbpf_err(-EINVAL);
602 t = btf__type_by_id(btf, type_id);
607 return libbpf_err(-EINVAL);
608 if (nelems && size > UINT32_MAX / nelems)
609 return libbpf_err(-E2BIG);
611 return nelems * size;
614 int btf__align_of(const struct btf *btf, __u32 id)
616 const struct btf_type *t = btf__type_by_id(btf, id);
617 __u16 kind = btf_kind(t);
623 return min(btf_ptr_sz(btf), (size_t)t->size);
625 return btf_ptr_sz(btf);
626 case BTF_KIND_TYPEDEF:
627 case BTF_KIND_VOLATILE:
629 case BTF_KIND_RESTRICT:
630 return btf__align_of(btf, t->type);
632 return btf__align_of(btf, btf_array(t)->type);
633 case BTF_KIND_STRUCT:
634 case BTF_KIND_UNION: {
635 const struct btf_member *m = btf_members(t);
636 __u16 vlen = btf_vlen(t);
637 int i, max_align = 1, align;
639 for (i = 0; i < vlen; i++, m++) {
640 align = btf__align_of(btf, m->type);
642 return libbpf_err(align);
643 max_align = max(max_align, align);
649 pr_warn("Unsupported BTF_KIND:%u\n", btf_kind(t));
650 return errno = EINVAL, 0;
654 int btf__resolve_type(const struct btf *btf, __u32 type_id)
656 const struct btf_type *t;
659 t = btf__type_by_id(btf, type_id);
660 while (depth < MAX_RESOLVE_DEPTH &&
661 !btf_type_is_void_or_null(t) &&
662 (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
664 t = btf__type_by_id(btf, type_id);
668 if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
669 return libbpf_err(-EINVAL);
674 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
676 __u32 i, nr_types = btf__get_nr_types(btf);
678 if (!strcmp(type_name, "void"))
681 for (i = 1; i <= nr_types; i++) {
682 const struct btf_type *t = btf__type_by_id(btf, i);
683 const char *name = btf__name_by_offset(btf, t->name_off);
685 if (name && !strcmp(type_name, name))
689 return libbpf_err(-ENOENT);
692 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
695 __u32 i, nr_types = btf__get_nr_types(btf);
697 if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
700 for (i = 1; i <= nr_types; i++) {
701 const struct btf_type *t = btf__type_by_id(btf, i);
704 if (btf_kind(t) != kind)
706 name = btf__name_by_offset(btf, t->name_off);
707 if (name && !strcmp(type_name, name))
711 return libbpf_err(-ENOENT);
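
/* Illustrative sketch (not part of libbpf): combining the lookup and
 * resolution APIs above to query size and alignment of a named struct;
 * example_struct_layout() is a hypothetical helper.
 */
static int example_struct_layout(const struct btf *btf, const char *name)
{
	__s32 id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);
	__s64 sz;

	if (id < 0)
		return id;
	sz = btf__resolve_size(btf, id);
	if (sz < 0)
		return (int)sz;
	pr_debug("struct %s: size %lld, align %d\n",
		 name, (long long)sz, btf__align_of(btf, id));
	return 0;
}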
714 static bool btf_is_modifiable(const struct btf *btf)
716 return (void *)btf->hdr != btf->raw_data;
719 void btf__free(struct btf *btf)
721 if (IS_ERR_OR_NULL(btf))
727 if (btf_is_modifiable(btf)) {
728 /* if BTF was modified after loading, it will have a split
729 * in-memory representation for header, types, and strings
730 * sections, so we need to free all of them individually. It
731 * might still have a cached contiguous raw data present,
732 * which will be unconditionally freed below.
735 free(btf->types_data);
736 strset__free(btf->strs_set);
739 free(btf->raw_data_swapped);
740 free(btf->type_offs);
744 static struct btf *btf_new_empty(struct btf *base_btf)
748 btf = calloc(1, sizeof(*btf));
750 return ERR_PTR(-ENOMEM);
754 btf->start_str_off = 0;
756 btf->ptr_sz = sizeof(void *);
757 btf->swapped_endian = false;
760 btf->base_btf = base_btf;
761 btf->start_id = btf__get_nr_types(base_btf) + 1;
762 btf->start_str_off = base_btf->hdr->str_len;
765 /* +1 for empty string at offset 0 */
766 btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
767 btf->raw_data = calloc(1, btf->raw_size);
768 if (!btf->raw_data) {
770 return ERR_PTR(-ENOMEM);
773 btf->hdr = btf->raw_data;
774 btf->hdr->hdr_len = sizeof(struct btf_header);
775 btf->hdr->magic = BTF_MAGIC;
776 btf->hdr->version = BTF_VERSION;
778 btf->types_data = btf->raw_data + btf->hdr->hdr_len;
779 btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
780 btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */
785 struct btf *btf__new_empty(void)
787 return libbpf_ptr(btf_new_empty(NULL));
790 struct btf *btf__new_empty_split(struct btf *base_btf)
792 return libbpf_ptr(btf_new_empty(base_btf));
795 static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
800 btf = calloc(1, sizeof(struct btf));
802 return ERR_PTR(-ENOMEM);
806 btf->start_str_off = 0;
810 btf->base_btf = base_btf;
811 btf->start_id = btf__get_nr_types(base_btf) + 1;
812 btf->start_str_off = base_btf->hdr->str_len;
815 btf->raw_data = malloc(size);
816 if (!btf->raw_data) {
820 memcpy(btf->raw_data, data, size);
821 btf->raw_size = size;
823 btf->hdr = btf->raw_data;
824 err = btf_parse_hdr(btf);
828 btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
829 btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;
831 err = btf_parse_str_sec(btf);
832 err = err ?: btf_parse_type_sec(btf);
845 struct btf *btf__new(const void *data, __u32 size)
847 return libbpf_ptr(btf_new(data, size, NULL));
850 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
851 struct btf_ext **btf_ext)
853 Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
854 int err = 0, fd = -1, idx = 0;
855 struct btf *btf = NULL;
861 if (elf_version(EV_CURRENT) == EV_NONE) {
862 pr_warn("failed to init libelf for %s\n", path);
863 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
866 fd = open(path, O_RDONLY);
869 pr_warn("failed to open %s: %s\n", path, strerror(errno));
873 err = -LIBBPF_ERRNO__FORMAT;
875 elf = elf_begin(fd, ELF_C_READ, NULL);
877 pr_warn("failed to open %s as ELF file\n", path);
880 if (!gelf_getehdr(elf, &ehdr)) {
881 pr_warn("failed to get EHDR from %s\n", path);
885 if (elf_getshdrstrndx(elf, &shstrndx)) {
886 pr_warn("failed to get section names section index for %s\n",
891 if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
892 pr_warn("failed to get e_shstrndx from %s\n", path);
896 while ((scn = elf_nextscn(elf, scn)) != NULL) {
901 if (gelf_getshdr(scn, &sh) != &sh) {
902 pr_warn("failed to get section(%d) header from %s\n",
906 name = elf_strptr(elf, shstrndx, sh.sh_name);
908 pr_warn("failed to get section(%d) name from %s\n",
912 if (strcmp(name, BTF_ELF_SEC) == 0) {
913 btf_data = elf_getdata(scn, 0);
915 pr_warn("failed to get section(%d, %s) data from %s\n",
920 } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
921 btf_ext_data = elf_getdata(scn, 0);
923 pr_warn("failed to get section(%d, %s) data from %s\n",
937 btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
938 err = libbpf_get_error(btf);
942 switch (gelf_getclass(elf)) {
944 btf__set_pointer_size(btf, 4);
947 btf__set_pointer_size(btf, 8);
950 pr_warn("failed to get ELF class (bitness) for %s\n", path);
954 if (btf_ext && btf_ext_data) {
955 *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
956 err = libbpf_get_error(*btf_ext);
959 } else if (btf_ext) {
971 btf_ext__free(*btf_ext);
977 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
979 return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
982 struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
984 return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
987 static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
989 struct btf *btf = NULL;
996 f = fopen(path, "rb");
1002 /* check BTF magic */
1003 if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
1007 if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
1008 /* definitely not a raw BTF */
1014 if (fseek(f, 0, SEEK_END)) {
1023 /* rewind to the start */
1024 if (fseek(f, 0, SEEK_SET)) {
1029 /* pre-alloc memory and read all of BTF data */
1035 if (fread(data, 1, sz, f) < sz) {
1040 /* finally parse BTF data */
1041 btf = btf_new(data, sz, base_btf);
1047 return err ? ERR_PTR(err) : btf;
1050 struct btf *btf__parse_raw(const char *path)
1052 return libbpf_ptr(btf_parse_raw(path, NULL));
1055 struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
1057 return libbpf_ptr(btf_parse_raw(path, base_btf));
1060 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1068 btf = btf_parse_raw(path, base_btf);
1069 err = libbpf_get_error(btf);
1073 return ERR_PTR(err);
1074 return btf_parse_elf(path, base_btf, btf_ext);
1077 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
1079 return libbpf_ptr(btf_parse(path, NULL, btf_ext));
1082 struct btf *btf__parse_split(const char *path, struct btf *base_btf)
1084 return libbpf_ptr(btf_parse(path, base_btf, NULL));
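
/* Usage sketch (illustrative, not part of libbpf): parsing kernel BTF, which
 * modern kernels expose as a raw BTF blob at /sys/kernel/btf/vmlinux;
 * btf__parse() transparently handles both raw and ELF-wrapped BTF.
 */
static struct btf *example_load_vmlinux_btf(void)
{
	struct btf *btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);

	if (libbpf_get_error(btf)) {
		pr_warn("failed to parse vmlinux BTF\n");
		return NULL;
	}
	return btf;
}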
1087 static int compare_vsi_off(const void *_a, const void *_b)
1089 const struct btf_var_secinfo *a = _a;
1090 const struct btf_var_secinfo *b = _b;
1092 return a->offset - b->offset;
1095 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
1098 __u32 size = 0, off = 0, i, vars = btf_vlen(t);
1099 const char *name = btf__name_by_offset(btf, t->name_off);
1100 const struct btf_type *t_var;
1101 struct btf_var_secinfo *vsi;
1102 const struct btf_var *var;
1106 pr_debug("No name found in string section for DATASEC kind\n");
1110 /* .extern datasec size and var offsets were set correctly during
1111 * extern collection step, so just skip straight to sorting variables
1116 ret = bpf_object__section_size(obj, name, &size);
1117 if (ret || !size || (t->size && t->size != size)) {
1118 pr_debug("Invalid size for section %s: %u bytes\n", name, size);
1124 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
1125 t_var = btf__type_by_id(btf, vsi->type);
1126 var = btf_var(t_var);
1128 if (!btf_is_var(t_var)) {
1129 pr_debug("Non-VAR type seen in section %s\n", name);
1133 if (var->linkage == BTF_VAR_STATIC)
1136 name = btf__name_by_offset(btf, t_var->name_off);
1138 pr_debug("No name found in string section for VAR kind\n");
1142 ret = bpf_object__variable_offset(obj, name, &off);
1144 pr_debug("No offset found in symbol table for VAR %s\n",
1153 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
1157 int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
1162 for (i = 1; i <= btf->nr_types; i++) {
1163 struct btf_type *t = btf_type_by_id(btf, i);
1165 /* Loader needs to fix up some of the things compiler
1166 * couldn't get its hands on while emitting BTF. This
1167 * is section size and global variable offset. We use
1168 * the info from the ELF itself for this purpose.
1170 if (btf_is_datasec(t)) {
1171 err = btf_fixup_datasec(obj, btf, t);
1177 return libbpf_err(err);
1180 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1182 int btf__load(struct btf *btf)
1184 __u32 log_buf_size = 0, raw_size;
1185 char *log_buf = NULL;
1190 return libbpf_err(-EEXIST);
1194 log_buf = malloc(log_buf_size);
1196 return libbpf_err(-ENOMEM);
1201 raw_data = btf_get_raw_data(btf, &raw_size, false);
1206 /* cache native raw data representation */
1207 btf->raw_size = raw_size;
1208 btf->raw_data = raw_data;
1210 btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false);
1212 if (!log_buf || errno == ENOSPC) {
1213 log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
1220 pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
1222 pr_warn("%s\n", log_buf);
1228 return libbpf_err(err);
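
/* Usage sketch (illustrative, not part of libbpf): loading BTF into the
 * kernel and retrieving the resulting BTF object FD.
 */
static int example_load_into_kernel(struct btf *btf)
{
	int err = btf__load(btf);

	return err ? err : btf__fd(btf);
}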
1231 int btf__fd(const struct btf *btf)
1236 void btf__set_fd(struct btf *btf, int fd)
1241 static const void *btf_strs_data(const struct btf *btf)
1243 return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1246 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
1248 struct btf_header *hdr = btf->hdr;
1254 data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
1256 *size = btf->raw_size;
1260 data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
1261 data = calloc(1, data_sz);
1266 memcpy(p, hdr, hdr->hdr_len);
1271 memcpy(p, btf->types_data, hdr->type_len);
1273 for (i = 0; i < btf->nr_types; i++) {
1274 t = p + btf->type_offs[i];
1275 /* btf_bswap_type_rest() relies on native t->info, so
1276 * we swap base type info after we swapped all the
1277 * additional information
1279 if (btf_bswap_type_rest(t))
1281 btf_bswap_type_base(t);
1286 memcpy(p, btf_strs_data(btf), hdr->str_len);
1296 const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
1298 struct btf *btf = (struct btf *)btf_ro;
1302 data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
1304 return errno = ENOMEM, NULL;
1306 btf->raw_size = data_sz;
1307 if (btf->swapped_endian)
1308 btf->raw_data_swapped = data;
1310 btf->raw_data = data;
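
/* Usage sketch (illustrative, not part of libbpf): serializing BTF back into
 * a raw blob that can be written to a file or passed to the kernel.
 */
static int example_write_raw_btf(const struct btf *btf, FILE *f)
{
	__u32 sz;
	const void *raw = btf__get_raw_data(btf, &sz);

	if (!raw)
		return -errno;
	return fwrite(raw, 1, sz, f) == sz ? 0 : -EIO;
}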
1315 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1317 if (offset < btf->start_str_off)
1318 return btf__str_by_offset(btf->base_btf, offset);
1319 else if (offset - btf->start_str_off < btf->hdr->str_len)
1320 return btf_strs_data(btf) + (offset - btf->start_str_off);
1322 return errno = EINVAL, NULL;
1325 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
1327 return btf__str_by_offset(btf, offset);
1330 struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
1332 struct bpf_btf_info btf_info;
1333 __u32 len = sizeof(btf_info);
1339 /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
1340 * let's start with a sane default - 4KiB here - and resize it only if
1341 * bpf_obj_get_info_by_fd() needs a bigger buffer.
1344 ptr = malloc(last_size);
1346 return ERR_PTR(-ENOMEM);
1348 memset(&btf_info, 0, sizeof(btf_info));
1349 btf_info.btf = ptr_to_u64(ptr);
1350 btf_info.btf_size = last_size;
1351 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
1353 if (!err && btf_info.btf_size > last_size) {
1356 last_size = btf_info.btf_size;
1357 temp_ptr = realloc(ptr, last_size);
1359 btf = ERR_PTR(-ENOMEM);
1364 len = sizeof(btf_info);
1365 memset(&btf_info, 0, sizeof(btf_info));
1366 btf_info.btf = ptr_to_u64(ptr);
1367 btf_info.btf_size = last_size;
1369 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
1372 if (err || btf_info.btf_size > last_size) {
1373 btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
1377 btf = btf_new(ptr, btf_info.btf_size, base_btf);
1384 int btf__get_from_id(__u32 id, struct btf **btf)
1390 btf_fd = bpf_btf_get_fd_by_id(id);
1392 return libbpf_err(-errno);
1394 res = btf_get_from_fd(btf_fd, NULL);
1395 err = libbpf_get_error(res);
1400 return libbpf_err(err);
1406 int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
1407 __u32 expected_key_size, __u32 expected_value_size,
1408 __u32 *key_type_id, __u32 *value_type_id)
1410 const struct btf_type *container_type;
1411 const struct btf_member *key, *value;
1412 const size_t max_name = 256;
1413 char container_name[max_name];
1414 __s64 key_size, value_size;
1417 if (snprintf(container_name, max_name, "____btf_map_%s", map_name) >= max_name) {
1418 pr_warn("map:%s length of '____btf_map_%s' is too long\n",
1419 map_name, map_name);
1420 return libbpf_err(-EINVAL);
1423 container_id = btf__find_by_name(btf, container_name);
1424 if (container_id < 0) {
1425 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1426 map_name, container_name);
1427 return libbpf_err(container_id);
1430 container_type = btf__type_by_id(btf, container_id);
1431 if (!container_type) {
1432 pr_warn("map:%s cannot find BTF type for container_id:%u\n",
1433 map_name, container_id);
1434 return libbpf_err(-EINVAL);
1437 if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
1438 pr_warn("map:%s container_name:%s is an invalid container struct\n",
1439 map_name, container_name);
1440 return libbpf_err(-EINVAL);
1443 key = btf_members(container_type);
1446 key_size = btf__resolve_size(btf, key->type);
1448 pr_warn("map:%s invalid BTF key_type_size\n", map_name);
1449 return libbpf_err(key_size);
1452 if (expected_key_size != key_size) {
1453 pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1454 map_name, (__u32)key_size, expected_key_size);
1455 return libbpf_err(-EINVAL);
1458 value_size = btf__resolve_size(btf, value->type);
1459 if (value_size < 0) {
1460 pr_warn("map:%s invalid BTF value_type_size\n", map_name);
1461 return libbpf_err(value_size);
1464 if (expected_value_size != value_size) {
1465 pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1466 map_name, (__u32)value_size, expected_value_size);
1467 return libbpf_err(-EINVAL);
1470 *key_type_id = key->type;
1471 *value_type_id = value->type;
1476 static void btf_invalidate_raw_data(struct btf *btf)
1478 if (btf->raw_data) {
1479 free(btf->raw_data);
1480 btf->raw_data = NULL;
1482 if (btf->raw_data_swapped) {
1483 free(btf->raw_data_swapped);
1484 btf->raw_data_swapped = NULL;
1488 /* Ensure BTF is ready to be modified (by splitting into a three memory
1489 * regions for header, types, and strings). Also invalidate cached
1492 static int btf_ensure_modifiable(struct btf *btf)
1495 struct strset *set = NULL;
1498 if (btf_is_modifiable(btf)) {
1499 /* any BTF modification invalidates raw_data */
1500 btf_invalidate_raw_data(btf);
1504 /* split raw data into three memory regions */
1505 hdr = malloc(btf->hdr->hdr_len);
1506 types = malloc(btf->hdr->type_len);
1510 memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
1511 memcpy(types, btf->types_data, btf->hdr->type_len);
1513 /* build lookup index for all strings */
1514 set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
1520 /* only when everything was successful, update internal state */
1522 btf->types_data = types;
1523 btf->types_data_cap = btf->hdr->type_len;
1524 btf->strs_data = NULL;
1525 btf->strs_set = set;
1526 /* if BTF was created from scratch, all strings are guaranteed to be
1527 * unique and deduplicated
1529 if (btf->hdr->str_len == 0)
1530 btf->strs_deduped = true;
1531 if (!btf->base_btf && btf->hdr->str_len == 1)
1532 btf->strs_deduped = true;
1534 /* invalidate raw_data representation */
1535 btf_invalidate_raw_data(btf);
1546 /* Find an offset in BTF string section that corresponds to a given string *s*.
1548 * - >0 offset into string section, if string is found;
1549 * - -ENOENT, if string is not in the string section;
1550 * - <0, on any other error.
1552 int btf__find_str(struct btf *btf, const char *s)
1556 if (btf->base_btf) {
1557 off = btf__find_str(btf->base_btf, s);
1562 /* BTF needs to be in a modifiable state to build string lookup index */
1563 if (btf_ensure_modifiable(btf))
1564 return libbpf_err(-ENOMEM);
1566 off = strset__find_str(btf->strs_set, s);
1568 return libbpf_err(off);
1570 return btf->start_str_off + off;
1573 /* Add a string s to the BTF string section.
1575 * - > 0 offset into string section, on success;
1578 int btf__add_str(struct btf *btf, const char *s)
1582 if (btf->base_btf) {
1583 off = btf__find_str(btf->base_btf, s);
1588 if (btf_ensure_modifiable(btf))
1589 return libbpf_err(-ENOMEM);
1591 off = strset__add_str(btf->strs_set, s);
1593 return libbpf_err(off);
1595 btf->hdr->str_len = strset__data_size(btf->strs_set);
1597 return btf->start_str_off + off;
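
/* Illustrative sketch (not part of libbpf): btf__add_str() is idempotent
 * thanks to string deduplication, so adding the same string twice yields the
 * same offset, and btf__str_by_offset() reads it back.
 */
static int example_str_roundtrip(struct btf *btf)
{
	int off1 = btf__add_str(btf, "hello");
	int off2 = btf__add_str(btf, "hello");

	if (off1 < 0 || off1 != off2)
		return -EINVAL;
	return strcmp(btf__str_by_offset(btf, off1), "hello") ? -EINVAL : 0;
}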
1600 static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
1602 return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
1603 btf->hdr->type_len, UINT_MAX, add_sz);
1606 static void btf_type_inc_vlen(struct btf_type *t)
1608 t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
1611 static int btf_commit_type(struct btf *btf, int data_sz)
1615 err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
1617 return libbpf_err(err);
1619 btf->hdr->type_len += data_sz;
1620 btf->hdr->str_off += data_sz;
1622 return btf->start_id + btf->nr_types - 1;
1626 const struct btf *src;
1630 static int btf_rewrite_str(__u32 *str_off, void *ctx)
1632 struct btf_pipe *p = ctx;
1635 if (!*str_off) /* nothing to do for empty strings */
1638 off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
1646 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
1648 struct btf_pipe p = { .src = src_btf, .dst = btf };
1652 sz = btf_type_size(src_type);
1654 return libbpf_err(sz);
1656 /* deconstruct BTF, if necessary, and invalidate raw_data */
1657 if (btf_ensure_modifiable(btf))
1658 return libbpf_err(-ENOMEM);
1660 t = btf_add_type_mem(btf, sz);
1662 return libbpf_err(-ENOMEM);
1664 memcpy(t, src_type, sz);
1666 err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
1668 return libbpf_err(err);
1670 return btf_commit_type(btf, sz);
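
/* Illustrative sketch (not part of libbpf): copying one type from a source
 * BTF into a destination BTF. Referenced strings are transferred by
 * btf__add_type(), but type IDs inside the copied type are *not* rewritten.
 */
static int example_copy_type(struct btf *dst, const struct btf *src, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(src, id);

	if (!t)
		return -EINVAL;
	return btf__add_type(dst, src, t);
}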
1674 * Append new BTF_KIND_INT type with:
1675 * - *name* - non-empty, non-NULL type name;
1676 * - *byte_sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
1677 * - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
1679 * - >0, type ID of newly added BTF type;
1682 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
1687 /* non-empty name */
1688 if (!name || !name[0])
1689 return libbpf_err(-EINVAL);
1690 /* byte_sz must be power of 2 */
1691 if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
1692 return libbpf_err(-EINVAL);
1693 if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
1694 return libbpf_err(-EINVAL);
1696 /* deconstruct BTF, if necessary, and invalidate raw_data */
1697 if (btf_ensure_modifiable(btf))
1698 return libbpf_err(-ENOMEM);
1700 sz = sizeof(struct btf_type) + sizeof(int);
1701 t = btf_add_type_mem(btf, sz);
1703 return libbpf_err(-ENOMEM);
1705 /* if something goes wrong later, we might end up with an extra string,
1706 * but that shouldn't be a problem, because BTF can't be constructed
1707 * completely anyway and will most probably be just discarded
1709 name_off = btf__add_str(btf, name);
1713 t->name_off = name_off;
1714 t->info = btf_type_info(BTF_KIND_INT, 0, 0);
1716 /* set INT info, we don't allow setting legacy bit offset/size */
1717 *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
1719 return btf_commit_type(btf, sz);
1723 * Append new BTF_KIND_FLOAT type with:
1724 * - *name* - non-empty, non-NULL type name;
1725 * - *byte_sz* - size of the type, in bytes;
1727 * - >0, type ID of newly added BTF type;
1730 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
1735 /* non-empty name */
1736 if (!name || !name[0])
1737 return libbpf_err(-EINVAL);
1739 /* byte_sz must be one of the explicitly allowed values */
1740 if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
1742 return libbpf_err(-EINVAL);
1744 if (btf_ensure_modifiable(btf))
1745 return libbpf_err(-ENOMEM);
1747 sz = sizeof(struct btf_type);
1748 t = btf_add_type_mem(btf, sz);
1750 return libbpf_err(-ENOMEM);
1752 name_off = btf__add_str(btf, name);
1756 t->name_off = name_off;
1757 t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
1760 return btf_commit_type(btf, sz);
1763 /* it's completely legal to append BTF types with type IDs pointing forward to
1764 * types that haven't been appended yet, so we only make sure that id looks
1765 * sane; we can't guarantee that the ID will always be valid
1767 static int validate_type_id(int id)
1769 if (id < 0 || id > BTF_MAX_NR_TYPES)
1774 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
1775 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
1778 int sz, name_off = 0;
1780 if (validate_type_id(ref_type_id))
1781 return libbpf_err(-EINVAL);
1783 if (btf_ensure_modifiable(btf))
1784 return libbpf_err(-ENOMEM);
1786 sz = sizeof(struct btf_type);
1787 t = btf_add_type_mem(btf, sz);
1789 return libbpf_err(-ENOMEM);
1791 if (name && name[0]) {
1792 name_off = btf__add_str(btf, name);
1797 t->name_off = name_off;
1798 t->info = btf_type_info(kind, 0, 0);
1799 t->type = ref_type_id;
1801 return btf_commit_type(btf, sz);
1805 * Append new BTF_KIND_PTR type with:
1806 * - *ref_type_id* - referenced type ID, it might not exist yet;
1808 * - >0, type ID of newly added BTF type;
1811 int btf__add_ptr(struct btf *btf, int ref_type_id)
1813 return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
1817 * Append new BTF_KIND_ARRAY type with:
1818 * - *index_type_id* - type ID of the type describing array index;
1819 * - *elem_type_id* - type ID of the type describing array element;
1820 * - *nr_elems* - the size of the array;
1822 * - >0, type ID of newly added BTF type;
1825 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
1828 struct btf_array *a;
1831 if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
1832 return libbpf_err(-EINVAL);
1834 if (btf_ensure_modifiable(btf))
1835 return libbpf_err(-ENOMEM);
1837 sz = sizeof(struct btf_type) + sizeof(struct btf_array);
1838 t = btf_add_type_mem(btf, sz);
1840 return libbpf_err(-ENOMEM);
1843 t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
1847 a->type = elem_type_id;
1848 a->index_type = index_type_id;
1849 a->nelems = nr_elems;
1851 return btf_commit_type(btf, sz);
1854 /* generic STRUCT/UNION append function */
1855 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
1858 int sz, name_off = 0;
1860 if (btf_ensure_modifiable(btf))
1861 return libbpf_err(-ENOMEM);
1863 sz = sizeof(struct btf_type);
1864 t = btf_add_type_mem(btf, sz);
1866 return libbpf_err(-ENOMEM);
1868 if (name && name[0]) {
1869 name_off = btf__add_str(btf, name);
1874 /* start out with vlen=0 and no kflag; this will be adjusted when
1875 * adding each member
1877 t->name_off = name_off;
1878 t->info = btf_type_info(kind, 0, 0);
1881 return btf_commit_type(btf, sz);
1885 * Append new BTF_KIND_STRUCT type with:
1886 * - *name* - name of the struct, can be NULL or empty for anonymous structs;
1887 * - *byte_sz* - size of the struct, in bytes;
1889 * Struct initially has no fields in it. Fields can be added by
1890 * btf__add_field() right after btf__add_struct() succeeds.
1893 * - >0, type ID of newly added BTF type;
1896 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
1898 return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
1902 * Append new BTF_KIND_UNION type with:
1903 * - *name* - name of the union, can be NULL or empty for anonymous union;
1904 * - *byte_sz* - size of the union, in bytes;
1906 * Union initially has no fields in it. Fields can be added by
1907 * btf__add_field() right after btf__add_union() succeeds. All fields
1908 * should have *bit_offset* of 0.
1911 * - >0, type ID of newly added BTF type;
1914 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
1916 return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
1919 static struct btf_type *btf_last_type(struct btf *btf)
1921 return btf_type_by_id(btf, btf__get_nr_types(btf));
1925 * Append new field for the current STRUCT/UNION type with:
1926 * - *name* - name of the field, can be NULL or empty for anonymous field;
1927 * - *type_id* - type ID for the type describing field type;
1928 * - *bit_offset* - bit offset of the start of the field within struct/union;
1929 * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
1934 int btf__add_field(struct btf *btf, const char *name, int type_id,
1935 __u32 bit_offset, __u32 bit_size)
1938 struct btf_member *m;
1940 int sz, name_off = 0;
1942 /* last type should be union/struct */
1943 if (btf->nr_types == 0)
1944 return libbpf_err(-EINVAL);
1945 t = btf_last_type(btf);
1946 if (!btf_is_composite(t))
1947 return libbpf_err(-EINVAL);
1949 if (validate_type_id(type_id))
1950 return libbpf_err(-EINVAL);
1951 /* best-effort bit field offset/size enforcement */
1952 is_bitfield = bit_size || (bit_offset % 8 != 0);
1953 if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
1954 return libbpf_err(-EINVAL);
1956 /* only offset 0 is allowed for unions */
1957 if (btf_is_union(t) && bit_offset)
1958 return libbpf_err(-EINVAL);
1960 /* decompose and invalidate raw data */
1961 if (btf_ensure_modifiable(btf))
1962 return libbpf_err(-ENOMEM);
1964 sz = sizeof(struct btf_member);
1965 m = btf_add_type_mem(btf, sz);
1967 return libbpf_err(-ENOMEM);
1969 if (name && name[0]) {
1970 name_off = btf__add_str(btf, name);
1975 m->name_off = name_off;
1977 m->offset = bit_offset | (bit_size << 24);
1979 /* btf_add_type_mem can invalidate t pointer */
1980 t = btf_last_type(btf);
1981 /* update parent type's vlen and kflag */
1982 t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
1984 btf->hdr->type_len += sz;
1985 btf->hdr->str_off += sz;
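
/* Illustrative sketch (not part of libbpf): building
 * "struct pair { int x; int y; };" from scratch with the APIs above;
 * example_build_pair() is a hypothetical helper.
 */
static int example_build_pair(struct btf *btf)
{
	int int_id, st_id, err;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	if (int_id < 0)
		return int_id;
	st_id = btf__add_struct(btf, "pair", 8); /* two 4-byte ints */
	if (st_id < 0)
		return st_id;
	err = btf__add_field(btf, "x", int_id, 0, 0);          /* bits 0..31 */
	err = err ?: btf__add_field(btf, "y", int_id, 32, 0);  /* bits 32..63 */
	return err ?: st_id;
}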
1990 * Append new BTF_KIND_ENUM type with:
1991 * - *name* - name of the enum, can be NULL or empty for anonymous enums;
1992 * - *byte_sz* - size of the enum, in bytes.
1994 * Enum initially has no enum values in it (and corresponds to enum forward
1995 * declaration). Enumerator values can be added by btf__add_enum_value()
1996 * immediately after btf__add_enum() succeeds.
1999 * - >0, type ID of newly added BTF type;
2002 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2005 int sz, name_off = 0;
2007 /* byte_sz must be power of 2 */
2008 if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2009 return libbpf_err(-EINVAL);
2011 if (btf_ensure_modifiable(btf))
2012 return libbpf_err(-ENOMEM);
2014 sz = sizeof(struct btf_type);
2015 t = btf_add_type_mem(btf, sz);
2017 return libbpf_err(-ENOMEM);
2019 if (name && name[0]) {
2020 name_off = btf__add_str(btf, name);
2025 /* start out with vlen=0; it will be adjusted when adding enum values */
2026 t->name_off = name_off;
2027 t->info = btf_type_info(BTF_KIND_ENUM, 0, 0);
2030 return btf_commit_type(btf, sz);
2034 * Append new enum value for the current ENUM type with:
2035 * - *name* - name of the enumerator value, can't be NULL or empty;
2036 * - *value* - integer value corresponding to enum value *name*;
2041 int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
2047 /* last type should be BTF_KIND_ENUM */
2048 if (btf->nr_types == 0)
2049 return libbpf_err(-EINVAL);
2050 t = btf_last_type(btf);
2051 if (!btf_is_enum(t))
2052 return libbpf_err(-EINVAL);
2054 /* non-empty name */
2055 if (!name || !name[0])
2056 return libbpf_err(-EINVAL);
2057 if (value < INT_MIN || value > UINT_MAX)
2058 return libbpf_err(-E2BIG);
2060 /* decompose and invalidate raw data */
2061 if (btf_ensure_modifiable(btf))
2062 return libbpf_err(-ENOMEM);
2064 sz = sizeof(struct btf_enum);
2065 v = btf_add_type_mem(btf, sz);
2067 return libbpf_err(-ENOMEM);
2069 name_off = btf__add_str(btf, name);
2073 v->name_off = name_off;
2076 /* update parent type's vlen */
2077 t = btf_last_type(btf);
2078 btf_type_inc_vlen(t);
2080 btf->hdr->type_len += sz;
2081 btf->hdr->str_off += sz;
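
/* Illustrative sketch (not part of libbpf): an enum is built the same way,
 * with enumerator values appended one by one right after btf__add_enum().
 */
static int example_build_enum(struct btf *btf)
{
	int id, err;

	id = btf__add_enum(btf, "state", 4 /* byte_sz */);
	if (id < 0)
		return id;
	err = btf__add_enum_value(btf, "STATE_OFF", 0);
	err = err ?: btf__add_enum_value(btf, "STATE_ON", 1);
	return err ?: id;
}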
2086 * Append new BTF_KIND_FWD type with:
2087 * - *name*, non-empty/non-NULL name;
2088 * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2089 * BTF_FWD_UNION, or BTF_FWD_ENUM;
2091 * - >0, type ID of newly added BTF type;
2094 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2096 if (!name || !name[0])
2097 return libbpf_err(-EINVAL);
2100 case BTF_FWD_STRUCT:
2101 case BTF_FWD_UNION: {
2105 id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2108 t = btf_type_by_id(btf, id);
2109 t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2113 /* enum forward in BTF currently is just an enum with no enum
2114 * values; we also assume a standard 4-byte size for it
2116 return btf__add_enum(btf, name, sizeof(int));
2118 return libbpf_err(-EINVAL);
2123 * Append new BTF_KIND_TYPEDEF type with:
2124 * - *name*, non-empty/non-NULL name;
2125 * - *ref_type_id* - referenced type ID, it might not exist yet;
2127 * - >0, type ID of newly added BTF type;
2130 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2132 if (!name || !name[0])
2133 return libbpf_err(-EINVAL);
2135 return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2139 * Append new BTF_KIND_VOLATILE type with:
2140 * - *ref_type_id* - referenced type ID, it might not exist yet;
2142 * - >0, type ID of newly added BTF type;
2145 int btf__add_volatile(struct btf *btf, int ref_type_id)
2147 return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2151 * Append new BTF_KIND_CONST type with:
2152 * - *ref_type_id* - referenced type ID, it might not exist yet;
2154 * - >0, type ID of newly added BTF type;
2157 int btf__add_const(struct btf *btf, int ref_type_id)
2159 return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2163 * Append new BTF_KIND_RESTRICT type with:
2164 * - *ref_type_id* - referenced type ID, it might not exist yet;
2166 * - >0, type ID of newly added BTF type;
2169 int btf__add_restrict(struct btf *btf, int ref_type_id)
2171 return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2175 * Append new BTF_KIND_FUNC type with:
2176 * - *name*, non-empty/non-NULL name;
2177 * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2179 * - >0, type ID of newly added BTF type;
2182 int btf__add_func(struct btf *btf, const char *name,
2183 enum btf_func_linkage linkage, int proto_type_id)
2187 if (!name || !name[0])
2188 return libbpf_err(-EINVAL);
2189 if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2190 linkage != BTF_FUNC_EXTERN)
2191 return libbpf_err(-EINVAL);
2193 id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2195 struct btf_type *t = btf_type_by_id(btf, id);
2197 t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2199 return libbpf_err(id);
2203 * Append new BTF_KIND_FUNC_PROTO with:
2204 * - *ret_type_id* - type ID for return result of a function.
2206 * Function prototype initially has no arguments, but they can be added by
2207 * btf__add_func_param() one by one, immediately after
2208 * btf__add_func_proto() succeeds.
2211 * - >0, type ID of newly added BTF type;
2214 int btf__add_func_proto(struct btf *btf, int ret_type_id)
2219 if (validate_type_id(ret_type_id))
2220 return libbpf_err(-EINVAL);
2222 if (btf_ensure_modifiable(btf))
2223 return libbpf_err(-ENOMEM);
2225 sz = sizeof(struct btf_type);
2226 t = btf_add_type_mem(btf, sz);
2228 return libbpf_err(-ENOMEM);
2230 /* start out with vlen=0; this will be adjusted when adding function
2231 * parameters, if necessary
2234 t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
2235 t->type = ret_type_id;
2237 return btf_commit_type(btf, sz);
2241 * Append new function parameter for current FUNC_PROTO type with:
2242 * - *name* - parameter name, can be NULL or empty;
2243 * - *type_id* - type ID describing the type of the parameter.
2248 int btf__add_func_param(struct btf *btf, const char *name, int type_id)
2251 struct btf_param *p;
2252 int sz, name_off = 0;
2254 if (validate_type_id(type_id))
2255 return libbpf_err(-EINVAL);
2257 /* last type should be BTF_KIND_FUNC_PROTO */
2258 if (btf->nr_types == 0)
2259 return libbpf_err(-EINVAL);
2260 t = btf_last_type(btf);
2261 if (!btf_is_func_proto(t))
2262 return libbpf_err(-EINVAL);
2264 /* decompose and invalidate raw data */
2265 if (btf_ensure_modifiable(btf))
2266 return libbpf_err(-ENOMEM);
2268 sz = sizeof(struct btf_param);
2269 p = btf_add_type_mem(btf, sz);
2271 return libbpf_err(-ENOMEM);
2273 if (name && name[0]) {
2274 name_off = btf__add_str(btf, name);
2279 p->name_off = name_off;
2282 /* update parent type's vlen */
2283 t = btf_last_type(btf);
2284 btf_type_inc_vlen(t);
2286 btf->hdr->type_len += sz;
2287 btf->hdr->str_off += sz;
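
/* Illustrative sketch (not part of libbpf): declaring "int square(int x);"
 * as a FUNC_PROTO + FUNC pair. Type IDs may point forward, but here the int
 * type is appended first for simplicity.
 */
static int example_build_func(struct btf *btf)
{
	int int_id, proto_id, err;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	if (int_id < 0)
		return int_id;
	proto_id = btf__add_func_proto(btf, int_id /* return type */);
	if (proto_id < 0)
		return proto_id;
	err = btf__add_func_param(btf, "x", int_id);
	if (err)
		return err;
	return btf__add_func(btf, "square", BTF_FUNC_GLOBAL, proto_id);
}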
2292 * Append new BTF_KIND_VAR type with:
2293 * - *name* - non-empty/non-NULL name;
2294 * - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2295 * BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2296 * - *type_id* - type ID of the type describing the type of the variable.
2298 * - >0, type ID of newly added BTF type;
2301 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
2307 /* non-empty name */
2308 if (!name || !name[0])
2309 return libbpf_err(-EINVAL);
2310 if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2311 linkage != BTF_VAR_GLOBAL_EXTERN)
2312 return libbpf_err(-EINVAL);
2313 if (validate_type_id(type_id))
2314 return libbpf_err(-EINVAL);
2316 /* deconstruct BTF, if necessary, and invalidate raw_data */
2317 if (btf_ensure_modifiable(btf))
2318 return libbpf_err(-ENOMEM);
2320 sz = sizeof(struct btf_type) + sizeof(struct btf_var);
2321 t = btf_add_type_mem(btf, sz);
2323 return libbpf_err(-ENOMEM);
2325 name_off = btf__add_str(btf, name);
2329 t->name_off = name_off;
2330 t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
2334 v->linkage = linkage;
2336 return btf_commit_type(btf, sz);
2340 * Append new BTF_KIND_DATASEC type with:
2341 * - *name* - non-empty/non-NULL name;
2342 * - *byte_sz* - data section size, in bytes.
2344 * Data section is initially empty. Variables info can be added with
2345 * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2348 * - >0, type ID of newly added BTF type;
2351 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
2356 /* non-empty name */
2357 if (!name || !name[0])
2358 return libbpf_err(-EINVAL);
2360 if (btf_ensure_modifiable(btf))
2361 return libbpf_err(-ENOMEM);
2363 sz = sizeof(struct btf_type);
2364 t = btf_add_type_mem(btf, sz);
2366 return libbpf_err(-ENOMEM);
2368 name_off = btf__add_str(btf, name);
2372 /* start with vlen=0, which will be updated as var_secinfos are added */
2373 t->name_off = name_off;
2374 t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
2377 return btf_commit_type(btf, sz);
2381 * Append new data section variable information entry for current DATASEC type:
2382 * - *var_type_id* - type ID, describing type of the variable;
2383 * - *offset* - variable offset within data section, in bytes;
2384 * - *byte_sz* - variable size, in bytes.
2390 int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
2393 struct btf_var_secinfo *v;
2396 /* last type should be BTF_KIND_DATASEC */
2397 if (btf->nr_types == 0)
2398 return libbpf_err(-EINVAL);
2399 t = btf_last_type(btf);
2400 if (!btf_is_datasec(t))
2401 return libbpf_err(-EINVAL);
2403 if (validate_type_id(var_type_id))
2404 return libbpf_err(-EINVAL);
2406 /* decompose and invalidate raw data */
2407 if (btf_ensure_modifiable(btf))
2408 return libbpf_err(-ENOMEM);
2410 sz = sizeof(struct btf_var_secinfo);
2411 v = btf_add_type_mem(btf, sz);
2413 return libbpf_err(-ENOMEM);
2415 v->type = var_type_id;
2419 /* update parent type's vlen */
2420 t = btf_last_type(btf);
2421 btf_type_inc_vlen(t);
2423 btf->hdr->type_len += sz;
2424 btf->hdr->str_off += sz;
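
/* Illustrative sketch (not part of libbpf): describing a global variable
 * placed in a ".data"-like section using VAR + DATASEC records; names and
 * sizes below are hypothetical.
 */
static int example_build_datasec(struct btf *btf, int int_id)
{
	int var_id, sec_id, err;

	var_id = btf__add_var(btf, "my_var", BTF_VAR_GLOBAL_ALLOCATED, int_id);
	if (var_id < 0)
		return var_id;
	sec_id = btf__add_datasec(btf, ".data", 4 /* byte_sz */);
	if (sec_id < 0)
		return sec_id;
	err = btf__add_datasec_var_info(btf, var_id, 0 /* offset */, 4 /* size */);
	return err ?: sec_id;
}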
2428 struct btf_ext_sec_setup_param {
2432 struct btf_ext_info *ext_info;
2436 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2437 struct btf_ext_sec_setup_param *ext_sec)
2439 const struct btf_ext_info_sec *sinfo;
2440 struct btf_ext_info *ext_info;
2441 __u32 info_left, record_size;
2442 /* The start of the info sec (including the __u32 record_size). */
2445 if (ext_sec->len == 0)
2448 if (ext_sec->off & 0x03) {
2449 pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2454 info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2455 info_left = ext_sec->len;
2457 if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2458 pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2459 ext_sec->desc, ext_sec->off, ext_sec->len);
2463 /* At least a record size */
2464 if (info_left < sizeof(__u32)) {
2465 pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2469 /* The record size needs to meet the minimum standard */
2470 record_size = *(__u32 *)info;
2471 if (record_size < ext_sec->min_rec_size ||
2472 record_size & 0x03) {
2473 pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2474 ext_sec->desc, record_size);
2478 sinfo = info + sizeof(__u32);
2479 info_left -= sizeof(__u32);
2481 /* If no records, return failure now so .BTF.ext won't be used. */
2483 pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
2488 unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2489 __u64 total_record_size;
2492 if (info_left < sec_hdrlen) {
2493 pr_debug("%s section header is not found in .BTF.ext\n",
2498 num_records = sinfo->num_info;
2499 if (num_records == 0) {
2500 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2505 total_record_size = sec_hdrlen +
2506 (__u64)num_records * record_size;
2507 if (info_left < total_record_size) {
2508 pr_debug("%s section data is truncated in .BTF.ext\n",
2513 info_left -= total_record_size;
2514 sinfo = (void *)sinfo + total_record_size;
2517 ext_info = ext_sec->ext_info;
2518 ext_info->len = ext_sec->len - sizeof(__u32);
2519 ext_info->rec_size = record_size;
2520 ext_info->info = info + sizeof(__u32);
2525 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
2527 struct btf_ext_sec_setup_param param = {
2528 .off = btf_ext->hdr->func_info_off,
2529 .len = btf_ext->hdr->func_info_len,
2530 .min_rec_size = sizeof(struct bpf_func_info_min),
2531 .ext_info = &btf_ext->func_info,
2535 return btf_ext_setup_info(btf_ext, ¶m);
2538 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
2540 struct btf_ext_sec_setup_param param = {
2541 .off = btf_ext->hdr->line_info_off,
2542 .len = btf_ext->hdr->line_info_len,
2543 .min_rec_size = sizeof(struct bpf_line_info_min),
2544 .ext_info = &btf_ext->line_info,
2545 .desc = "line_info",
2548 return btf_ext_setup_info(btf_ext, ¶m);
2551 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
2553 struct btf_ext_sec_setup_param param = {
2554 .off = btf_ext->hdr->core_relo_off,
2555 .len = btf_ext->hdr->core_relo_len,
2556 .min_rec_size = sizeof(struct bpf_core_relo),
2557 .ext_info = &btf_ext->core_relo_info,
2558 .desc = "core_relo",
2561 return btf_ext_setup_info(btf_ext, ¶m);
2564 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
2566 const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
2568 if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
2569 data_size < hdr->hdr_len) {
2570 pr_debug("BTF.ext header not found\n");
2574 if (hdr->magic == bswap_16(BTF_MAGIC)) {
2575 pr_warn("BTF.ext in non-native endianness is not supported\n");
2577 } else if (hdr->magic != BTF_MAGIC) {
2578 pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
2582 if (hdr->version != BTF_VERSION) {
2583 pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
2588 pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
2592 if (data_size == hdr->hdr_len) {
2593 pr_debug("BTF.ext has no data\n");
2600 void btf_ext__free(struct btf_ext *btf_ext)
2602 if (IS_ERR_OR_NULL(btf_ext))
2604 free(btf_ext->data);
2608 struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
2610 struct btf_ext *btf_ext;
2613 err = btf_ext_parse_hdr(data, size);
2615 return libbpf_err_ptr(err);
2617 btf_ext = calloc(1, sizeof(struct btf_ext));
2619 return libbpf_err_ptr(-ENOMEM);
2621 btf_ext->data_size = size;
2622 btf_ext->data = malloc(size);
2623 if (!btf_ext->data) {
2627 memcpy(btf_ext->data, data, size);
2629 if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
2634 err = btf_ext_setup_func_info(btf_ext);
2638 err = btf_ext_setup_line_info(btf_ext);
2642 if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) {
2647 err = btf_ext_setup_core_relos(btf_ext);
2653 btf_ext__free(btf_ext);
2654 return libbpf_err_ptr(err);
2660 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
2662 *size = btf_ext->data_size;
2663 return btf_ext->data;
2666 static int btf_ext_reloc_info(const struct btf *btf,
2667 const struct btf_ext_info *ext_info,
2668 const char *sec_name, __u32 insns_cnt,
2669 void **info, __u32 *cnt)
2671 __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
2672 __u32 i, record_size, existing_len, records_len;
2673 struct btf_ext_info_sec *sinfo;
2674 const char *info_sec_name;
2678 record_size = ext_info->rec_size;
2679 sinfo = ext_info->info;
2680 remain_len = ext_info->len;
2681 while (remain_len > 0) {
2682 records_len = sinfo->num_info * record_size;
2683 info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
2684 if (strcmp(info_sec_name, sec_name)) {
2685 remain_len -= sec_hdrlen + records_len;
2686 sinfo = (void *)sinfo + sec_hdrlen + records_len;
2690 existing_len = (*cnt) * record_size;
2691 data = realloc(*info, existing_len + records_len);
2693 return libbpf_err(-ENOMEM);
2695 memcpy(data + existing_len, sinfo->data, records_len);
		/* adjust insn_off only, the rest of the data will be passed
2699 for (i = 0; i < sinfo->num_info; i++) {
2702 insn_off = data + existing_len + (i * record_size);
2703 *insn_off = *insn_off / sizeof(struct bpf_insn) + insns_cnt;
2706 *cnt += sinfo->num_info;
2710 return libbpf_err(-ENOENT);
2713 int btf_ext__reloc_func_info(const struct btf *btf,
2714 const struct btf_ext *btf_ext,
2715 const char *sec_name, __u32 insns_cnt,
2716 void **func_info, __u32 *cnt)
2718 return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
2719 insns_cnt, func_info, cnt);
2722 int btf_ext__reloc_line_info(const struct btf *btf,
2723 const struct btf_ext *btf_ext,
2724 const char *sec_name, __u32 insns_cnt,
2725 void **line_info, __u32 *cnt)
2727 return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
2728 insns_cnt, line_info, cnt);
2731 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
2733 return btf_ext->func_info.rec_size;
2736 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
2738 return btf_ext->line_info.rec_size;
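/* Usage sketch for the .BTF.ext API above (error handling abbreviated; `data`
 * and `size` would typically come from an ELF's .BTF.ext section):
 *
 *	const void *raw;
 *	__u32 raw_sz;
 *	struct btf_ext *ext;
 *
 *	ext = btf_ext__new(data, size);
 *	if (libbpf_get_error(ext))
 *		return -EINVAL;
 *	raw = btf_ext__get_raw_data(ext, &raw_sz);
 *	...
 *	btf_ext__free(ext);
 */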
2743 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
2744 const struct btf_dedup_opts *opts);
2745 static void btf_dedup_free(struct btf_dedup *d);
2746 static int btf_dedup_prep(struct btf_dedup *d);
2747 static int btf_dedup_strings(struct btf_dedup *d);
2748 static int btf_dedup_prim_types(struct btf_dedup *d);
2749 static int btf_dedup_struct_types(struct btf_dedup *d);
2750 static int btf_dedup_ref_types(struct btf_dedup *d);
2751 static int btf_dedup_compact_types(struct btf_dedup *d);
2752 static int btf_dedup_remap_types(struct btf_dedup *d);
2755 * Deduplicate BTF types and strings.
2757 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
2758 * section with all BTF type descriptors and string data. It overwrites that
2759 * memory in-place with deduplicated types and strings without any loss of
2760 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
2761 * is provided, all the strings referenced from .BTF.ext section are honored
2762 * and updated to point to the right offsets after deduplication.
 * If the function returns an error, type/string data might be garbled and
 * should be discarded.
 * A more verbose and detailed description of both the problem btf_dedup is
 * solving and the solution itself can be found at:
2769 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
2771 * Problem description and justification
2772 * =====================================
2774 * BTF type information is typically emitted either as a result of conversion
2775 * from DWARF to BTF or directly by compiler. In both cases, each compilation
2776 * unit contains information about a subset of all the types that are used
2777 * in an application. These subsets are frequently overlapping and contain a lot
2778 * of duplicated information when later concatenated together into a single
2779 * binary. This algorithm ensures that each unique type is represented by single
2780 * BTF type descriptor, greatly reducing resulting size of BTF data.
2782 * Compilation unit isolation and subsequent duplication of data is not the only
 * problem. The same type hierarchy (e.g., a struct and all the types that the struct
2784 * references) in different compilation units can be represented in BTF to
2785 * various degrees of completeness (or, rather, incompleteness) due to
2786 * struct/union forward declarations.
 * Let's take a look at an example that we'll use to better understand the
 * problem (and solution). Suppose we have two compilation units, each using
 * the same `struct S`, but each having incomplete type information about the
 * structs involved:
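 *
 * // CU #1 (an illustrative sketch):
 * struct S;
 * struct A {
 *	int a;
 *	struct A *self;
 *	struct S *parent;
 * };
 * struct B;
 * struct S {
 *	struct A *a_ptr;
 *	struct B *b_ptr;
 * };
 *
 * // CU #2 (an illustrative sketch):
 * struct S;
 * struct B {
 *	int b;
 *	struct B *self;
 *	struct S *parent;
 * };
 * struct A;
 * struct S {
 *	struct A *a_ptr;
 *	struct B *b_ptr;
 * };
 *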
 * In case of CU #1, BTF data will know only that `struct B` exists (but no
 * more than that), while knowing the complete type information about
 * `struct A`. For CU #2, it will know full type information about `struct B`,
 * but will only know about the forward declaration of `struct A` (in BTF
 * terms, it will have a `BTF_KIND_FWD` type descriptor with name `A`).
2825 * This compilation unit isolation means that it's possible that there is no
2826 * single CU with complete type information describing structs `S`, `A`, and
2827 * `B`. Also, we might get tons of duplicated and redundant type information.
2829 * Additional complication we need to keep in mind comes from the fact that
2830 * types, in general, can form graphs containing cycles, not just DAGs.
 * While the algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
2834 * E.g., in the example above with two compilation units having partial type
 * information for structs `A` and `B`, the algorithm will emit
2836 * a single copy of each BTF type that describes structs `A`, `B`, and `S`
2837 * (as well as type information for `int` and pointers), as if they were defined
2838 * in a single compilation unit as:
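 *
 * // (an illustrative sketch of the deduplicated output)
 * struct A {
 *	int a;
 *	struct A *self;
 *	struct S *parent;
 * };
 * struct B {
 *	int b;
 *	struct B *self;
 *	struct S *parent;
 * };
 * struct S {
 *	struct A *a_ptr;
 *	struct B *b_ptr;
 * };
 *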
2858 * Algorithm completes its work in 6 separate passes:
2860 * 1. Strings deduplication.
2861 * 2. Primitive types deduplication (int, enum, fwd).
2862 * 3. Struct/union types deduplication.
2863 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
2864 * protos, and const/volatile/restrict modifiers).
2865 * 5. Types compaction.
2866 * 6. Types remapping.
2868 * Algorithm determines canonical type descriptor, which is a single
2869 * representative type for each truly unique type. This canonical type is the
2870 * one that will go into final deduplicated BTF type information. For
2871 * struct/unions, it is also the type that algorithm will merge additional type
2872 * information into (while resolving FWDs), as it discovers it from data in
2873 * other CUs. Each input BTF type eventually gets either mapped to itself, if
2874 * that type is canonical, or to some other type, if that type is equivalent
2875 * and was chosen as canonical representative. This mapping is stored in
2876 * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
2877 * FWD type got resolved to.
2879 * To facilitate fast discovery of canonical types, we also maintain canonical
2880 * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
2881 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
2882 * that match that signature. With sufficiently good choice of type signature
 * hashing function, we can limit the number of canonical types for each unique
 * type signature to a very small number, making it very quick to find the
 * canonical type for any duplicated type.
2887 * Struct/union deduplication is the most critical part and algorithm for
 * deduplicating structs/unions is described in greater detail in comments for
2889 * `btf_dedup_is_equiv` function.
2891 int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
2892 const struct btf_dedup_opts *opts)
2894 struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
2898 pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
2899 return libbpf_err(-EINVAL);
2902 if (btf_ensure_modifiable(btf))
2903 return libbpf_err(-ENOMEM);
2905 err = btf_dedup_prep(d);
2907 pr_debug("btf_dedup_prep failed:%d\n", err);
2910 err = btf_dedup_strings(d);
2912 pr_debug("btf_dedup_strings failed:%d\n", err);
2915 err = btf_dedup_prim_types(d);
2917 pr_debug("btf_dedup_prim_types failed:%d\n", err);
2920 err = btf_dedup_struct_types(d);
2922 pr_debug("btf_dedup_struct_types failed:%d\n", err);
2925 err = btf_dedup_ref_types(d);
2927 pr_debug("btf_dedup_ref_types failed:%d\n", err);
2930 err = btf_dedup_compact_types(d);
2932 pr_debug("btf_dedup_compact_types failed:%d\n", err);
2935 err = btf_dedup_remap_types(d);
2937 pr_debug("btf_dedup_remap_types failed:%d\n", err);
2943 return libbpf_err(err);
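/* A typical call site looks along these lines (a sketch; btf_ext may be NULL
 * when there is no .BTF.ext data, and opts may be NULL for defaults):
 *
 *	struct btf_dedup_opts opts = {};
 *	int err = btf__dedup(btf, btf_ext, &opts);
 *
 *	if (err)
 *		pr_warn("BTF dedup failed: %d\n", err);
 */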
2946 #define BTF_UNPROCESSED_ID ((__u32)-1)
2947 #define BTF_IN_PROGRESS_ID ((__u32)-2)
2950 /* .BTF section to be deduped in-place */
2953 * Optional .BTF.ext section. When provided, any strings referenced
2954 * from it will be taken into account when deduping strings
2956 struct btf_ext *btf_ext;
2958 * This is a map from any type's signature hash to a list of possible
2959 * canonical representative type candidates. Hash collisions are
 * ignored, so even types of various kinds can share the same list of
2961 * candidates, which is fine because we rely on subsequent
2962 * btf_xxx_equal() checks to authoritatively verify type equality.
2964 struct hashmap *dedup_table;
2965 /* Canonical types map */
2967 /* Hypothetical mapping, used during type graph equivalence checks */
2972 /* Whether hypothetical mapping, if successful, would need to adjust
2973 * already canonicalized types (due to a new forward declaration to
2974 * concrete type resolution). In such case, during split BTF dedup
2975 * candidate type would still be considered as different, because base
2976 * BTF is considered to be immutable.
2978 bool hypot_adjust_canon;
2979 /* Various option modifying behavior of algorithm */
2980 struct btf_dedup_opts opts;
2981 /* temporary strings deduplication state */
2982 struct strset *strs_set;
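/* Multiplicative 31-based hash combiner (the same scheme as Java's classic
 * String.hashCode()): cheap, and good enough for dedup candidate bucketing,
 * since collisions are re-verified with full btf_xxx_equal() checks anyway.
 */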
2985 static long hash_combine(long h, long value)
2987 return h * 31 + value;
2990 #define for_each_dedup_cand(d, node, hash) \
2991 hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
2993 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
2995 return hashmap__append(d->dedup_table,
2996 (void *)hash, (void *)(long)type_id);
2999 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3000 __u32 from_id, __u32 to_id)
3002 if (d->hypot_cnt == d->hypot_cap) {
3005 d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3006 new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3009 d->hypot_list = new_list;
3011 d->hypot_list[d->hypot_cnt++] = from_id;
3012 d->hypot_map[from_id] = to_id;
3016 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3020 for (i = 0; i < d->hypot_cnt; i++)
3021 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3023 d->hypot_adjust_canon = false;
3026 static void btf_dedup_free(struct btf_dedup *d)
3028 hashmap__free(d->dedup_table);
3029 d->dedup_table = NULL;
3035 d->hypot_map = NULL;
3037 free(d->hypot_list);
3038 d->hypot_list = NULL;
3043 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
3048 static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
3053 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
3058 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
3059 const struct btf_dedup_opts *opts)
3061 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
3062 hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
3063 int i, err = 0, type_cnt;
3066 return ERR_PTR(-ENOMEM);
3068 d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
3069 /* dedup_table_size is now used only to force collisions in tests */
3070 if (opts && opts->dedup_table_size == 1)
3071 hash_fn = btf_dedup_collision_hash_fn;
3074 d->btf_ext = btf_ext;
3076 d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
3077 if (IS_ERR(d->dedup_table)) {
3078 err = PTR_ERR(d->dedup_table);
3079 d->dedup_table = NULL;
3083 type_cnt = btf__get_nr_types(btf) + 1;
3084 d->map = malloc(sizeof(__u32) * type_cnt);
3089 /* special BTF "void" type is made canonical immediately */
3091 for (i = 1; i < type_cnt; i++) {
3092 struct btf_type *t = btf_type_by_id(d->btf, i);
3094 /* VAR and DATASEC are never deduped and are self-canonical */
3095 if (btf_is_var(t) || btf_is_datasec(t))
3098 d->map[i] = BTF_UNPROCESSED_ID;
3101 d->hypot_map = malloc(sizeof(__u32) * type_cnt);
3102 if (!d->hypot_map) {
3106 for (i = 0; i < type_cnt; i++)
3107 d->hypot_map[i] = BTF_UNPROCESSED_ID;
3112 return ERR_PTR(err);
 * Iterate over all possible places in .BTF and .BTF.ext that can reference
 * a string and pass a pointer to each one to the provided callback `fn`.
3122 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3126 for (i = 0; i < d->btf->nr_types; i++) {
3127 struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3129 r = btf_type_visit_str_offs(t, fn, ctx);
3137 r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3144 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3146 struct btf_dedup *d = ctx;
3147 __u32 str_off = *str_off_ptr;
	/* don't touch the empty string or strings from the base BTF */
3152 if (str_off == 0 || str_off < d->btf->start_str_off)
3155 s = btf__str_by_offset(d->btf, str_off);
3156 if (d->btf->base_btf) {
3157 err = btf__find_str(d->btf->base_btf, s);
3166 off = strset__add_str(d->strs_set, s);
3170 *str_off_ptr = d->btf->start_str_off + off;
 * Dedup strings and filter out those that are not referenced from either .BTF
3176 * or .BTF.ext (if provided) sections.
3178 * This is done by building index of all strings in BTF's string section,
3179 * then iterating over all entities that can reference strings (e.g., type
3180 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3181 * strings as used. After that all used strings are deduped and compacted into
3182 * sequential blob of memory and new offsets are calculated. Then all the string
3183 * references are iterated again and rewritten using new offsets.
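 *
 * For example (hypothetical offsets): if "foo" appears at string offsets 5
 * and 42 before dedup, afterwards there will be a single "foo", both
 * references will be rewritten to its one new offset, and strings that are
 * never referenced are dropped entirely.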
3185 static int btf_dedup_strings(struct btf_dedup *d)
3189 if (d->btf->strs_deduped)
3192 d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
3193 if (IS_ERR(d->strs_set)) {
3194 err = PTR_ERR(d->strs_set);
3198 if (!d->btf->base_btf) {
3199 /* insert empty string; we won't be looking it up during strings
3200 * dedup, but it's good to have it for generic BTF string lookups
3202 err = strset__add_str(d->strs_set, "");
3207 /* remap string offsets */
3208 err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
3212 /* replace BTF string data and hash with deduped ones */
3213 strset__free(d->btf->strs_set);
3214 d->btf->hdr->str_len = strset__data_size(d->strs_set);
3215 d->btf->strs_set = d->strs_set;
3217 d->btf->strs_deduped = true;
3221 strset__free(d->strs_set);
3227 static long btf_hash_common(struct btf_type *t)
3231 h = hash_combine(0, t->name_off);
3232 h = hash_combine(h, t->info);
3233 h = hash_combine(h, t->size);
3237 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
3239 return t1->name_off == t2->name_off &&
3240 t1->info == t2->info &&
3241 t1->size == t2->size;
3244 /* Calculate type signature hash of INT. */
3245 static long btf_hash_int(struct btf_type *t)
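	/* INT's extra info (encoding, bit offset, nr_bits) is stored in the
	 * __u32 that immediately follows struct btf_type
	 */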
3247 __u32 info = *(__u32 *)(t + 1);
3250 h = btf_hash_common(t);
3251 h = hash_combine(h, info);
3255 /* Check structural equality of two INTs. */
3256 static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
3260 if (!btf_equal_common(t1, t2))
3262 info1 = *(__u32 *)(t1 + 1);
3263 info2 = *(__u32 *)(t2 + 1);
3264 return info1 == info2;
3267 /* Calculate type signature hash of ENUM. */
3268 static long btf_hash_enum(struct btf_type *t)
3272 /* don't hash vlen and enum members to support enum fwd resolving */
3273 h = hash_combine(0, t->name_off);
3274 h = hash_combine(h, t->info & ~0xffff);
3275 h = hash_combine(h, t->size);
3279 /* Check structural equality of two ENUMs. */
3280 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3282 const struct btf_enum *m1, *m2;
3286 if (!btf_equal_common(t1, t2))
3289 vlen = btf_vlen(t1);
3292 for (i = 0; i < vlen; i++) {
3293 if (m1->name_off != m2->name_off || m1->val != m2->val)
3301 static inline bool btf_is_enum_fwd(struct btf_type *t)
3303 return btf_is_enum(t) && btf_vlen(t) == 0;
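/* E.g., a forward-declared `enum E;` is emitted as BTF_KIND_ENUM with
 * vlen == 0, while a complete `enum E { V = 1 };` has vlen == 1; the check
 * below treats such a fwd as compatible with a full enum of matching name
 * and size.
 */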
3306 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
3308 if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
3309 return btf_equal_enum(t1, t2);
3310 /* ignore vlen when comparing */
3311 return t1->name_off == t2->name_off &&
3312 (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
3313 t1->size == t2->size;
3317 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
 * as referenced type ID equivalence is established separately during the type
 * graph equivalence check.
3321 static long btf_hash_struct(struct btf_type *t)
3323 const struct btf_member *member = btf_members(t);
3324 __u32 vlen = btf_vlen(t);
3325 long h = btf_hash_common(t);
3328 for (i = 0; i < vlen; i++) {
3329 h = hash_combine(h, member->name_off);
3330 h = hash_combine(h, member->offset);
		/* no hashing of the referenced type ID, as it may still be unresolved */
 * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type
3339 * IDs. This check is performed during type graph equivalence check and
3340 * referenced types equivalence is checked separately.
3342 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3344 const struct btf_member *m1, *m2;
3348 if (!btf_equal_common(t1, t2))
3351 vlen = btf_vlen(t1);
3352 m1 = btf_members(t1);
3353 m2 = btf_members(t2);
3354 for (i = 0; i < vlen; i++) {
3355 if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3364 * Calculate type signature hash of ARRAY, including referenced type IDs,
3365 * under assumption that they were already resolved to canonical type IDs and
3366 * are not going to change.
3368 static long btf_hash_array(struct btf_type *t)
3370 const struct btf_array *info = btf_array(t);
3371 long h = btf_hash_common(t);
3373 h = hash_combine(h, info->type);
3374 h = hash_combine(h, info->index_type);
3375 h = hash_combine(h, info->nelems);
3380 * Check exact equality of two ARRAYs, taking into account referenced
3381 * type IDs, under assumption that they were already resolved to canonical
3382 * type IDs and are not going to change.
3383 * This function is called during reference types deduplication to compare
3384 * ARRAY to potential canonical representative.
3386 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3388 const struct btf_array *info1, *info2;
3390 if (!btf_equal_common(t1, t2))
3393 info1 = btf_array(t1);
3394 info2 = btf_array(t2);
3395 return info1->type == info2->type &&
3396 info1->index_type == info2->index_type &&
3397 info1->nelems == info2->nelems;
3401 * Check structural compatibility of two ARRAYs, ignoring referenced type
3402 * IDs. This check is performed during type graph equivalence check and
3403 * referenced types equivalence is checked separately.
3405 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3407 if (!btf_equal_common(t1, t2))
3410 return btf_array(t1)->nelems == btf_array(t2)->nelems;
3414 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3415 * under assumption that they were already resolved to canonical type IDs and
3416 * are not going to change.
3418 static long btf_hash_fnproto(struct btf_type *t)
3420 const struct btf_param *member = btf_params(t);
3421 __u16 vlen = btf_vlen(t);
3422 long h = btf_hash_common(t);
3425 for (i = 0; i < vlen; i++) {
3426 h = hash_combine(h, member->name_off);
3427 h = hash_combine(h, member->type);
3434 * Check exact equality of two FUNC_PROTOs, taking into account referenced
3435 * type IDs, under assumption that they were already resolved to canonical
3436 * type IDs and are not going to change.
3437 * This function is called during reference types deduplication to compare
3438 * FUNC_PROTO to potential canonical representative.
3440 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3442 const struct btf_param *m1, *m2;
3446 if (!btf_equal_common(t1, t2))
3449 vlen = btf_vlen(t1);
3450 m1 = btf_params(t1);
3451 m2 = btf_params(t2);
3452 for (i = 0; i < vlen; i++) {
3453 if (m1->name_off != m2->name_off || m1->type != m2->type)
3462 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3463 * IDs. This check is performed during type graph equivalence check and
3464 * referenced types equivalence is checked separately.
3466 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3468 const struct btf_param *m1, *m2;
3472 /* skip return type ID */
3473 if (t1->name_off != t2->name_off || t1->info != t2->info)
3476 vlen = btf_vlen(t1);
3477 m1 = btf_params(t1);
3478 m2 = btf_params(t2);
3479 for (i = 0; i < vlen; i++) {
3480 if (m1->name_off != m2->name_off)
3488 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
3489 * types and initializing the rest of the state (canonical type mapping) for
3490 * the fixed base BTF part.
3492 static int btf_dedup_prep(struct btf_dedup *d)
3498 if (!d->btf->base_btf)
3501 for (type_id = 1; type_id < d->btf->start_id; type_id++) {
3502 t = btf_type_by_id(d->btf, type_id);
3504 /* all base BTF types are self-canonical by definition */
3505 d->map[type_id] = type_id;
3507 switch (btf_kind(t)) {
3509 case BTF_KIND_DATASEC:
		/* VAR and DATASEC are never hashed/deduplicated */
3512 case BTF_KIND_CONST:
3513 case BTF_KIND_VOLATILE:
3514 case BTF_KIND_RESTRICT:
3517 case BTF_KIND_TYPEDEF:
3519 case BTF_KIND_FLOAT:
3520 h = btf_hash_common(t);
3523 h = btf_hash_int(t);
3526 h = btf_hash_enum(t);
3528 case BTF_KIND_STRUCT:
3529 case BTF_KIND_UNION:
3530 h = btf_hash_struct(t);
3532 case BTF_KIND_ARRAY:
3533 h = btf_hash_array(t);
3535 case BTF_KIND_FUNC_PROTO:
3536 h = btf_hash_fnproto(t);
3539 pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
3542 if (btf_dedup_table_add(d, h, type_id))
 * Deduplicate primitive types that can't reference other types, by calculating
3551 * their type signature hash and comparing them with any possible canonical
3552 * candidate. If no canonical candidate matches, type itself is marked as
3553 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
3555 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
3557 struct btf_type *t = btf_type_by_id(d->btf, type_id);
3558 struct hashmap_entry *hash_entry;
3559 struct btf_type *cand;
3560 /* if we don't find equivalent type, then we are canonical */
3561 __u32 new_id = type_id;
3565 switch (btf_kind(t)) {
3566 case BTF_KIND_CONST:
3567 case BTF_KIND_VOLATILE:
3568 case BTF_KIND_RESTRICT:
3570 case BTF_KIND_TYPEDEF:
3571 case BTF_KIND_ARRAY:
3572 case BTF_KIND_STRUCT:
3573 case BTF_KIND_UNION:
3575 case BTF_KIND_FUNC_PROTO:
3577 case BTF_KIND_DATASEC:
3581 h = btf_hash_int(t);
3582 for_each_dedup_cand(d, hash_entry, h) {
3583 cand_id = (__u32)(long)hash_entry->value;
3584 cand = btf_type_by_id(d->btf, cand_id);
3585 if (btf_equal_int(t, cand)) {
3593 h = btf_hash_enum(t);
3594 for_each_dedup_cand(d, hash_entry, h) {
3595 cand_id = (__u32)(long)hash_entry->value;
3596 cand = btf_type_by_id(d->btf, cand_id);
3597 if (btf_equal_enum(t, cand)) {
3601 if (d->opts.dont_resolve_fwds)
3603 if (btf_compat_enum(t, cand)) {
3604 if (btf_is_enum_fwd(t)) {
3605 /* resolve fwd to full enum */
3609 /* resolve canonical enum fwd to full enum */
3610 d->map[cand_id] = type_id;
3616 case BTF_KIND_FLOAT:
3617 h = btf_hash_common(t);
3618 for_each_dedup_cand(d, hash_entry, h) {
3619 cand_id = (__u32)(long)hash_entry->value;
3620 cand = btf_type_by_id(d->btf, cand_id);
3621 if (btf_equal_common(t, cand)) {
3632 d->map[type_id] = new_id;
3633 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
3639 static int btf_dedup_prim_types(struct btf_dedup *d)
3643 for (i = 0; i < d->btf->nr_types; i++) {
3644 err = btf_dedup_prim_type(d, d->btf->start_id + i);
3652 * Check whether type is already mapped into canonical one (could be to itself).
3654 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
3656 return d->map[type_id] <= BTF_MAX_NR_TYPES;
3660 * Resolve type ID into its canonical type ID, if any; otherwise return original
3661 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
3662 * STRUCT/UNION link and resolve it into canonical type ID as well.
3664 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
3666 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
3667 type_id = d->map[type_id];
 * Resolve FWD to the underlying STRUCT/UNION, if any; otherwise return the
 * original type ID.
3675 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
3677 __u32 orig_type_id = type_id;
3679 if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
3682 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
3683 type_id = d->map[type_id];
3685 if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
3688 return orig_type_id;
3692 static inline __u16 btf_fwd_kind(struct btf_type *t)
3694 return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
/* Check if the two given types are identical ARRAY definitions */
3698 static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
3700 struct btf_type *t1, *t2;
3702 t1 = btf_type_by_id(d->btf, id1);
3703 t2 = btf_type_by_id(d->btf, id2);
3704 if (!btf_is_array(t1) || !btf_is_array(t2))
3707 return btf_equal_array(t1, t2);
3711 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
3712 * call it "candidate graph" in this description for brevity) to a type graph
3713 * formed by (potential) canonical struct/union ("canonical graph" for brevity
3714 * here, though keep in mind that not all types in canonical graph are
3715 * necessarily canonical representatives themselves, some of them might be
 * duplicates or their uniqueness might not have been established yet).
3718 * - >0, if type graphs are equivalent;
3719 * - 0, if not equivalent;
3722 * Algorithm performs side-by-side DFS traversal of both type graphs and checks
3723 * equivalence of BTF types at each step. If at any point BTF types in candidate
3724 * and canonical graphs are not compatible structurally, whole graphs are
3725 * incompatible. If types are structurally equivalent (i.e., all information
3726 * except referenced type IDs is exactly the same), a mapping from `canon_id` to
 * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
3728 * If a type references other types, then those referenced types are checked
3729 * for equivalence recursively.
3731 * During DFS traversal, if we find that for current `canon_id` type we
 * already have some mapping in the hypothetical map, we check for two possible
 * cases:
3734 * - `canon_id` is mapped to exactly the same type as `cand_id`. This will
3735 * happen when type graphs have cycles. In this case we assume those two
3736 * types are equivalent.
 * - `canon_id` is mapped to a different type. This is a contradiction in our
 * hypothetical mapping, because the same node in the canonical graph would
 * correspond to two different types in the candidate graph, which for equivalent type
3740 * graphs shouldn't happen. This condition terminates equivalence check
3741 * with negative result.
 * If type graph traversal exhausts types to check and finds no contradiction,
3744 * then type graphs are equivalent.
3746 * When checking types for equivalence, there is one special case: FWD types.
3747 * If FWD type resolution is allowed and one of the types (either from canonical
3748 * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
3749 * flag) and their names match, hypothetical mapping is updated to point from
3750 * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
3751 * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
3753 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
 * if there are two identically named (or anonymous) structs/unions that are
 * structurally compatible, one of which has a FWD field while the other is a
 * concrete STRUCT/UNION, but according to C sources they are different
 * structs/unions that reference different types with the same name. This is
 * extremely unlikely to happen, but btf_dedup API allows disabling FWD resolution if
3759 * this logic is causing problems.
 * Doing FWD resolution means that both the candidate and the canonical graphs
 * can consist of portions that come from multiple compilation units.
3763 * This is due to the fact that types within single compilation unit are always
3764 * deduplicated and FWDs are already resolved, if referenced struct/union
 * definition is available. So, if we had an unresolved FWD and found the corresponding
3766 * STRUCT/UNION, they will be from different compilation units. This
3767 * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
3768 * type graph will likely have at least two different BTF types that describe
3769 * same type (e.g., most probably there will be two different BTF types for the
3770 * same 'int' primitive type) and could even have "overlapping" parts of type
3771 * graph that describe same subset of types.
3773 * This in turn means that our assumption that each type in canonical graph
3774 * must correspond to exactly one type in candidate graph might not hold
3775 * anymore and will make it harder to detect contradictions using hypothetical
 * map. To handle this problem, we allow following FWD -> STRUCT/UNION
3777 * resolution only in canonical graph. FWDs in candidate graphs are never
 * resolved. To see why this is OK, let's check all possible situations w.r.t. FWDs:
3780 * - Both types in canonical and candidate graphs are FWDs. If they are
3781 * structurally equivalent, then they can either be both resolved to the
3782 * same STRUCT/UNION or not resolved at all. In both cases they are
3783 * equivalent and there is no need to resolve FWD on candidate side.
3784 * - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
 * so there is nothing to resolve either; the algorithm will check equivalence anyway.
3786 * - Type in canonical graph is FWD, while type in candidate is concrete
3787 * STRUCT/UNION. In this case candidate graph comes from single compilation
3788 * unit, so there is exactly one BTF type for each unique C type. After
3789 * resolving FWD into STRUCT/UNION, there might be more than one BTF type
3790 * in canonical graph mapping to single BTF type in candidate graph, but
3791 * because hypothetical mapping maps from canonical to candidate types, it's
3792 * alright, and we still maintain the property of having single `canon_id`
3793 * mapping to single `cand_id` (there could be two different `canon_id`
3794 * mapped to the same `cand_id`, but it's not contradictory).
3795 * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
3796 * graph is FWD. In this case we are just going to check compatibility of
3797 * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
3798 * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
3799 * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
 * turn out equivalent, we'll re-resolve the FWD to the concrete STRUCT/UNION
 * from the canonical graph.
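 *
 * As a concrete sketch: if the candidate graph has a full `struct S { int x; }`
 * while the canonical graph so far only has `struct S;` (a FWD), the FWD is
 * hypothetically mapped to the concrete STRUCT, and that mapping becomes
 * permanent only if the rest of both graphs checks out as equivalent.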
3803 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
3806 struct btf_type *cand_type;
3807 struct btf_type *canon_type;
3808 __u32 hypot_type_id;
3813 /* if both resolve to the same canonical, they must be equivalent */
3814 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
3817 canon_id = resolve_fwd_id(d, canon_id);
3819 hypot_type_id = d->hypot_map[canon_id];
3820 if (hypot_type_id <= BTF_MAX_NR_TYPES) {
3821 /* In some cases compiler will generate different DWARF types
3822 * for *identical* array type definitions and use them for
3823 * different fields within the *same* struct. This breaks type
3824 * equivalence check, which makes an assumption that candidate
3825 * types sub-graph has a consistent and deduped-by-compiler
3826 * types within a single CU. So work around that by explicitly
3827 * allowing identical array types here.
3829 return hypot_type_id == cand_id ||
3830 btf_dedup_identical_arrays(d, hypot_type_id, cand_id);
3833 if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
3836 cand_type = btf_type_by_id(d->btf, cand_id);
3837 canon_type = btf_type_by_id(d->btf, canon_id);
3838 cand_kind = btf_kind(cand_type);
3839 canon_kind = btf_kind(canon_type);
3841 if (cand_type->name_off != canon_type->name_off)
3844 /* FWD <--> STRUCT/UNION equivalence check, if enabled */
3845 if (!d->opts.dont_resolve_fwds
3846 && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
3847 && cand_kind != canon_kind) {
3851 if (cand_kind == BTF_KIND_FWD) {
3852 real_kind = canon_kind;
3853 fwd_kind = btf_fwd_kind(cand_type);
3855 real_kind = cand_kind;
3856 fwd_kind = btf_fwd_kind(canon_type);
3857 /* we'd need to resolve base FWD to STRUCT/UNION */
3858 if (fwd_kind == real_kind && canon_id < d->btf->start_id)
3859 d->hypot_adjust_canon = true;
3861 return fwd_kind == real_kind;
3864 if (cand_kind != canon_kind)
3867 switch (cand_kind) {
3869 return btf_equal_int(cand_type, canon_type);
3872 if (d->opts.dont_resolve_fwds)
3873 return btf_equal_enum(cand_type, canon_type);
3875 return btf_compat_enum(cand_type, canon_type);
3878 case BTF_KIND_FLOAT:
3879 return btf_equal_common(cand_type, canon_type);
3881 case BTF_KIND_CONST:
3882 case BTF_KIND_VOLATILE:
3883 case BTF_KIND_RESTRICT:
3885 case BTF_KIND_TYPEDEF:
3887 if (cand_type->info != canon_type->info)
3889 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
3891 case BTF_KIND_ARRAY: {
3892 const struct btf_array *cand_arr, *canon_arr;
3894 if (!btf_compat_array(cand_type, canon_type))
3896 cand_arr = btf_array(cand_type);
3897 canon_arr = btf_array(canon_type);
3898 eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
3901 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
3904 case BTF_KIND_STRUCT:
3905 case BTF_KIND_UNION: {
3906 const struct btf_member *cand_m, *canon_m;
3909 if (!btf_shallow_equal_struct(cand_type, canon_type))
3911 vlen = btf_vlen(cand_type);
3912 cand_m = btf_members(cand_type);
3913 canon_m = btf_members(canon_type);
3914 for (i = 0; i < vlen; i++) {
3915 eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
3925 case BTF_KIND_FUNC_PROTO: {
3926 const struct btf_param *cand_p, *canon_p;
3929 if (!btf_compat_fnproto(cand_type, canon_type))
3931 eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
3934 vlen = btf_vlen(cand_type);
3935 cand_p = btf_params(cand_type);
3936 canon_p = btf_params(canon_type);
3937 for (i = 0; i < vlen; i++) {
3938 eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
3954 * Use hypothetical mapping, produced by successful type graph equivalence
3955 * check, to augment existing struct/union canonical mapping, where possible.
3957 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
3958 * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
3959 * it doesn't matter if FWD type was part of canonical graph or candidate one,
 * we are recording the mapping anyway. As opposed to the carefulness required
 * for struct/union correspondence mapping (described below), for FWD resolution
 * this doesn't matter: by the time a FWD type (a reference type) is
 * deduplicated, all structs/unions will already have been deduped anyway.
3965 * Recording STRUCT/UNION mapping is purely a performance optimization and is
3966 * not required for correctness. It needs to be done carefully to ensure that
3967 * struct/union from candidate's type graph is not mapped into corresponding
3968 * struct/union from canonical type graph that itself hasn't been resolved into
3969 * canonical representative. The only guarantee we have is that canonical
3970 * struct/union was determined as canonical and that won't change. But any
 * types referenced through that struct/union's fields might not have been
 * resolved yet, so in a case like that it's too early to establish any kind of
3973 * correspondence between structs/unions.
3975 * No canonical correspondence is derived for primitive types (they are already
 * completely deduplicated anyway) or reference types (they rely on
3977 * stability of struct/union canonical relationship for equivalence checks).
3979 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
3981 __u32 canon_type_id, targ_type_id;
3982 __u16 t_kind, c_kind;
3986 for (i = 0; i < d->hypot_cnt; i++) {
3987 canon_type_id = d->hypot_list[i];
3988 targ_type_id = d->hypot_map[canon_type_id];
3989 t_id = resolve_type_id(d, targ_type_id);
3990 c_id = resolve_type_id(d, canon_type_id);
3991 t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
3992 c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
3994 * Resolve FWD into STRUCT/UNION.
3995 * It's ok to resolve FWD into STRUCT/UNION that's not yet
3996 * mapped to canonical representative (as opposed to
3997 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
3998 * eventually that struct is going to be mapped and all resolved
3999 * FWDs will automatically resolve to correct canonical
4000 * representative. This will happen before ref type deduping,
 * which critically depends on the stability of these mappings. This
4002 * stability is not a requirement for STRUCT/UNION equivalence
4006 /* if it's the split BTF case, we still need to point base FWD
4007 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
4008 * will be resolved against base FWD. If we don't point base
4009 * canonical FWD to the resolved STRUCT/UNION, then all the
4010 * FWDs in split BTF won't be correctly resolved to a proper
4013 if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
4014 d->map[c_id] = t_id;
4016 /* if graph equivalence determined that we'd need to adjust
4017 * base canonical types, then we need to only point base FWDs
4018 * to STRUCTs/UNIONs and do no more modifications. For all
4019 * other purposes the type graphs were not equivalent.
4021 if (d->hypot_adjust_canon)
4024 if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
4025 d->map[t_id] = c_id;
4027 if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
4028 c_kind != BTF_KIND_FWD &&
4029 is_type_mapped(d, c_id) &&
4030 !is_type_mapped(d, t_id)) {
4032 * as a perf optimization, we can map struct/union
4033 * that's part of type graph we just verified for
4034 * equivalence. We can do that for struct/union that has
4035 * canonical representative only, though.
4037 d->map[t_id] = c_id;
4043 * Deduplicate struct/union types.
4045 * For each struct/union type its type signature hash is calculated, taking
4046 * into account type's name, size, number, order and names of fields, but
4047 * ignoring type ID's referenced from fields, because they might not be deduped
4048 * completely until after reference types deduplication phase. This type hash
4049 * is used to iterate over all potential canonical types, sharing same hash.
4050 * For each canonical candidate we check whether type graphs that they form
4051 * (through referenced types in fields and so on) are equivalent using algorithm
4052 * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4053 * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4054 * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4055 * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4056 * potentially map other structs/unions to their canonical representatives,
4057 * if such relationship hasn't yet been established. This speeds up algorithm
4058 * by eliminating some of the duplicate work.
4060 * If no matching canonical representative was found, struct/union is marked
4061 * as canonical for itself and is added into btf_dedup->dedup_table hash map
 * for further lookups.
4064 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
4066 struct btf_type *cand_type, *t;
4067 struct hashmap_entry *hash_entry;
4068 /* if we don't find equivalent type, then we are canonical */
4069 __u32 new_id = type_id;
4073 /* already deduped or is in process of deduping (loop detected) */
4074 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4077 t = btf_type_by_id(d->btf, type_id);
4080 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4083 h = btf_hash_struct(t);
4084 for_each_dedup_cand(d, hash_entry, h) {
4085 __u32 cand_id = (__u32)(long)hash_entry->value;
4089 * Even though btf_dedup_is_equiv() checks for
4090 * btf_shallow_equal_struct() internally when checking two
		 * structs (unions) for equivalence, we need to guard here
		 * against picking a matching FWD type as a dedup candidate.
4093 * This can happen due to hash collision. In such case just
4094 * relying on btf_dedup_is_equiv() would lead to potentially
4095 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
4096 * FWD and compatible STRUCT/UNION are considered equivalent.
4098 cand_type = btf_type_by_id(d->btf, cand_id);
4099 if (!btf_shallow_equal_struct(t, cand_type))
4102 btf_dedup_clear_hypot_map(d);
4103 eq = btf_dedup_is_equiv(d, type_id, cand_id);
4108 btf_dedup_merge_hypot_map(d);
4109 if (d->hypot_adjust_canon) /* not really equivalent */
4115 d->map[type_id] = new_id;
4116 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4122 static int btf_dedup_struct_types(struct btf_dedup *d)
4126 for (i = 0; i < d->btf->nr_types; i++) {
4127 err = btf_dedup_struct_type(d, d->btf->start_id + i);
4135 * Deduplicate reference type.
4137 * Once all primitive and struct/union types got deduplicated, we can easily
4138 * deduplicate all other (reference) BTF types. This is done in two steps:
4140 * 1. Resolve all referenced type IDs into their canonical type IDs. This
4141 * resolution can be done either immediately for primitive or struct/union types
4142 * (because they were deduped in previous two phases) or recursively for
4143 * reference types. Recursion will always terminate at either primitive or
4144 * struct/union type, at which point we can "unwind" chain of reference types
 * one by one. There is no danger of encountering cycles because in the C type
 * system the only way to form a type cycle is through a struct/union, so any chain
4147 * of reference types, even those taking part in a type cycle, will inevitably
4148 * reach struct/union at some point.
4150 * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4151 * becomes "stable", in the sense that no further deduplication will cause
4152 * any changes to it. With that, it's now possible to calculate type's signature
4153 * hash (this time taking into account referenced type IDs) and loop over all
4154 * potential canonical representatives. If no match was found, current type
4155 * will become canonical representative of itself and will be added into
4156 * btf_dedup->dedup_table as another possible canonical representative.
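 *
 * For example (a sketch): for `const int *`, the PTR -> CONST -> INT chain is
 * resolved bottom-up: INT is already canonical after the earlier passes,
 * CONST's referenced type ID is then rewritten to the canonical INT and CONST
 * itself is hashed/deduped, and only then the same happens for the PTR.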
4158 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
4160 struct hashmap_entry *hash_entry;
4161 __u32 new_id = type_id, cand_id;
4162 struct btf_type *t, *cand;
4163 /* if we don't find equivalent type, then we are representative type */
4167 if (d->map[type_id] == BTF_IN_PROGRESS_ID)
4169 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4170 return resolve_type_id(d, type_id);
4172 t = btf_type_by_id(d->btf, type_id);
4173 d->map[type_id] = BTF_IN_PROGRESS_ID;
4175 switch (btf_kind(t)) {
4176 case BTF_KIND_CONST:
4177 case BTF_KIND_VOLATILE:
4178 case BTF_KIND_RESTRICT:
4180 case BTF_KIND_TYPEDEF:
4182 ref_type_id = btf_dedup_ref_type(d, t->type);
4183 if (ref_type_id < 0)
4185 t->type = ref_type_id;
4187 h = btf_hash_common(t);
4188 for_each_dedup_cand(d, hash_entry, h) {
4189 cand_id = (__u32)(long)hash_entry->value;
4190 cand = btf_type_by_id(d->btf, cand_id);
4191 if (btf_equal_common(t, cand)) {
4198 case BTF_KIND_ARRAY: {
4199 struct btf_array *info = btf_array(t);
4201 ref_type_id = btf_dedup_ref_type(d, info->type);
4202 if (ref_type_id < 0)
4204 info->type = ref_type_id;
4206 ref_type_id = btf_dedup_ref_type(d, info->index_type);
4207 if (ref_type_id < 0)
4209 info->index_type = ref_type_id;
4211 h = btf_hash_array(t);
4212 for_each_dedup_cand(d, hash_entry, h) {
4213 cand_id = (__u32)(long)hash_entry->value;
4214 cand = btf_type_by_id(d->btf, cand_id);
4215 if (btf_equal_array(t, cand)) {
4223 case BTF_KIND_FUNC_PROTO: {
4224 struct btf_param *param;
4228 ref_type_id = btf_dedup_ref_type(d, t->type);
4229 if (ref_type_id < 0)
4231 t->type = ref_type_id;
4234 param = btf_params(t);
4235 for (i = 0; i < vlen; i++) {
4236 ref_type_id = btf_dedup_ref_type(d, param->type);
4237 if (ref_type_id < 0)
4239 param->type = ref_type_id;
4243 h = btf_hash_fnproto(t);
4244 for_each_dedup_cand(d, hash_entry, h) {
4245 cand_id = (__u32)(long)hash_entry->value;
4246 cand = btf_type_by_id(d->btf, cand_id);
4247 if (btf_equal_fnproto(t, cand)) {
4259 d->map[type_id] = new_id;
4260 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4266 static int btf_dedup_ref_types(struct btf_dedup *d)
4270 for (i = 0; i < d->btf->nr_types; i++) {
4271 err = btf_dedup_ref_type(d, d->btf->start_id + i);
4275 /* we won't need d->dedup_table anymore */
4276 hashmap__free(d->dedup_table);
4277 d->dedup_table = NULL;
4284 * After we established for each type its corresponding canonical representative
 * type, we can now eliminate types that are not canonical and leave only
 * canonical ones laid out sequentially in memory by copying them over
4287 * duplicates. During compaction btf_dedup->hypot_map array is reused to store
4288 * a map from original type ID to a new compacted type ID, which will be used
 * during the next phase to "fix up" type IDs referenced from struct/union and
 * reference types.
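 *
 * A small worked example (hypothetical IDs, no base BTF): with types [1..5]
 * and d->map = {1->1, 2->1, 3->3, 4->3, 5->5}, only types 1, 3 and 5 survive
 * compaction, and hypot_map records the renumbering 1->1, 3->2, 5->3.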
4292 static int btf_dedup_compact_types(struct btf_dedup *d)
4295 __u32 next_type_id = d->btf->start_id;
4296 const struct btf_type *t;
4300 /* we are going to reuse hypot_map to store compaction remapping */
4301 d->hypot_map[0] = 0;
4302 /* base BTF types are not renumbered */
4303 for (id = 1; id < d->btf->start_id; id++)
4304 d->hypot_map[id] = id;
4305 for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
4306 d->hypot_map[id] = BTF_UNPROCESSED_ID;
4308 p = d->btf->types_data;
4310 for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
4311 if (d->map[id] != id)
4314 t = btf__type_by_id(d->btf, id);
4315 len = btf_type_size(t);
4320 d->hypot_map[id] = next_type_id;
4321 d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
4326 /* shrink struct btf's internal types index and update btf_header */
4327 d->btf->nr_types = next_type_id - d->btf->start_id;
4328 d->btf->type_offs_cap = d->btf->nr_types;
4329 d->btf->hdr->type_len = p - d->btf->types_data;
4330 new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
4332 if (d->btf->type_offs_cap && !new_offs)
4334 d->btf->type_offs = new_offs;
4335 d->btf->hdr->str_off = d->btf->hdr->type_len;
4336 d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
4341 * Figure out final (deduplicated and compacted) type ID for provided original
4342 * `type_id` by first resolving it into corresponding canonical type ID and
4343 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
4344 * which is populated during compaction phase.
4346 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4348 struct btf_dedup *d = ctx;
4349 __u32 resolved_type_id, new_type_id;
4351 resolved_type_id = resolve_type_id(d, *type_id);
4352 new_type_id = d->hypot_map[resolved_type_id];
4353 if (new_type_id > BTF_MAX_NR_TYPES)
4356 *type_id = new_type_id;
4361 * Remap referenced type IDs into deduped type IDs.
4363 * After BTF types are deduplicated and compacted, their final type IDs may
4364 * differ from original ones. The map from original to a corresponding
4365 * deduped type ID is stored in btf_dedup->hypot_map and is populated during
4366 * compaction phase. During remapping phase we are rewriting all type IDs
4367 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
4368 * their final deduped type IDs.
4370 static int btf_dedup_remap_types(struct btf_dedup *d)
4374 for (i = 0; i < d->btf->nr_types; i++) {
4375 struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
4377 r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
4385 r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
 * Probe a few well-known locations for the vmlinux kernel image and try to load BTF
4394 * data out of it to use for target BTF.
4396 struct btf *libbpf_find_kernel_btf(void)
4399 const char *path_fmt;
4402 /* try canonical vmlinux BTF through sysfs first */
4403 { "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
4404 /* fall back to trying to find vmlinux ELF on disk otherwise */
4405 { "/boot/vmlinux-%1$s" },
4406 { "/lib/modules/%1$s/vmlinux-%1$s" },
4407 { "/lib/modules/%1$s/build/vmlinux" },
4408 { "/usr/lib/modules/%1$s/kernel/vmlinux" },
4409 { "/usr/lib/debug/boot/vmlinux-%1$s" },
4410 { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
4411 { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
4413 char path[PATH_MAX + 1];
4420 for (i = 0; i < ARRAY_SIZE(locations); i++) {
4421 snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
4423 if (access(path, R_OK))
4426 if (locations[i].raw_btf)
4427 btf = btf__parse_raw(path);
4429 btf = btf__parse_elf(path, NULL);
4430 err = libbpf_get_error(btf);
4431 pr_debug("loading kernel BTF '%s': %d\n", path, err);
4438 pr_warn("failed to find valid kernel BTF\n");
4439 return libbpf_err_ptr(-ESRCH);
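/* Usage sketch (error handling abbreviated):
 *
 *	struct btf *kern_btf = libbpf_find_kernel_btf();
 *
 *	if (libbpf_get_error(kern_btf))
 *		return -ESRCH;
 *	... use kernel BTF ...
 *	btf__free(kern_btf);
 */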
4442 int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
4446 switch (btf_kind(t)) {
4448 case BTF_KIND_FLOAT:
4453 case BTF_KIND_CONST:
4454 case BTF_KIND_VOLATILE:
4455 case BTF_KIND_RESTRICT:
4457 case BTF_KIND_TYPEDEF:
4460 return visit(&t->type, ctx);
4462 case BTF_KIND_ARRAY: {
4463 struct btf_array *a = btf_array(t);
4465 err = visit(&a->type, ctx);
4466 err = err ?: visit(&a->index_type, ctx);
4470 case BTF_KIND_STRUCT:
4471 case BTF_KIND_UNION: {
4472 struct btf_member *m = btf_members(t);
4474 for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4475 err = visit(&m->type, ctx);
4482 case BTF_KIND_FUNC_PROTO: {
4483 struct btf_param *m = btf_params(t);
4485 err = visit(&t->type, ctx);
4488 for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4489 err = visit(&m->type, ctx);
4496 case BTF_KIND_DATASEC: {
4497 struct btf_var_secinfo *m = btf_var_secinfos(t);
4499 for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4500 err = visit(&m->type, ctx);
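/* Usage sketch for the visitor above (shift_id is a hypothetical callback
 * that renumbers every referenced type ID by a fixed delta, e.g. when
 * appending one BTF's types on top of another's):
 *
 *	static int shift_id(__u32 *type_id, void *ctx)
 *	{
 *		__u32 delta = *(__u32 *)ctx;
 *
 *		if (*type_id)	// leave VOID (0) alone
 *			*type_id += delta;
 *		return 0;
 *	}
 *
 *	err = btf_type_visit_type_ids(t, shift_id, &delta);
 */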
4512 int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
4516 err = visit(&t->name_off, ctx);
4520 switch (btf_kind(t)) {
4521 case BTF_KIND_STRUCT:
4522 case BTF_KIND_UNION: {
4523 struct btf_member *m = btf_members(t);
4525 for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4526 err = visit(&m->name_off, ctx);
4532 case BTF_KIND_ENUM: {
4533 struct btf_enum *m = btf_enum(t);
4535 for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4536 err = visit(&m->name_off, ctx);
4542 case BTF_KIND_FUNC_PROTO: {
4543 struct btf_param *m = btf_params(t);
4545 for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4546 err = visit(&m->name_off, ctx);
4559 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
4561 const struct btf_ext_info *seg;
4562 struct btf_ext_info_sec *sec;
4565 seg = &btf_ext->func_info;
4566 for_each_btf_ext_sec(seg, sec) {
4567 struct bpf_func_info_min *rec;
4569 for_each_btf_ext_rec(seg, sec, i, rec) {
4570 err = visit(&rec->type_id, ctx);
4576 seg = &btf_ext->core_relo_info;
4577 for_each_btf_ext_sec(seg, sec) {
4578 struct bpf_core_relo *rec;
4580 for_each_btf_ext_rec(seg, sec, i, rec) {
4581 err = visit(&rec->type_id, ctx);
4590 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
4592 const struct btf_ext_info *seg;
4593 struct btf_ext_info_sec *sec;
4596 seg = &btf_ext->func_info;
4597 for_each_btf_ext_sec(seg, sec) {
4598 err = visit(&sec->sec_name_off, ctx);
4603 seg = &btf_ext->line_info;
4604 for_each_btf_ext_sec(seg, sec) {
4605 struct bpf_line_info_min *rec;
4607 err = visit(&sec->sec_name_off, ctx);
4611 for_each_btf_ext_rec(seg, sec, i, rec) {
4612 err = visit(&rec->file_name_off, ctx);
4615 err = visit(&rec->line_off, ctx);
4621 seg = &btf_ext->core_relo_info;
4622 for_each_btf_ext_sec(seg, sec) {
4623 struct bpf_core_relo *rec;
4625 err = visit(&sec->sec_name_off, ctx);
4629 for_each_btf_ext_rec(seg, sec, i, rec) {
4630 err = visit(&rec->access_str_off, ctx);