1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
10 #include <linux/btf.h>
14 #include "libbpf_util.h"
16 #define max(a, b) ((a) > (b) ? (a) : (b))
17 #define min(a, b) ((a) < (b) ? (a) : (b))
19 #define BTF_MAX_NR_TYPES 0x7fffffff
20 #define BTF_MAX_STR_OFFSET 0x7fffffff
22 #define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \
23 ((k) == BTF_KIND_VOLATILE) || \
24 ((k) == BTF_KIND_CONST) || \
25 ((k) == BTF_KIND_RESTRICT))
27 static struct btf_type btf_void;
31 struct btf_header *hdr;
34 struct btf_type **types;
45 * info points to the individual info section (e.g. func_info and
46 * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
55 struct btf_ext_header *hdr;
58 struct btf_ext_info func_info;
59 struct btf_ext_info line_info;
63 struct btf_ext_info_sec {
66 /* Followed by num_info * record_size number of bytes */
70 /* The minimum bpf_func_info checked by the loader */
71 struct bpf_func_info_min {
76 /* The minimum bpf_line_info checked by the loader */
77 struct bpf_line_info_min {
84 static inline __u64 ptr_to_u64(const void *ptr)
86 return (__u64) (unsigned long) ptr;
89 static int btf_add_type(struct btf *btf, struct btf_type *t)
91 if (btf->types_size - btf->nr_types < 2) {
92 struct btf_type **new_types;
93 __u32 expand_by, new_size;
95 if (btf->types_size == BTF_MAX_NR_TYPES)
98 expand_by = max(btf->types_size >> 2, 16);
99 new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);
101 new_types = realloc(btf->types, sizeof(*new_types) * new_size);
105 if (btf->nr_types == 0)
106 new_types[0] = &btf_void;
108 btf->types = new_types;
109 btf->types_size = new_size;
112 btf->types[++(btf->nr_types)] = t;
117 static int btf_parse_hdr(struct btf *btf)
119 const struct btf_header *hdr = btf->hdr;
122 if (btf->data_size < sizeof(struct btf_header)) {
123 pr_debug("BTF header not found\n");
127 if (hdr->magic != BTF_MAGIC) {
128 pr_debug("Invalid BTF magic:%x\n", hdr->magic);
132 if (hdr->version != BTF_VERSION) {
133 pr_debug("Unsupported BTF version:%u\n", hdr->version);
138 pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
142 meta_left = btf->data_size - sizeof(*hdr);
144 pr_debug("BTF has no data\n");
148 if (meta_left < hdr->type_off) {
149 pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
153 if (meta_left < hdr->str_off) {
154 pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
158 if (hdr->type_off >= hdr->str_off) {
159 pr_debug("BTF type section offset >= string section offset. No type?\n");
163 if (hdr->type_off & 0x03) {
164 pr_debug("BTF type section is not aligned to 4 bytes\n");
168 btf->nohdr_data = btf->hdr + 1;
173 static int btf_parse_str_sec(struct btf *btf)
175 const struct btf_header *hdr = btf->hdr;
176 const char *start = btf->nohdr_data + hdr->str_off;
177 const char *end = start + btf->hdr->str_len;
179 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
180 start[0] || end[-1]) {
181 pr_debug("Invalid BTF string section\n");
185 btf->strings = start;
190 static int btf_type_size(struct btf_type *t)
192 int base_size = sizeof(struct btf_type);
193 __u16 vlen = BTF_INFO_VLEN(t->info);
195 switch (BTF_INFO_KIND(t->info)) {
198 case BTF_KIND_VOLATILE:
199 case BTF_KIND_RESTRICT:
201 case BTF_KIND_TYPEDEF:
205 return base_size + sizeof(__u32);
207 return base_size + vlen * sizeof(struct btf_enum);
209 return base_size + sizeof(struct btf_array);
210 case BTF_KIND_STRUCT:
212 return base_size + vlen * sizeof(struct btf_member);
213 case BTF_KIND_FUNC_PROTO:
214 return base_size + vlen * sizeof(struct btf_param);
216 pr_debug("Unsupported BTF_KIND:%u\n", BTF_INFO_KIND(t->info));
221 static int btf_parse_type_sec(struct btf *btf)
223 struct btf_header *hdr = btf->hdr;
224 void *nohdr_data = btf->nohdr_data;
225 void *next_type = nohdr_data + hdr->type_off;
226 void *end_type = nohdr_data + hdr->str_off;
228 while (next_type < end_type) {
229 struct btf_type *t = next_type;
233 type_size = btf_type_size(t);
236 next_type += type_size;
237 err = btf_add_type(btf, t);
245 __u32 btf__get_nr_types(const struct btf *btf)
247 return btf->nr_types;
250 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
252 if (type_id > btf->nr_types)
255 return btf->types[type_id];
258 static bool btf_type_is_void(const struct btf_type *t)
260 return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
263 static bool btf_type_is_void_or_null(const struct btf_type *t)
265 return !t || btf_type_is_void(t);
268 #define MAX_RESOLVE_DEPTH 32
270 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
272 const struct btf_array *array;
273 const struct btf_type *t;
278 t = btf__type_by_id(btf, type_id);
279 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
281 switch (BTF_INFO_KIND(t->info)) {
283 case BTF_KIND_STRUCT:
289 size = sizeof(void *);
291 case BTF_KIND_TYPEDEF:
292 case BTF_KIND_VOLATILE:
294 case BTF_KIND_RESTRICT:
298 array = (const struct btf_array *)(t + 1);
299 if (nelems && array->nelems > UINT32_MAX / nelems)
301 nelems *= array->nelems;
302 type_id = array->type;
308 t = btf__type_by_id(btf, type_id);
315 if (nelems && size > UINT32_MAX / nelems)
318 return nelems * size;
321 int btf__resolve_type(const struct btf *btf, __u32 type_id)
323 const struct btf_type *t;
326 t = btf__type_by_id(btf, type_id);
327 while (depth < MAX_RESOLVE_DEPTH &&
328 !btf_type_is_void_or_null(t) &&
329 IS_MODIFIER(BTF_INFO_KIND(t->info))) {
331 t = btf__type_by_id(btf, type_id);
335 if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
341 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
345 if (!strcmp(type_name, "void"))
348 for (i = 1; i <= btf->nr_types; i++) {
349 const struct btf_type *t = btf->types[i];
350 const char *name = btf__name_by_offset(btf, t->name_off);
352 if (name && !strcmp(type_name, name))
359 void btf__free(struct btf *btf)
372 struct btf *btf__new(__u8 *data, __u32 size)
377 btf = calloc(1, sizeof(struct btf));
379 return ERR_PTR(-ENOMEM);
383 btf->data = malloc(size);
389 memcpy(btf->data, data, size);
390 btf->data_size = size;
392 err = btf_parse_hdr(btf);
396 err = btf_parse_str_sec(btf);
400 err = btf_parse_type_sec(btf);
411 int btf__load(struct btf *btf)
413 __u32 log_buf_size = BPF_LOG_BUF_SIZE;
414 char *log_buf = NULL;
420 log_buf = malloc(log_buf_size);
426 btf->fd = bpf_load_btf(btf->data, btf->data_size,
427 log_buf, log_buf_size, false);
430 pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
432 pr_warning("%s\n", log_buf);
441 int btf__fd(const struct btf *btf)
446 const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
448 *size = btf->data_size;
452 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
454 if (offset < btf->hdr->str_len)
455 return &btf->strings[offset];
460 int btf__get_from_id(__u32 id, struct btf **btf)
462 struct bpf_btf_info btf_info = { 0 };
463 __u32 len = sizeof(btf_info);
471 btf_fd = bpf_btf_get_fd_by_id(id);
475 /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
476 * let's start with a sane default - 4KiB here - and resize it only if
477 * bpf_obj_get_info_by_fd() needs a bigger buffer.
479 btf_info.btf_size = 4096;
480 last_size = btf_info.btf_size;
481 ptr = malloc(last_size);
487 memset(ptr, 0, last_size);
488 btf_info.btf = ptr_to_u64(ptr);
489 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
491 if (!err && btf_info.btf_size > last_size) {
494 last_size = btf_info.btf_size;
495 temp_ptr = realloc(ptr, last_size);
501 memset(ptr, 0, last_size);
502 btf_info.btf = ptr_to_u64(ptr);
503 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
506 if (err || btf_info.btf_size > last_size) {
511 *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
524 int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
525 __u32 expected_key_size, __u32 expected_value_size,
526 __u32 *key_type_id, __u32 *value_type_id)
528 const struct btf_type *container_type;
529 const struct btf_member *key, *value;
530 const size_t max_name = 256;
531 char container_name[max_name];
532 __s64 key_size, value_size;
535 if (snprintf(container_name, max_name, "____btf_map_%s", map_name) >=
537 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
542 container_id = btf__find_by_name(btf, container_name);
543 if (container_id < 0) {
544 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
545 map_name, container_name);
549 container_type = btf__type_by_id(btf, container_id);
550 if (!container_type) {
551 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
552 map_name, container_id);
556 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
557 BTF_INFO_VLEN(container_type->info) < 2) {
558 pr_warning("map:%s container_name:%s is an invalid container struct\n",
559 map_name, container_name);
563 key = (struct btf_member *)(container_type + 1);
566 key_size = btf__resolve_size(btf, key->type);
568 pr_warning("map:%s invalid BTF key_type_size\n", map_name);
572 if (expected_key_size != key_size) {
573 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
574 map_name, (__u32)key_size, expected_key_size);
578 value_size = btf__resolve_size(btf, value->type);
579 if (value_size < 0) {
580 pr_warning("map:%s invalid BTF value_type_size\n", map_name);
584 if (expected_value_size != value_size) {
585 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
586 map_name, (__u32)value_size, expected_value_size);
590 *key_type_id = key->type;
591 *value_type_id = value->type;
596 struct btf_ext_sec_setup_param {
600 struct btf_ext_info *ext_info;
604 static int btf_ext_setup_info(struct btf_ext *btf_ext,
605 struct btf_ext_sec_setup_param *ext_sec)
607 const struct btf_ext_info_sec *sinfo;
608 struct btf_ext_info *ext_info;
609 __u32 info_left, record_size;
610 /* The start of the info sec (including the __u32 record_size). */
613 if (ext_sec->off & 0x03) {
614 pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
619 info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
620 info_left = ext_sec->len;
622 if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
623 pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
624 ext_sec->desc, ext_sec->off, ext_sec->len);
628 /* At least a record size */
629 if (info_left < sizeof(__u32)) {
630 pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
634 /* The record size must be at least the minimum size and 4-byte aligned */
635 record_size = *(__u32 *)info;
636 if (record_size < ext_sec->min_rec_size ||
637 record_size & 0x03) {
638 pr_debug("%s section in .BTF.ext has invalid record size %u\n",
639 ext_sec->desc, record_size);
643 sinfo = info + sizeof(__u32);
644 info_left -= sizeof(__u32);
646 /* If no records, return failure now so .BTF.ext won't be used. */
648 pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
653 unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
654 __u64 total_record_size;
657 if (info_left < sec_hdrlen) {
658 pr_debug("%s section header is not found in .BTF.ext\n",
663 num_records = sinfo->num_info;
664 if (num_records == 0) {
665 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
670 total_record_size = sec_hdrlen +
671 (__u64)num_records * record_size;
672 if (info_left < total_record_size) {
673 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
678 info_left -= total_record_size;
679 sinfo = (void *)sinfo + total_record_size;
682 ext_info = ext_sec->ext_info;
683 ext_info->len = ext_sec->len - sizeof(__u32);
684 ext_info->rec_size = record_size;
685 ext_info->info = info + sizeof(__u32);
690 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
692 struct btf_ext_sec_setup_param param = {
693 .off = btf_ext->hdr->func_info_off,
694 .len = btf_ext->hdr->func_info_len,
695 .min_rec_size = sizeof(struct bpf_func_info_min),
696 .ext_info = &btf_ext->func_info,
700 return btf_ext_setup_info(btf_ext, &param);
703 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
705 struct btf_ext_sec_setup_param param = {
706 .off = btf_ext->hdr->line_info_off,
707 .len = btf_ext->hdr->line_info_len,
708 .min_rec_size = sizeof(struct bpf_line_info_min),
709 .ext_info = &btf_ext->line_info,
713 return btf_ext_setup_info(btf_ext, &param);
716 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
718 const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
720 if (data_size < offsetof(struct btf_ext_header, func_info_off) ||
721 data_size < hdr->hdr_len) {
722 pr_debug("BTF.ext header not found\n");
726 if (hdr->magic != BTF_MAGIC) {
727 pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
731 if (hdr->version != BTF_VERSION) {
732 pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
737 pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
741 if (data_size == hdr->hdr_len) {
742 pr_debug("BTF.ext has no data\n");
749 void btf_ext__free(struct btf_ext *btf_ext)
757 struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
759 struct btf_ext *btf_ext;
762 err = btf_ext_parse_hdr(data, size);
766 btf_ext = calloc(1, sizeof(struct btf_ext));
768 return ERR_PTR(-ENOMEM);
770 btf_ext->data_size = size;
771 btf_ext->data = malloc(size);
772 if (!btf_ext->data) {
776 memcpy(btf_ext->data, data, size);
778 err = btf_ext_setup_func_info(btf_ext);
782 err = btf_ext_setup_line_info(btf_ext);
788 btf_ext__free(btf_ext);
795 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
797 *size = btf_ext->data_size;
798 return btf_ext->data;
801 static int btf_ext_reloc_info(const struct btf *btf,
802 const struct btf_ext_info *ext_info,
803 const char *sec_name, __u32 insns_cnt,
804 void **info, __u32 *cnt)
806 __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
807 __u32 i, record_size, existing_len, records_len;
808 struct btf_ext_info_sec *sinfo;
809 const char *info_sec_name;
813 record_size = ext_info->rec_size;
814 sinfo = ext_info->info;
815 remain_len = ext_info->len;
816 while (remain_len > 0) {
817 records_len = sinfo->num_info * record_size;
818 info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
819 if (strcmp(info_sec_name, sec_name)) {
820 remain_len -= sec_hdrlen + records_len;
821 sinfo = (void *)sinfo + sec_hdrlen + records_len;
825 existing_len = (*cnt) * record_size;
826 data = realloc(*info, existing_len + records_len);
830 memcpy(data + existing_len, sinfo->data, records_len);
831 /* adjust insn_off only, the rest of the data will be passed
834 for (i = 0; i < sinfo->num_info; i++) {
837 insn_off = data + existing_len + (i * record_size);
838 *insn_off = *insn_off / sizeof(struct bpf_insn) +
842 *cnt += sinfo->num_info;
849 int btf_ext__reloc_func_info(const struct btf *btf,
850 const struct btf_ext *btf_ext,
851 const char *sec_name, __u32 insns_cnt,
852 void **func_info, __u32 *cnt)
854 return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
855 insns_cnt, func_info, cnt);
858 int btf_ext__reloc_line_info(const struct btf *btf,
859 const struct btf_ext *btf_ext,
860 const char *sec_name, __u32 insns_cnt,
861 void **line_info, __u32 *cnt)
863 return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
864 insns_cnt, line_info, cnt);
867 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
869 return btf_ext->func_info.rec_size;
872 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
874 return btf_ext->line_info.rec_size;
879 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
880 const struct btf_dedup_opts *opts);
881 static void btf_dedup_free(struct btf_dedup *d);
882 static int btf_dedup_strings(struct btf_dedup *d);
883 static int btf_dedup_prim_types(struct btf_dedup *d);
884 static int btf_dedup_struct_types(struct btf_dedup *d);
885 static int btf_dedup_ref_types(struct btf_dedup *d);
886 static int btf_dedup_compact_types(struct btf_dedup *d);
887 static int btf_dedup_remap_types(struct btf_dedup *d);
890 * Deduplicate BTF types and strings.
892 * The BTF dedup algorithm takes as input a `struct btf` representing the `.BTF` ELF
893 * section with all BTF type descriptors and string data. It overwrites that
894 * memory in-place with deduplicated types and strings without any loss of
895 * information. If an optional `struct btf_ext` representing the '.BTF.ext' ELF section
896 * is provided, all the strings referenced from the .BTF.ext section are honored
897 * and updated to point to the right offsets after deduplication.
899 * If the function returns an error, type/string data might be garbled and should
902 * A more verbose and detailed description of both the problem btf_dedup is
903 * solving and the solution can be found at:
904 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
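 *
 * A hedged usage sketch (error handling elided; `raw_btf`, `raw_btf_ext` and
 * their sizes are illustrative names for raw ELF section contents, not part of
 * this file):
 *
 *	struct btf *btf = btf__new(raw_btf, raw_btf_size);
 *	struct btf_ext *btf_ext = btf_ext__new(raw_btf_ext, raw_btf_ext_size);
 *	struct btf_dedup_opts opts = { .dont_resolve_fwds = false };
 *	int err = btf__dedup(btf, btf_ext, &opts);
 *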
906 * Problem description and justification
907 * =====================================
909 * BTF type information is typically emitted either as a result of conversion
910 * from DWARF to BTF or directly by the compiler. In both cases, each compilation
911 * unit contains information about a subset of all the types that are used
912 * in an application. These subsets are frequently overlapping and contain a lot
913 * of duplicated information when later concatenated together into a single
914 * binary. This algorithm ensures that each unique type is represented by a single
915 * BTF type descriptor, greatly reducing the resulting size of BTF data.
917 * Compilation unit isolation and the resulting duplication of data are not the
918 * only problems. The same type hierarchy (e.g., a struct and all the types it
919 * references) in different compilation units can be represented in BTF to
920 * various degrees of completeness (or, rather, incompleteness) due to
921 * struct/union forward declarations.
923 * Let's take a look at an example that we'll use to better understand the
924 * problem (and solution). Suppose we have two compilation units, each using the
925 * same `struct S`, but each of them having incomplete type information about
954 * In case of CU #1, BTF data will know only that `struct B` exists (but no
955 * more), but will know the complete type information about `struct A`, while
956 * for CU #2, it will know full type information about `struct B`, but will
957 * only know about the forward declaration of `struct A` (in BTF terms, it will
958 * have a `BTF_KIND_FWD` type descriptor with name `A`).
960 * This compilation unit isolation means that it's possible that there is no
961 * single CU with complete type information describing structs `S`, `A`, and
962 * `B`. Also, we might get tons of duplicated and redundant type information.
964 * Additional complication we need to keep in mind comes from the fact that
965 * types, in general, can form graphs containing cycles, not just DAGs.
967 * While the algorithm performs deduplication, it also merges and resolves type
968 * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
969 * E.g., in the example above with two compilation units having partial type
970 * information for structs `A` and `B`, the algorithm will emit
971 * a single copy of each BTF type that describes structs `A`, `B`, and `S`
972 * (as well as type information for `int` and pointers), as if they were defined
973 * in a single compilation unit as:
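 *
 *	// A hedged sketch of the deduplicated result; the member layout is
 *	// illustrative, following structs `A`, `B` and `S` described above:
 *	struct S;
 *	struct A { int a; struct S *parent; };
 *	struct B { int b; struct S *parent; };
 *	struct S { struct A *a_ptr; struct B *b_ptr; };
 *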
993 * The algorithm completes its work in 6 separate passes:
995 * 1. Strings deduplication.
996 * 2. Primitive types deduplication (int, enum, fwd).
997 * 3. Struct/union types deduplication.
998 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
999 * protos, and const/volatile/restrict modifiers).
1000 * 5. Types compaction.
1001 * 6. Types remapping.
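 *
 * A hedged sketch of the driver sequence, mirroring btf__dedup() below
 * (error handling elided):
 *
 *	btf_dedup_strings(d);
 *	btf_dedup_prim_types(d);
 *	btf_dedup_struct_types(d);
 *	btf_dedup_ref_types(d);
 *	btf_dedup_compact_types(d);
 *	btf_dedup_remap_types(d);
 *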
1003 * The algorithm determines a canonical type descriptor, which is a single
1004 * representative type for each truly unique type. This canonical type is the
1005 * one that will go into the final deduplicated BTF type information. For
1006 * struct/unions, it is also the type that the algorithm will merge additional type
1007 * information into (while resolving FWDs), as it discovers it from data in
1008 * other CUs. Each input BTF type eventually gets either mapped to itself, if
1009 * that type is canonical, or to some other type, if that type is equivalent
1010 * and was chosen as the canonical representative. This mapping is stored in
1011 * the `btf_dedup->map` array. This map is also used to record the STRUCT/UNION that
1012 * a FWD type got resolved to.
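 *
 * A hedged illustration (type IDs are made up): if type #7 is a duplicate of
 * canonical type #3, then map[7] == 3 and map[3] == 3; a FWD type #9 that got
 * resolved to STRUCT #3 likewise ends up with map[9] == 3.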
1014 * To facilitate fast discovery of canonical types, we also maintain a canonical
1015 * index (`btf_dedup->dedup_table`), which maps a type descriptor's signature hash
1016 * (i.e., hashed kind, name, size, fields, etc.) into a list of canonical types
1017 * that match that signature. With a sufficiently good choice of type signature
1018 * hashing function, we can limit the number of canonical types for each unique type
1019 * signature to a very small number, allowing us to find the canonical type for any
1020 * duplicated type very quickly.
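 *
 * A hedged lookup sketch, mirroring the for_each_dedup_cand() usage later in
 * this file:
 *
 *	h = btf_hash_common(t);
 *	for_each_dedup_cand(d, h, cand_node) {
 *		cand = d->btf->types[cand_node->type_id];
 *		if (btf_equal_common(t, cand))
 *			break; /* found a canonical representative */
 *	}
 *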
1022 * Struct/union deduplication is the most critical part and the algorithm for
1023 * deduplicating structs/unions is described in greater detail in the comments
1024 * for the `btf_dedup_is_equiv` function.
1026 int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
1027 const struct btf_dedup_opts *opts)
1029 struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
1033 pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
1037 err = btf_dedup_strings(d);
1039 pr_debug("btf_dedup_strings failed:%d\n", err);
1042 err = btf_dedup_prim_types(d);
1044 pr_debug("btf_dedup_prim_types failed:%d\n", err);
1047 err = btf_dedup_struct_types(d);
1049 pr_debug("btf_dedup_struct_types failed:%d\n", err);
1052 err = btf_dedup_ref_types(d);
1054 pr_debug("btf_dedup_ref_types failed:%d\n", err);
1057 err = btf_dedup_compact_types(d);
1059 pr_debug("btf_dedup_compact_types failed:%d\n", err);
1062 err = btf_dedup_remap_types(d);
1064 pr_debug("btf_dedup_remap_types failed:%d\n", err);
1073 #define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14)
1074 #define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31
1075 #define BTF_UNPROCESSED_ID ((__u32)-1)
1076 #define BTF_IN_PROGRESS_ID ((__u32)-2)
1078 struct btf_dedup_node {
1079 struct btf_dedup_node *next;
1084 /* .BTF section to be deduped in-place */
1087 * Optional .BTF.ext section. When provided, any strings referenced
1088 * from it will be taken into account when deduping strings
1090 struct btf_ext *btf_ext;
1092 * This is a map from any type's signature hash to a list of possible
1093 * canonical representative type candidates. Hash collisions are
1094 * ignored, so even types of various kinds can share same list of
1095 * candidates, which is fine because we rely on subsequent
1096 * btf_xxx_equal() checks to authoritatively verify type equality.
1098 struct btf_dedup_node **dedup_table;
1099 /* Canonical types map */
1101 /* Hypothetical mapping, used during type graph equivalence checks */
1106 /* Various options modifying behavior of the algorithm */
1107 struct btf_dedup_opts opts;
1110 struct btf_str_ptr {
1116 struct btf_str_ptrs {
1117 struct btf_str_ptr *ptrs;
1123 static inline __u32 hash_combine(__u32 h, __u32 value)
1125 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
1126 #define GOLDEN_RATIO_PRIME 0x9e370001UL
1127 return h * 37 + value * GOLDEN_RATIO_PRIME;
1128 #undef GOLDEN_RATIO_PRIME
1131 #define for_each_dedup_cand(d, hash, node) \
1132 for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \
1136 static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
1138 struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
1139 int bucket = hash & (d->opts.dedup_table_size - 1);
1143 node->type_id = type_id;
1144 node->next = d->dedup_table[bucket];
1145 d->dedup_table[bucket] = node;
1149 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
1150 __u32 from_id, __u32 to_id)
1152 if (d->hypot_cnt == d->hypot_cap) {
1155 d->hypot_cap += max(16, d->hypot_cap / 2);
1156 new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
1159 d->hypot_list = new_list;
1161 d->hypot_list[d->hypot_cnt++] = from_id;
1162 d->hypot_map[from_id] = to_id;
1166 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
1170 for (i = 0; i < d->hypot_cnt; i++)
1171 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
1175 static void btf_dedup_table_free(struct btf_dedup *d)
1177 struct btf_dedup_node *head, *tmp;
1180 if (!d->dedup_table)
1183 for (i = 0; i < d->opts.dedup_table_size; i++) {
1184 while (d->dedup_table[i]) {
1185 tmp = d->dedup_table[i];
1186 d->dedup_table[i] = tmp->next;
1190 head = d->dedup_table[i];
1198 free(d->dedup_table);
1199 d->dedup_table = NULL;
1202 static void btf_dedup_free(struct btf_dedup *d)
1204 btf_dedup_table_free(d);
1210 d->hypot_map = NULL;
1212 free(d->hypot_list);
1213 d->hypot_list = NULL;
1218 /* Find the closest power of two >= size, capped at 2^max_size_log */
1219 static __u32 roundup_pow2_max(__u32 size, int max_size_log)
1223 for (i = 0; i < max_size_log && (1U << i) < size; i++)
1229 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1230 const struct btf_dedup_opts *opts)
1232 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
1237 return ERR_PTR(-ENOMEM);
1239 d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
1240 sz = opts && opts->dedup_table_size ? opts->dedup_table_size
1241 : BTF_DEDUP_TABLE_DEFAULT_SIZE;
1242 sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG);
1243 d->opts.dedup_table_size = sz;
1246 d->btf_ext = btf_ext;
1248 d->dedup_table = calloc(d->opts.dedup_table_size,
1249 sizeof(struct btf_dedup_node *));
1250 if (!d->dedup_table) {
1255 d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1260 /* special BTF "void" type is made canonical immediately */
1262 for (i = 1; i <= btf->nr_types; i++)
1263 d->map[i] = BTF_UNPROCESSED_ID;
1265 d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1266 if (!d->hypot_map) {
1270 for (i = 0; i <= btf->nr_types; i++)
1271 d->hypot_map[i] = BTF_UNPROCESSED_ID;
1276 return ERR_PTR(err);
1282 typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
1285 * Iterate over all possible places in .BTF and .BTF.ext that can reference
1286 * string and pass pointer to it to a provided callback `fn`.
1288 static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
1290 void *line_data_cur, *line_data_end;
1291 int i, j, r, rec_size;
1294 for (i = 1; i <= d->btf->nr_types; i++) {
1295 t = d->btf->types[i];
1296 r = fn(&t->name_off, ctx);
1300 switch (BTF_INFO_KIND(t->info)) {
1301 case BTF_KIND_STRUCT:
1302 case BTF_KIND_UNION: {
1303 struct btf_member *m = (struct btf_member *)(t + 1);
1304 __u16 vlen = BTF_INFO_VLEN(t->info);
1306 for (j = 0; j < vlen; j++) {
1307 r = fn(&m->name_off, ctx);
1314 case BTF_KIND_ENUM: {
1315 struct btf_enum *m = (struct btf_enum *)(t + 1);
1316 __u16 vlen = BTF_INFO_VLEN(t->info);
1318 for (j = 0; j < vlen; j++) {
1319 r = fn(&m->name_off, ctx);
1326 case BTF_KIND_FUNC_PROTO: {
1327 struct btf_param *m = (struct btf_param *)(t + 1);
1328 __u16 vlen = BTF_INFO_VLEN(t->info);
1330 for (j = 0; j < vlen; j++) {
1331 r = fn(&m->name_off, ctx);
1346 line_data_cur = d->btf_ext->line_info.info;
1347 line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
1348 rec_size = d->btf_ext->line_info.rec_size;
1350 while (line_data_cur < line_data_end) {
1351 struct btf_ext_info_sec *sec = line_data_cur;
1352 struct bpf_line_info_min *line_info;
1353 __u32 num_info = sec->num_info;
1355 r = fn(&sec->sec_name_off, ctx);
1359 line_data_cur += sizeof(struct btf_ext_info_sec);
1360 for (i = 0; i < num_info; i++) {
1361 line_info = line_data_cur;
1362 r = fn(&line_info->file_name_off, ctx);
1365 r = fn(&line_info->line_off, ctx);
1368 line_data_cur += rec_size;
1375 static int str_sort_by_content(const void *a1, const void *a2)
1377 const struct btf_str_ptr *p1 = a1;
1378 const struct btf_str_ptr *p2 = a2;
1380 return strcmp(p1->str, p2->str);
1383 static int str_sort_by_offset(const void *a1, const void *a2)
1385 const struct btf_str_ptr *p1 = a1;
1386 const struct btf_str_ptr *p2 = a2;
1388 if (p1->str != p2->str)
1389 return p1->str < p2->str ? -1 : 1;
1393 static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
1395 const struct btf_str_ptr *p = pelem;
1397 if (str_ptr != p->str)
1398 return (const char *)str_ptr < p->str ? -1 : 1;
1402 static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
1404 struct btf_str_ptrs *strs;
1405 struct btf_str_ptr *s;
1407 if (*str_off_ptr == 0)
1411 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1412 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1419 static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
1421 struct btf_str_ptrs *strs;
1422 struct btf_str_ptr *s;
1424 if (*str_off_ptr == 0)
1428 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1429 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1432 *str_off_ptr = s->new_off;
1437 * Dedup strings and filter out those that are not referenced from either the .BTF
1438 * or .BTF.ext (if provided) sections.
1440 * This is done by building an index of all strings in BTF's string section,
1441 * then iterating over all entities that can reference strings (e.g., type
1442 * names, struct field names, .BTF.ext line info, etc.) and marking the corresponding
1443 * strings as used. After that, all used strings are deduped and compacted into
1444 * a sequential blob of memory and new offsets are calculated. Then all the string
1445 * references are iterated again and rewritten using the new offsets.
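 *
 * A hedged illustration (offsets are made up): a string section
 * "\0int\0foo\0int\0" referenced at offsets 1, 5 and 9 is compacted to
 * "\0int\0foo\0", and the reference to offset 9 is rewritten to offset 1.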
1447 static int btf_dedup_strings(struct btf_dedup *d)
1449 const struct btf_header *hdr = d->btf->hdr;
1450 char *start = (char *)d->btf->nohdr_data + hdr->str_off;
1451 char *end = start + d->btf->hdr->str_len;
1452 char *p = start, *tmp_strs = NULL;
1453 struct btf_str_ptrs strs = {
1459 int i, j, err = 0, grp_idx;
1462 /* build index of all strings */
1464 if (strs.cnt + 1 > strs.cap) {
1465 struct btf_str_ptr *new_ptrs;
1467 strs.cap += max(strs.cnt / 2, 16);
1468 new_ptrs = realloc(strs.ptrs,
1469 sizeof(strs.ptrs[0]) * strs.cap);
1474 strs.ptrs = new_ptrs;
1477 strs.ptrs[strs.cnt].str = p;
1478 strs.ptrs[strs.cnt].used = false;
1484 /* temporary storage for deduplicated strings */
1485 tmp_strs = malloc(d->btf->hdr->str_len);
1491 /* mark all used strings */
1492 strs.ptrs[0].used = true;
1493 err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
1497 /* sort strings by content, so that we can identify duplicates */
1498 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
1501 * iterate groups of equal strings and if any instance in a group was
1502 * referenced, emit single instance and remember new offset
1506 grp_used = strs.ptrs[0].used;
1507 /* iterate past end to avoid code duplication after loop */
1508 for (i = 1; i <= strs.cnt; i++) {
1510 * when i == strs.cnt, we want to skip string comparison and go
1511 * straight to handling last group of strings (otherwise we'd
1512 * need to handle last group after the loop w/ duplicated code)
1515 !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
1516 grp_used = grp_used || strs.ptrs[i].used;
1521 * this check would have been required after the loop to handle
1522 * last group of strings, but due to <= condition in a loop
1523 * we avoid that duplication
1526 int new_off = p - tmp_strs;
1527 __u32 len = strlen(strs.ptrs[grp_idx].str);
1529 memmove(p, strs.ptrs[grp_idx].str, len + 1);
1530 for (j = grp_idx; j < i; j++)
1531 strs.ptrs[j].new_off = new_off;
1537 grp_used = strs.ptrs[i].used;
1541 /* replace original strings with deduped ones */
1542 d->btf->hdr->str_len = p - tmp_strs;
1543 memmove(start, tmp_strs, d->btf->hdr->str_len);
1544 end = start + d->btf->hdr->str_len;
1546 /* restore original order for further binary search lookups */
1547 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
1549 /* remap string offsets */
1550 err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
1554 d->btf->hdr->str_len = end - start;
1562 static __u32 btf_hash_common(struct btf_type *t)
1566 h = hash_combine(0, t->name_off);
1567 h = hash_combine(h, t->info);
1568 h = hash_combine(h, t->size);
1572 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
1574 return t1->name_off == t2->name_off &&
1575 t1->info == t2->info &&
1576 t1->size == t2->size;
1579 /* Calculate type signature hash of INT. */
1580 static __u32 btf_hash_int(struct btf_type *t)
1582 __u32 info = *(__u32 *)(t + 1);
1585 h = btf_hash_common(t);
1586 h = hash_combine(h, info);
1590 /* Check structural equality of two INTs. */
1591 static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
1595 if (!btf_equal_common(t1, t2))
1597 info1 = *(__u32 *)(t1 + 1);
1598 info2 = *(__u32 *)(t2 + 1);
1599 return info1 == info2;
1602 /* Calculate type signature hash of ENUM. */
1603 static __u32 btf_hash_enum(struct btf_type *t)
1607 /* don't hash vlen and enum members to support enum fwd resolving */
1608 h = hash_combine(0, t->name_off);
1609 h = hash_combine(h, t->info & ~0xffff);
1610 h = hash_combine(h, t->size);
1614 /* Check structural equality of two ENUMs. */
1615 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
1617 struct btf_enum *m1, *m2;
1621 if (!btf_equal_common(t1, t2))
1624 vlen = BTF_INFO_VLEN(t1->info);
1625 m1 = (struct btf_enum *)(t1 + 1);
1626 m2 = (struct btf_enum *)(t2 + 1);
1627 for (i = 0; i < vlen; i++) {
1628 if (m1->name_off != m2->name_off || m1->val != m2->val)
1636 static inline bool btf_is_enum_fwd(struct btf_type *t)
1638 return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM &&
1639 BTF_INFO_VLEN(t->info) == 0;
1642 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
1644 if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
1645 return btf_equal_enum(t1, t2);
1646 /* ignore vlen when comparing */
1647 return t1->name_off == t2->name_off &&
1648 (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
1649 t1->size == t2->size;
1653 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
1654 * as referenced type ID equivalence is established separately during the type
1655 * graph equivalence check algorithm.
1657 static __u32 btf_hash_struct(struct btf_type *t)
1659 struct btf_member *member = (struct btf_member *)(t + 1);
1660 __u32 vlen = BTF_INFO_VLEN(t->info);
1661 __u32 h = btf_hash_common(t);
1664 for (i = 0; i < vlen; i++) {
1665 h = hash_combine(h, member->name_off);
1666 h = hash_combine(h, member->offset);
1667 /* no hashing of referenced type ID, it can be unresolved yet */
1674 * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type
1675 * IDs. This check is performed during type graph equivalence check and
1676 * referenced types equivalence is checked separately.
1678 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
1680 struct btf_member *m1, *m2;
1684 if (!btf_equal_common(t1, t2))
1687 vlen = BTF_INFO_VLEN(t1->info);
1688 m1 = (struct btf_member *)(t1 + 1);
1689 m2 = (struct btf_member *)(t2 + 1);
1690 for (i = 0; i < vlen; i++) {
1691 if (m1->name_off != m2->name_off || m1->offset != m2->offset)
1700 * Calculate type signature hash of ARRAY, including referenced type IDs,
1701 * under assumption that they were already resolved to canonical type IDs and
1702 * are not going to change.
1704 static __u32 btf_hash_array(struct btf_type *t)
1706 struct btf_array *info = (struct btf_array *)(t + 1);
1707 __u32 h = btf_hash_common(t);
1709 h = hash_combine(h, info->type);
1710 h = hash_combine(h, info->index_type);
1711 h = hash_combine(h, info->nelems);
1716 * Check exact equality of two ARRAYs, taking into account referenced
1717 * type IDs, under assumption that they were already resolved to canonical
1718 * type IDs and are not going to change.
1719 * This function is called during reference types deduplication to compare
1720 * ARRAY to potential canonical representative.
1722 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
1724 struct btf_array *info1, *info2;
1726 if (!btf_equal_common(t1, t2))
1729 info1 = (struct btf_array *)(t1 + 1);
1730 info2 = (struct btf_array *)(t2 + 1);
1731 return info1->type == info2->type &&
1732 info1->index_type == info2->index_type &&
1733 info1->nelems == info2->nelems;
1737 * Check structural compatibility of two ARRAYs, ignoring referenced type
1738 * IDs. This check is performed during type graph equivalence check and
1739 * referenced types equivalence is checked separately.
1741 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
1743 struct btf_array *info1, *info2;
1745 if (!btf_equal_common(t1, t2))
1748 info1 = (struct btf_array *)(t1 + 1);
1749 info2 = (struct btf_array *)(t2 + 1);
1750 return info1->nelems == info2->nelems;
1754 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
1755 * under assumption that they were already resolved to canonical type IDs and
1756 * are not going to change.
1758 static inline __u32 btf_hash_fnproto(struct btf_type *t)
1760 struct btf_param *member = (struct btf_param *)(t + 1);
1761 __u16 vlen = BTF_INFO_VLEN(t->info);
1762 __u32 h = btf_hash_common(t);
1765 for (i = 0; i < vlen; i++) {
1766 h = hash_combine(h, member->name_off);
1767 h = hash_combine(h, member->type);
1774 * Check exact equality of two FUNC_PROTOs, taking into account referenced
1775 * type IDs, under assumption that they were already resolved to canonical
1776 * type IDs and are not going to change.
1777 * This function is called during reference types deduplication to compare
1778 * FUNC_PROTO to potential canonical representative.
1780 static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
1782 struct btf_param *m1, *m2;
1786 if (!btf_equal_common(t1, t2))
1789 vlen = BTF_INFO_VLEN(t1->info);
1790 m1 = (struct btf_param *)(t1 + 1);
1791 m2 = (struct btf_param *)(t2 + 1);
1792 for (i = 0; i < vlen; i++) {
1793 if (m1->name_off != m2->name_off || m1->type != m2->type)
1802 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
1803 * IDs. This check is performed during type graph equivalence check and
1804 * referenced types equivalence is checked separately.
1806 static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
1808 struct btf_param *m1, *m2;
1812 /* skip return type ID */
1813 if (t1->name_off != t2->name_off || t1->info != t2->info)
1816 vlen = BTF_INFO_VLEN(t1->info);
1817 m1 = (struct btf_param *)(t1 + 1);
1818 m2 = (struct btf_param *)(t2 + 1);
1819 for (i = 0; i < vlen; i++) {
1820 if (m1->name_off != m2->name_off)
1829 * Deduplicate primitive types, which can't reference other types, by calculating
1830 * their type signature hash and comparing them with any possible canonical
1831 * candidate. If no canonical candidate matches, the type itself is marked as
1832 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
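 *
 * For example (a hedged illustration), two identical `int` type descriptors
 * (same name, size and encoding) coming from different CUs hash to the same
 * signature via btf_hash_int() and compare equal via btf_equal_int(), so the
 * second one is simply mapped to the first.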
1834 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
1836 struct btf_type *t = d->btf->types[type_id];
1837 struct btf_type *cand;
1838 struct btf_dedup_node *cand_node;
1839 /* if we don't find equivalent type, then we are canonical */
1840 __u32 new_id = type_id;
1843 switch (BTF_INFO_KIND(t->info)) {
1844 case BTF_KIND_CONST:
1845 case BTF_KIND_VOLATILE:
1846 case BTF_KIND_RESTRICT:
1848 case BTF_KIND_TYPEDEF:
1849 case BTF_KIND_ARRAY:
1850 case BTF_KIND_STRUCT:
1851 case BTF_KIND_UNION:
1853 case BTF_KIND_FUNC_PROTO:
1857 h = btf_hash_int(t);
1858 for_each_dedup_cand(d, h, cand_node) {
1859 cand = d->btf->types[cand_node->type_id];
1860 if (btf_equal_int(t, cand)) {
1861 new_id = cand_node->type_id;
1868 h = btf_hash_enum(t);
1869 for_each_dedup_cand(d, h, cand_node) {
1870 cand = d->btf->types[cand_node->type_id];
1871 if (btf_equal_enum(t, cand)) {
1872 new_id = cand_node->type_id;
1875 if (d->opts.dont_resolve_fwds)
1877 if (btf_compat_enum(t, cand)) {
1878 if (btf_is_enum_fwd(t)) {
1879 /* resolve fwd to full enum */
1880 new_id = cand_node->type_id;
1883 /* resolve canonical enum fwd to full enum */
1884 d->map[cand_node->type_id] = type_id;
1890 h = btf_hash_common(t);
1891 for_each_dedup_cand(d, h, cand_node) {
1892 cand = d->btf->types[cand_node->type_id];
1893 if (btf_equal_common(t, cand)) {
1894 new_id = cand_node->type_id;
1904 d->map[type_id] = new_id;
1905 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
1911 static int btf_dedup_prim_types(struct btf_dedup *d)
1915 for (i = 1; i <= d->btf->nr_types; i++) {
1916 err = btf_dedup_prim_type(d, i);
1924 * Check whether type is already mapped into canonical one (could be to itself).
1926 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
1928 return d->map[type_id] <= BTF_MAX_NR_TYPES;
1932 * Resolve type ID into its canonical type ID, if any; otherwise return original
1933 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
1934 * STRUCT/UNION link and resolve it into canonical type ID as well.
1936 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
1938 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
1939 type_id = d->map[type_id];
1944 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
1947 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
1949 __u32 orig_type_id = type_id;
1951 if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
1954 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
1955 type_id = d->map[type_id];
1957 if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
1960 return orig_type_id;
1964 static inline __u16 btf_fwd_kind(struct btf_type *t)
1966 return BTF_INFO_KFLAG(t->info) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
1970 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
1971 * call it "candidate graph" in this description for brevity) to a type graph
1972 * formed by (potential) canonical struct/union ("canonical graph" for brevity
1973 * here, though keep in mind that not all types in canonical graph are
1974 * necessarily canonical representatives themselves, some of them might be
1975 * duplicates or their uniqueness might not have been established yet).
1977 * - >0, if type graphs are equivalent;
1978 * - 0, if not equivalent;
1981 * Algorithm performs side-by-side DFS traversal of both type graphs and checks
1982 * equivalence of BTF types at each step. If at any point BTF types in candidate
1983 * and canonical graphs are not compatible structurally, whole graphs are
1984 * incompatible. If types are structurally equivalent (i.e., all information
1985 * except referenced type IDs is exactly the same), a mapping from `canon_id` to
1986 * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
1987 * If a type references other types, then those referenced types are checked
1988 * for equivalence recursively.
1990 * During DFS traversal, if we find that for current `canon_id` type we
1991 * already have some mapping in hypothetical map, we check for two possible
1993 * - `canon_id` is mapped to exactly the same type as `cand_id`. This will
1994 * happen when type graphs have cycles. In this case we assume those two
1995 * types are equivalent.
1996 * - `canon_id` is mapped to a different type. This is a contradiction in our
1997 * hypothetical mapping, because the same graph in the canonical graph corresponds
1998 * to two different types in the candidate graph, which for equivalent type
1999 * graphs shouldn't happen. This condition terminates the equivalence check
2000 * with a negative result.
2002 * If the type graph traversal exhausts the types to check and finds no
2003 * contradiction, then the type graphs are equivalent.
2005 * When checking types for equivalence, there is one special case: FWD types.
2006 * If FWD type resolution is allowed and one of the types (either from the canonical
2007 * or candidate graph) is a FWD and the other is a STRUCT/UNION (depending on FWD's kind
2008 * flag) and their names match, the hypothetical mapping is updated to point from
2009 * FWD to STRUCT/UNION. If the graphs are later determined to be equivalent,
2010 * this mapping will be used to record the FWD -> STRUCT/UNION mapping permanently.
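 *
 * Hedged example (the name is illustrative): if the canonical graph contains
 * `struct foo;` (a FWD) where the candidate graph has a full
 * `struct foo { ... };` with a matching name, the hypothetical map records
 * FWD -> STRUCT, to be made permanent only if the whole graphs turn out
 * equivalent.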
2012 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
2013 * if there are two identically named (or anonymous) structs/unions that are
2014 * compatible structurally, one of which has a FWD field, while the other has a
2015 * concrete STRUCT/UNION field, but according to C sources they are different
2016 * structs/unions that reference different types with the same name. This is
2017 * extremely unlikely to happen, but the btf_dedup API allows disabling FWD
2018 * resolution if this logic is causing problems.
2020 * Doing FWD resolution means that both candidate and/or canonical graphs can
2021 * consist of portions of the graph that come from multiple compilation units.
2022 * This is due to the fact that types within a single compilation unit are always
2023 * deduplicated and FWDs are already resolved, if the referenced struct/union
2024 * definition is available. So, if we had an unresolved FWD and found a corresponding
2025 * STRUCT/UNION, they will be from different compilation units. This
2026 * consequently means that when we "link" a FWD to the corresponding STRUCT/UNION,
2027 * the type graph will likely have at least two different BTF types that describe
2028 * the same type (e.g., most probably there will be two different BTF types for the
2029 * same 'int' primitive type) and could even have "overlapping" parts of the type
2030 * graph that describe the same subset of types.
2032 * This in turn means that our assumption that each type in the canonical graph
2033 * must correspond to exactly one type in the candidate graph might not hold
2034 * anymore and will make it harder to detect contradictions using the hypothetical
2035 * map. To handle this problem, we only follow FWD -> STRUCT/UNION
2036 * resolution in the canonical graph. FWDs in candidate graphs are never
2037 * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
2039 * - Both types in canonical and candidate graphs are FWDs. If they are
2040 * structurally equivalent, then they can either be both resolved to the
2041 * same STRUCT/UNION or not resolved at all. In both cases they are
2042 * equivalent and there is no need to resolve FWD on candidate side.
2043 * - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
2044 * so there is nothing to resolve either; the algorithm will check equivalence anyway.
2045 * - Type in canonical graph is FWD, while type in candidate is concrete
2046 * STRUCT/UNION. In this case candidate graph comes from single compilation
2047 * unit, so there is exactly one BTF type for each unique C type. After
2048 * resolving FWD into STRUCT/UNION, there might be more than one BTF type
2049 * in canonical graph mapping to single BTF type in candidate graph, but
2050 * because hypothetical mapping maps from canonical to candidate types, it's
2051 * alright, and we still maintain the property of having single `canon_id`
2052 * mapping to single `cand_id` (there could be two different `canon_id`
2053 * mapped to the same `cand_id`, but it's not contradictory).
2054 * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
2055 * graph is FWD. In this case we are just going to check compatibility of
2056 * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
2057 * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
2058 * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
2059 * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
2062 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
2065 struct btf_type *cand_type;
2066 struct btf_type *canon_type;
2067 __u32 hypot_type_id;
2072 /* if both resolve to the same canonical, they must be equivalent */
2073 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
2076 canon_id = resolve_fwd_id(d, canon_id);
2078 hypot_type_id = d->hypot_map[canon_id];
2079 if (hypot_type_id <= BTF_MAX_NR_TYPES)
2080 return hypot_type_id == cand_id;
2082 if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
2085 cand_type = d->btf->types[cand_id];
2086 canon_type = d->btf->types[canon_id];
2087 cand_kind = BTF_INFO_KIND(cand_type->info);
2088 canon_kind = BTF_INFO_KIND(canon_type->info);
2090 if (cand_type->name_off != canon_type->name_off)
2093 /* FWD <--> STRUCT/UNION equivalence check, if enabled */
2094 if (!d->opts.dont_resolve_fwds
2095 && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
2096 && cand_kind != canon_kind) {
2100 if (cand_kind == BTF_KIND_FWD) {
2101 real_kind = canon_kind;
2102 fwd_kind = btf_fwd_kind(cand_type);
2104 real_kind = cand_kind;
2105 fwd_kind = btf_fwd_kind(canon_type);
2107 return fwd_kind == real_kind;
2110 if (cand_kind != canon_kind)
2113 switch (cand_kind) {
2115 return btf_equal_int(cand_type, canon_type);
2118 if (d->opts.dont_resolve_fwds)
2119 return btf_equal_enum(cand_type, canon_type);
2121 return btf_compat_enum(cand_type, canon_type);
2124 return btf_equal_common(cand_type, canon_type);
2126 case BTF_KIND_CONST:
2127 case BTF_KIND_VOLATILE:
2128 case BTF_KIND_RESTRICT:
2130 case BTF_KIND_TYPEDEF:
2132 if (cand_type->info != canon_type->info)
2134 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2136 case BTF_KIND_ARRAY: {
2137 struct btf_array *cand_arr, *canon_arr;
2139 if (!btf_compat_array(cand_type, canon_type))
2141 cand_arr = (struct btf_array *)(cand_type + 1);
2142 canon_arr = (struct btf_array *)(canon_type + 1);
2143 eq = btf_dedup_is_equiv(d,
2144 cand_arr->index_type, canon_arr->index_type);
2147 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
2150 case BTF_KIND_STRUCT:
2151 case BTF_KIND_UNION: {
2152 struct btf_member *cand_m, *canon_m;
2155 if (!btf_shallow_equal_struct(cand_type, canon_type))
2157 vlen = BTF_INFO_VLEN(cand_type->info);
2158 cand_m = (struct btf_member *)(cand_type + 1);
2159 canon_m = (struct btf_member *)(canon_type + 1);
2160 for (i = 0; i < vlen; i++) {
2161 eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
2171 case BTF_KIND_FUNC_PROTO: {
2172 struct btf_param *cand_p, *canon_p;
2175 if (!btf_compat_fnproto(cand_type, canon_type))
2177 eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2180 vlen = BTF_INFO_VLEN(cand_type->info);
2181 cand_p = (struct btf_param *)(cand_type + 1);
2182 canon_p = (struct btf_param *)(canon_type + 1);
2183 for (i = 0; i < vlen; i++) {
2184 eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
2200 * Use the hypothetical mapping, produced by a successful type graph equivalence
2201 * check, to augment the existing struct/union canonical mapping, where possible.
2203 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
2204 * FWD -> STRUCT/UNION correspondence. FWD resolution is bidirectional:
2205 * it doesn't matter if the FWD type was part of the canonical graph or the candidate
2206 * one, we record the mapping anyway. Unlike the struct/union correspondence
2207 * mapping (described below), which requires care, FWD resolution is safe here
2208 * because by the time a FWD type (a reference type) gets deduplicated, all
2209 * structs/unions will have been deduped already anyway.
2211 * Recording the STRUCT/UNION mapping is purely a performance optimization and is
2212 * not required for correctness. It needs to be done carefully to ensure that a
2213 * struct/union from the candidate's type graph is not mapped into a corresponding
2214 * struct/union from the canonical type graph that itself hasn't been resolved into
2215 * a canonical representative. The only guarantee we have is that the canonical
2216 * struct/union was determined as canonical and that won't change. But any
2217 * types referenced through that struct/union's fields might not yet have been
2218 * resolved, so in a case like that it's too early to establish any kind of
2219 * correspondence between structs/unions.
2221 * No canonical correspondence is derived for primitive types (they are
2222 * completely deduplicated already anyway) or reference types (they rely on
2223 * the stability of the struct/union canonical relationship for equivalence checks).
2225 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
2227 __u32 cand_type_id, targ_type_id;
2228 __u16 t_kind, c_kind;
2232 for (i = 0; i < d->hypot_cnt; i++) {
2233 cand_type_id = d->hypot_list[i];
2234 targ_type_id = d->hypot_map[cand_type_id];
2235 t_id = resolve_type_id(d, targ_type_id);
2236 c_id = resolve_type_id(d, cand_type_id);
2237 t_kind = BTF_INFO_KIND(d->btf->types[t_id]->info);
2238 c_kind = BTF_INFO_KIND(d->btf->types[c_id]->info);
2240 * Resolve FWD into STRUCT/UNION.
2241 * It's ok to resolve a FWD into a STRUCT/UNION that's not yet
2242 * mapped to a canonical representative (as opposed to the
2243 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
2244 * eventually that struct is going to be mapped and all resolved
2245 * FWDs will automatically resolve to the correct canonical
2246 * representative. This will happen before ref type deduping,
2247 * which critically depends on the stability of these mappings. This
2248 * stability is not a requirement for STRUCT/UNION equivalence
2251 if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
2252 d->map[c_id] = t_id;
2253 else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
2254 d->map[t_id] = c_id;
2256 if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
2257 c_kind != BTF_KIND_FWD &&
2258 is_type_mapped(d, c_id) &&
2259 !is_type_mapped(d, t_id)) {
2261 * as a perf optimization, we can map a struct/union
2262 * that's part of the type graph we just verified for
2263 * equivalence. We can do that only for a struct/union that
2264 * has a canonical representative, though.
2266 d->map[t_id] = c_id;
2272 * Deduplicate struct/union types.
2274 * For each struct/union type, its type signature hash is calculated, taking
2275 * into account the type's name, size, and the number, order, and names of fields, but
2276 * ignoring type IDs referenced from fields, because they might not be deduped
2277 * completely until after the reference type deduplication phase. This type hash
2278 * is used to iterate over all potential canonical types sharing the same hash.
2279 * For each canonical candidate we check whether type graphs that they form
2280 * (through referenced types in fields and so on) are equivalent using algorithm
2281 * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
2282 * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
2283 * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
2284 * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
2285 * potentially map other structs/unions to their canonical representatives,
2286 * if such relationship hasn't yet been established. This speeds up algorithm
2287 * by eliminating some of the duplicate work.
2289 * If no matching canonical representative was found, the struct/union is marked
2290 * as canonical for itself and is added into the btf_dedup->dedup_table hash map
2291 * for further lookups.
2293 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
2295 struct btf_dedup_node *cand_node;
2296 struct btf_type *cand_type, *t;
2297 /* if we don't find equivalent type, then we are canonical */
2298 __u32 new_id = type_id;
2302 /* already deduped or is in process of deduping (loop detected) */
2303 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2306 t = d->btf->types[type_id];
2307 kind = BTF_INFO_KIND(t->info);
2309 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
2312 h = btf_hash_struct(t);
2313 for_each_dedup_cand(d, h, cand_node) {
2317 * Even though btf_dedup_is_equiv() checks for
2318 * btf_shallow_equal_struct() internally when checking two
2319 * structs (unions) for equivalence, we need to guard here
2320 * against picking a matching FWD type as a dedup candidate.
2321 * This can happen due to a hash collision. In such a case, just
2322 * relying on btf_dedup_is_equiv() would lead to potentially
2323 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
2324 * FWD and compatible STRUCT/UNION are considered equivalent.
2326 cand_type = d->btf->types[cand_node->type_id];
2327 if (!btf_shallow_equal_struct(t, cand_type))
2330 btf_dedup_clear_hypot_map(d);
2331 eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id);
2336 new_id = cand_node->type_id;
2337 btf_dedup_merge_hypot_map(d);
2341 d->map[type_id] = new_id;
2342 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2348 static int btf_dedup_struct_types(struct btf_dedup *d)
2352 for (i = 1; i <= d->btf->nr_types; i++) {
2353 err = btf_dedup_struct_type(d, i);
2361 * Deduplicate reference types.
2363 * Once all primitive and struct/union types have been deduplicated, we can
2364 * easily deduplicate all other (reference) BTF types. This is done in two steps:
2366 * 1. Resolve all referenced type IDs into their canonical type IDs. This
2367 * resolution can be done immediately for primitive and struct/union types
2368 * (because they were deduped in the previous two phases) or recursively for
2369 * reference types. Recursion always terminates at either a primitive or a
2370 * struct/union type, at which point we can "unwind" the chain of reference
2371 * types one by one. There is no danger of encountering cycles, because in the
2372 * C type system the only way to form a type cycle is through a struct/union,
2373 * so any chain of reference types, even one taking part in a type cycle, will
2374 * inevitably reach a struct/union at some point.
2376 * 2. Once all referenced type IDs are resolved into canonical ones, a BTF type
2377 * becomes "stable", in the sense that no further deduplication will cause any
2378 * changes to it. With that, it's now possible to calculate the type's signature
2379 * hash (this time taking referenced type IDs into account) and loop over all
2380 * potential canonical representatives. If no match is found, the current type
2381 * becomes its own canonical representative and is added into
2382 * btf_dedup->dedup_table as another possible canonical representative.
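/*
 * For example (illustrative): for `const int *p`, a PTR references a CONST,
 * which references an INT. Resolving the PTR recurses into the CONST and then
 * the INT; the INT is primitive and already deduplicated, so the CONST can
 * substitute the INT's canonical ID, hash itself and find (or become) the
 * canonical CONST. Unwinding one level further, the PTR then does the same
 * against the canonical CONST.
 */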
2384 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
2386 struct btf_dedup_node *cand_node;
2387 struct btf_type *t, *cand;
2388 /* if we don't find an equivalent type, then we are the representative type */
2389 __u32 new_id = type_id;
2393 if (d->map[type_id] == BTF_IN_PROGRESS_ID)
2395 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2396 return resolve_type_id(d, type_id);
2398 t = d->btf->types[type_id];
2399 d->map[type_id] = BTF_IN_PROGRESS_ID;
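/*
 * The type is marked while the types it references are being resolved; the
 * check at the top of this function detects re-entry into a type that is
 * still in progress, i.e. a reference cycle that is not broken by a
 * struct/union.
 */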
2401 switch (BTF_INFO_KIND(t->info)) {
2402 case BTF_KIND_CONST:
2403 case BTF_KIND_VOLATILE:
2404 case BTF_KIND_RESTRICT:
2406 case BTF_KIND_TYPEDEF:
2408 ref_type_id = btf_dedup_ref_type(d, t->type);
2409 if (ref_type_id < 0)
2411 t->type = ref_type_id;
2413 h = btf_hash_common(t);
2414 for_each_dedup_cand(d, h, cand_node) {
2415 cand = d->btf->types[cand_node->type_id];
2416 if (btf_equal_common(t, cand)) {
2417 new_id = cand_node->type_id;
2423 case BTF_KIND_ARRAY: {
2424 struct btf_array *info = (struct btf_array *)(t + 1);
2426 ref_type_id = btf_dedup_ref_type(d, info->type);
2427 if (ref_type_id < 0)
2429 info->type = ref_type_id;
2431 ref_type_id = btf_dedup_ref_type(d, info->index_type);
2432 if (ref_type_id < 0)
2434 info->index_type = ref_type_id;
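/*
 * With both the element type and the index type rewritten to their
 * canonical IDs, the array itself can be hashed and compared against
 * canonical candidates.
 */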
2436 h = btf_hash_array(t);
2437 for_each_dedup_cand(d, h, cand_node) {
2438 cand = d->btf->types[cand_node->type_id];
2439 if (btf_equal_array(t, cand)) {
2440 new_id = cand_node->type_id;
2447 case BTF_KIND_FUNC_PROTO: {
2448 struct btf_param *param;
2452 ref_type_id = btf_dedup_ref_type(d, t->type);
2453 if (ref_type_id < 0)
2455 t->type = ref_type_id;
2457 vlen = BTF_INFO_VLEN(t->info);
2458 param = (struct btf_param *)(t + 1);
2459 for (i = 0; i < vlen; i++) {
2460 ref_type_id = btf_dedup_ref_type(d, param->type);
2461 if (ref_type_id < 0)
2463 param->type = ref_type_id;
2467 h = btf_hash_fnproto(t);
2468 for_each_dedup_cand(d, h, cand_node) {
2469 cand = d->btf->types[cand_node->type_id];
2470 if (btf_equal_fnproto(t, cand)) {
2471 new_id = cand_node->type_id;
2482 d->map[type_id] = new_id;
2483 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2489 static int btf_dedup_ref_types(struct btf_dedup *d)
2493 for (i = 1; i <= d->btf->nr_types; i++) {
2494 err = btf_dedup_ref_type(d, i);
2498 btf_dedup_table_free(d);
2505 * After we have established a corresponding canonical representative for each
2506 * type, we can now eliminate types that are not canonical and leave only the
2507 * canonical ones laid out sequentially in memory by copying them over
2508 * duplicates. During compaction the btf_dedup->hypot_map array is reused to
2509 * store a map from original type ID to a new compacted type ID, which will be
2510 * used during the next phase to "fix up" type IDs referenced from struct/union and
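/*
 * For example (hypothetical IDs): with d->map = {1->1, 2->1, 3->3, 4->3,
 * 5->5}, only types 1, 3 and 5 are canonical. Compaction copies them into
 * slots 1, 2 and 3 and records hypot_map = {1->1, 3->2, 5->3}; the remapping
 * phase then rewrites a reference to original type 4 into compacted type 2
 * (4 resolves to canonical type 3, and hypot_map[3] == 2).
 */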
2513 static int btf_dedup_compact_types(struct btf_dedup *d)
2515 struct btf_type **new_types;
2516 __u32 next_type_id = 1;
2517 char *types_start, *p;
2520 /* we are going to reuse hypot_map to store compaction remapping */
2521 d->hypot_map[0] = 0;
2522 for (i = 1; i <= d->btf->nr_types; i++)
2523 d->hypot_map[i] = BTF_UNPROCESSED_ID;
2525 types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
2528 for (i = 1; i <= d->btf->nr_types; i++) {
2532 len = btf_type_size(d->btf->types[i]);
2536 memmove(p, d->btf->types[i], len);
2537 d->hypot_map[i] = next_type_id;
2538 d->btf->types[next_type_id] = (struct btf_type *)p;
2543 /* shrink struct btf's internal types index and update btf_header */
2544 d->btf->nr_types = next_type_id - 1;
2545 d->btf->types_size = d->btf->nr_types;
2546 d->btf->hdr->type_len = p - types_start;
2547 new_types = realloc(d->btf->types,
2548 (1 + d->btf->nr_types) * sizeof(struct btf_type *));
2551 d->btf->types = new_types;
2553 /* make sure the string section follows the type information without gaps */
2554 d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
2555 memmove(p, d->btf->strings, d->btf->hdr->str_len);
2556 d->btf->strings = p;
2557 p += d->btf->hdr->str_len;
2559 d->btf->data_size = p - (char *)d->btf->data;
2564 * Figure out the final (deduplicated and compacted) type ID for the provided
2565 * original `type_id` by first resolving it into the corresponding canonical
2566 * type ID and then mapping it to the deduplicated type ID stored in
2567 * btf_dedup->hypot_map, which is populated during the compaction phase.
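/*
 * For example (hypothetical IDs): if original type 4 resolves to canonical
 * type 3, and compaction placed type 3 at index 2, then hypot_map[3] == 2 and
 * 2 is the remapped ID. A canonical ID that was never compacted would still
 * hold BTF_UNPROCESSED_ID and would fail the range check below.
 */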
2569 static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
2571 __u32 resolved_type_id, new_type_id;
2573 resolved_type_id = resolve_type_id(d, type_id);
2574 new_type_id = d->hypot_map[resolved_type_id];
2575 if (new_type_id > BTF_MAX_NR_TYPES)
2581 * Remap referenced type IDs into deduped type IDs.
2583 * After BTF types are deduplicated and compacted, their final type IDs may
2584 * differ from the original ones. The map from an original type ID to its
2585 * corresponding deduped type ID is stored in btf_dedup->hypot_map and is
2586 * populated during the compaction phase. During the remapping phase we rewrite
2587 * all type IDs referenced from any BTF type (e.g., struct fields, func proto
2588 * args, etc.) to their final deduped type IDs.
2590 static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
2592 struct btf_type *t = d->btf->types[type_id];
2595 switch (BTF_INFO_KIND(t->info)) {
2601 case BTF_KIND_CONST:
2602 case BTF_KIND_VOLATILE:
2603 case BTF_KIND_RESTRICT:
2605 case BTF_KIND_TYPEDEF:
2607 r = btf_dedup_remap_type_id(d, t->type);
2613 case BTF_KIND_ARRAY: {
2614 struct btf_array *arr_info = (struct btf_array *)(t + 1);
2616 r = btf_dedup_remap_type_id(d, arr_info->type);
2620 r = btf_dedup_remap_type_id(d, arr_info->index_type);
2623 arr_info->index_type = r;
2627 case BTF_KIND_STRUCT:
2628 case BTF_KIND_UNION: {
2629 struct btf_member *member = (struct btf_member *)(t + 1);
2630 __u16 vlen = BTF_INFO_VLEN(t->info);
2632 for (i = 0; i < vlen; i++) {
2633 r = btf_dedup_remap_type_id(d, member->type);
2642 case BTF_KIND_FUNC_PROTO: {
2643 struct btf_param *param = (struct btf_param *)(t + 1);
2644 __u16 vlen = BTF_INFO_VLEN(t->info);
2646 r = btf_dedup_remap_type_id(d, t->type);
2651 for (i = 0; i < vlen; i++) {
2652 r = btf_dedup_remap_type_id(d, param->type);
2668 static int btf_dedup_remap_types(struct btf_dedup *d)
2672 for (i = 1; i <= d->btf->nr_types; i++) {
2673 r = btf_dedup_remap_type(d, i);