1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/btf_ids.h>
22 #include <linux/skmsg.h>
23 #include <linux/perf_event.h>
24 #include <linux/bsearch.h>
25 #include <linux/kobject.h>
26 #include <linux/sysfs.h>
28 #include "../tools/lib/bpf/relo_core.h"
30 /* BTF (BPF Type Format) is the metadata format which describes
31 * the data types of BPF program/map. Hence, it basically focuses
32 * on the C programming language which the modern BPF is primarily
37 * The BTF data is stored under the ".BTF" ELF section
41 * Each 'struct btf_type' object describes a C data type.
42 * Depending on the type it is describing, a 'struct btf_type'
43 * object may be followed by more data. For example,
44 * to describe an array, 'struct btf_type' is followed by
47 * 'struct btf_type' and any extra data following it are
52 * The BTF type section contains a list of 'struct btf_type' objects.
53 * Each one describes a C type. Recall from the above section
54 * that a 'struct btf_type' object could be immediately followed by extra
55 * data in order to describe some particular C types.
59 * Each btf_type object is identified by a type_id. The type_id
60 * is implied by the location of the btf_type object in
61 * the BTF type section. The first one has type_id 1. The second
62 * one has type_id 2...etc. Hence, an earlier btf_type has
65 * A btf_type object may refer to another btf_type object by using
66 * type_id (i.e. the "type" in the "struct btf_type").
68 * NOTE that we cannot assume any reference-order.
69 * A btf_type object can refer to an earlier btf_type object
70 * but it can also refer to a later btf_type object.
72 * For example, to describe "const void *", a btf_type
73 * object describing "const" may refer to another btf_type
74 * object describing "void *". This type-reference is done
75 * by specifying type_id:
77 * [1] CONST (anon) type_id=2
78 * [2] PTR (anon) type_id=0
80 * The above is the btf_verifier debug log:
81 * - Each line starting with "[?]" is a btf_type object
82 * - [?] is the type_id of the btf_type object.
83 * - CONST/PTR is the BTF_KIND_XXX
84 * - "(anon)" is the name of the type. It just
85 * happens that CONST and PTR have no name.
86 * - type_id=XXX is the 'u32 type' in btf_type
88 * NOTE: "void" has type_id 0
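 *
 * As an illustrative sketch (not a verbatim verifier dump), "int *" is
 * encoded the same way: a PTR btf_type whose "type" field holds the
 * type_id of an INT btf_type, e.g.
 *
 * [1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 * [2] PTR (anon) type_id=1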
92 * The BTF string section contains the names used by the type section.
93 * Each string is referred to by an "offset" from the beginning of the
96 * Each string is '\0' terminated.
98 * The first character in the string section must be '\0'
99 * which is used to mean 'anonymous'. Some btf_type may not
105 * To verify BTF data, two passes are needed.
109 * The first pass is to collect all btf_type objects into
110 * an array: "btf->types".
112 * Depending on the C type that a btf_type is describing,
113 * a btf_type may be followed by extra data. We don't know
114 * how many btf_type objects there are, and more importantly we don't
115 * know where each btf_type is located in the type section.
117 * Without knowing the location of each type_id, most verifications
118 * cannot be done. e.g. an earlier btf_type may refer to a later
119 * btf_type (recall the "const void *" above), so we cannot
120 * check this type-reference in the first pass.
122 * The first pass still does some verification (e.g.
123 * checking that the name is a valid offset into the string section).
127 * The main focus is to resolve a btf_type that is referring
130 * We have to ensure the referring type:
131 * 1) does exist in the BTF (i.e. in btf->types[])
132 * 2) does not cause a loop:
141 * btf_type_needs_resolve() decides if a btf_type needs
144 * The needs_resolve type implements the "resolve()" ops which
145 * essentially does a DFS and detects back edges.
147 * During resolve (or DFS), different C types have different
148 * "RESOLVED" conditions.
150 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
151 * members because a member is always referring to another
152 * type. A struct's member can be treated as "RESOLVED" if
153 * it is referring to a BTF_KIND_PTR. Otherwise, the
154 * following valid C struct would be rejected:
161 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
162 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
163 * detect a pointer loop, e.g.:
164 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
166 * +-----------------------------------------+
170 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
171 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
172 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
173 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
174 #define BITS_ROUNDUP_BYTES(bits) \
175 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
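/* Worked example (illustrative only): a 12-bit field needs two bytes:
 * BITS_ROUNDDOWN_BYTES(12) == 1, BITS_PER_BYTE_MASKED(12) == 4, so
 * BITS_ROUNDUP_BYTES(12) == 1 + !!4 == 2.
 */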
177 #define BTF_INFO_MASK 0x9f00ffff
178 #define BTF_INT_MASK 0x0fffffff
179 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
180 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
182 /* 16MB for 64k structs, each with 16 members, and
183 * a few MB of space for the string section.
184 * The hard limit is S32_MAX.
186 #define BTF_MAX_SIZE (16 * 1024 * 1024)
188 #define for_each_member_from(i, from, struct_type, member) \
189 for (i = from, member = btf_type_member(struct_type) + from; \
190 i < btf_type_vlen(struct_type); \
193 #define for_each_vsi_from(i, from, struct_type, member) \
194 for (i = from, member = btf_type_var_secinfo(struct_type) + from; \
195 i < btf_type_vlen(struct_type); \
199 DEFINE_SPINLOCK(btf_idr_lock);
201 enum btf_kfunc_hook {
204 BTF_KFUNC_HOOK_STRUCT_OPS,
209 BTF_KFUNC_SET_MAX_CNT = 32,
212 struct btf_kfunc_set_tab {
213 struct btf_id_set *sets[BTF_KFUNC_HOOK_MAX][BTF_KFUNC_TYPE_MAX];
218 struct btf_type **types;
223 struct btf_header hdr;
224 u32 nr_types; /* includes VOID for base BTF */
230 struct btf_kfunc_set_tab *kfunc_set_tab;
232 /* split BTF support */
233 struct btf *base_btf;
234 u32 start_id; /* first type ID in this BTF (0 for base BTF) */
235 u32 start_str_off; /* first string offset (0 for base BTF) */
236 char name[MODULE_NAME_LEN];
240 enum verifier_phase {
245 struct resolve_vertex {
246 const struct btf_type *t;
258 RESOLVE_TBD, /* To Be Determined */
259 RESOLVE_PTR, /* Resolving for Pointer */
260 RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
265 #define MAX_RESOLVE_DEPTH 32
267 struct btf_sec_info {
272 struct btf_verifier_env {
275 struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
276 struct bpf_verifier_log log;
279 enum verifier_phase phase;
280 enum resolve_mode resolve_mode;
283 static const char * const btf_kind_str[NR_BTF_KINDS] = {
284 [BTF_KIND_UNKN] = "UNKNOWN",
285 [BTF_KIND_INT] = "INT",
286 [BTF_KIND_PTR] = "PTR",
287 [BTF_KIND_ARRAY] = "ARRAY",
288 [BTF_KIND_STRUCT] = "STRUCT",
289 [BTF_KIND_UNION] = "UNION",
290 [BTF_KIND_ENUM] = "ENUM",
291 [BTF_KIND_FWD] = "FWD",
292 [BTF_KIND_TYPEDEF] = "TYPEDEF",
293 [BTF_KIND_VOLATILE] = "VOLATILE",
294 [BTF_KIND_CONST] = "CONST",
295 [BTF_KIND_RESTRICT] = "RESTRICT",
296 [BTF_KIND_FUNC] = "FUNC",
297 [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
298 [BTF_KIND_VAR] = "VAR",
299 [BTF_KIND_DATASEC] = "DATASEC",
300 [BTF_KIND_FLOAT] = "FLOAT",
301 [BTF_KIND_DECL_TAG] = "DECL_TAG",
302 [BTF_KIND_TYPE_TAG] = "TYPE_TAG",
305 const char *btf_type_str(const struct btf_type *t)
307 return btf_kind_str[BTF_INFO_KIND(t->info)];
310 /* Chunk size we use in safe copy of data to be shown. */
311 #define BTF_SHOW_OBJ_SAFE_SIZE 32
314 * This is the maximum size of a base type value (equivalent to a
315 * 128-bit int); if we are at the end of our safe buffer and have
316 * less than 16 bytes of space we can't be assured of being able
317 * to copy the next type safely, so in such cases we will initiate
320 #define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16
323 #define BTF_SHOW_NAME_SIZE 80
326 * Common data to all BTF show operations. Private show functions can add
327 * their own data to a structure containing a struct btf_show and consult it
328 * in the show callback. See btf_type_show() below.
330 * One challenge with showing nested data is we want to skip 0-valued
331 * data, but in order to figure out whether a nested object is all zeros
332 * we need to walk through it. As a result, we need to make two passes
333 * when handling structs, unions and arrays; the first pass simply looks
334 * for nonzero data, while the second actually does the display. The first
335 * pass is signalled by show->state.depth_check being set, and if we
336 * encounter a non-zero value we set show->state.depth_to_show to
337 * the depth at which we encountered it. When we have completed the
338 * first pass, we will know if anything needs to be displayed if
339 * depth_to_show > depth. See btf_[struct,array]_show() for the
340 * implementation of this.
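 *
 * As a sketch (not traced from real output): when showing a
 * "struct { int a; int b; }" whose members are both zero, the first
 * pass never records a greater depth in depth_to_show, so
 * depth_to_show does not exceed the struct's depth and the display
 * pass shows nothing for that object.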
342 * Another problem is we want to ensure the data for display is safe to
343 * access. To support this, the anonymous "struct {} obj" tracks the data
344 * object and our safe copy of it. We copy portions of the data needed
345 * to the object "copy" buffer, but because its size is limited to
346 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
347 * traverse larger objects for display.
349 * The various data type show functions all start with a call to
350 * btf_show_start_type() which returns a pointer to the safe copy
351 * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
352 * raw data itself). btf_show_obj_safe() is responsible for
353 * using copy_from_kernel_nofault() to update the safe data if necessary
354 * as we traverse the object's data. skbuff-like semantics are
357 * - obj.head points to the start of the toplevel object for display
358 * - obj.size is the size of the toplevel object
359 * - obj.data points to the current point in the original data at
360 * which our safe data starts. obj.data will advance as we copy
361 * portions of the data.
363 * In most cases a single copy will suffice, but larger data structures
364 * such as "struct task_struct" will require many copies. The logic in
365 * btf_show_obj_safe() determines whether a new
366 * copy_from_kernel_nofault() is needed.
370 void *target; /* target of show operation (seq file, buffer) */
371 void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
372 const struct btf *btf;
373 /* below are used during iteration */
382 int status; /* non-zero for error */
383 const struct btf_type *type;
384 const struct btf_member *member;
385 char name[BTF_SHOW_NAME_SIZE]; /* space for member name/type */
391 u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
395 struct btf_kind_operations {
396 s32 (*check_meta)(struct btf_verifier_env *env,
397 const struct btf_type *t,
399 int (*resolve)(struct btf_verifier_env *env,
400 const struct resolve_vertex *v);
401 int (*check_member)(struct btf_verifier_env *env,
402 const struct btf_type *struct_type,
403 const struct btf_member *member,
404 const struct btf_type *member_type);
405 int (*check_kflag_member)(struct btf_verifier_env *env,
406 const struct btf_type *struct_type,
407 const struct btf_member *member,
408 const struct btf_type *member_type);
409 void (*log_details)(struct btf_verifier_env *env,
410 const struct btf_type *t);
411 void (*show)(const struct btf *btf, const struct btf_type *t,
412 u32 type_id, void *data, u8 bits_offsets,
413 struct btf_show *show);
416 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
417 static struct btf_type btf_void;
419 static int btf_resolve(struct btf_verifier_env *env,
420 const struct btf_type *t, u32 type_id);
422 static bool btf_type_is_modifier(const struct btf_type *t)
424 /* Some of them are not strictly C modifiers
425 * but they are grouped into the same bucket
427 * A type (t) that refers to another
428 * type through t->type AND its size cannot
429 * be determined without following the t->type.
431 * ptr does not fall into this bucket
432 * because its size is always sizeof(void *).
434 switch (BTF_INFO_KIND(t->info)) {
435 case BTF_KIND_TYPEDEF:
436 case BTF_KIND_VOLATILE:
438 case BTF_KIND_RESTRICT:
439 case BTF_KIND_TYPE_TAG:
446 bool btf_type_is_void(const struct btf_type *t)
448 return t == &btf_void;
451 static bool btf_type_is_fwd(const struct btf_type *t)
453 return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
456 static bool btf_type_nosize(const struct btf_type *t)
458 return btf_type_is_void(t) || btf_type_is_fwd(t) ||
459 btf_type_is_func(t) || btf_type_is_func_proto(t);
462 static bool btf_type_nosize_or_null(const struct btf_type *t)
464 return !t || btf_type_nosize(t);
467 static bool __btf_type_is_struct(const struct btf_type *t)
469 return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
472 static bool btf_type_is_array(const struct btf_type *t)
474 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
477 static bool btf_type_is_datasec(const struct btf_type *t)
479 return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
482 static bool btf_type_is_decl_tag(const struct btf_type *t)
484 return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
487 static bool btf_type_is_decl_tag_target(const struct btf_type *t)
489 return btf_type_is_func(t) || btf_type_is_struct(t) ||
490 btf_type_is_var(t) || btf_type_is_typedef(t);
493 u32 btf_nr_types(const struct btf *btf)
498 total += btf->nr_types;
505 s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
507 const struct btf_type *t;
511 total = btf_nr_types(btf);
512 for (i = 1; i < total; i++) {
513 t = btf_type_by_id(btf, i);
514 if (BTF_INFO_KIND(t->info) != kind)
517 tname = btf_name_by_offset(btf, t->name_off);
518 if (!strcmp(tname, name))
525 const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
528 const struct btf_type *t = btf_type_by_id(btf, id);
530 while (btf_type_is_modifier(t)) {
532 t = btf_type_by_id(btf, t->type);
541 const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
544 const struct btf_type *t;
546 t = btf_type_skip_modifiers(btf, id, NULL);
547 if (!btf_type_is_ptr(t))
550 return btf_type_skip_modifiers(btf, t->type, res_id);
553 const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
556 const struct btf_type *ptype;
558 ptype = btf_type_resolve_ptr(btf, id, res_id);
559 if (ptype && btf_type_is_func_proto(ptype))
565 /* Types that act only as a source, not sink or intermediate
566 * type when resolving.
568 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
570 return btf_type_is_var(t) ||
571 btf_type_is_decl_tag(t) ||
572 btf_type_is_datasec(t);
575 /* What types need to be resolved?
577 * btf_type_is_modifier() is an obvious one.
579 * btf_type_is_struct() because its member refers to
580 * another type (through member->type).
582 * btf_type_is_var() because the variable refers to
583 * another type. btf_type_is_datasec() holds multiple
584 * btf_type_is_var() types that need resolving.
586 * btf_type_is_array() because its element (array->type)
587 * refers to another type. An array can be thought of as a
588 * special case of a struct where the same member type is
589 * repeated array->nelems times.
591 static bool btf_type_needs_resolve(const struct btf_type *t)
593 return btf_type_is_modifier(t) ||
594 btf_type_is_ptr(t) ||
595 btf_type_is_struct(t) ||
596 btf_type_is_array(t) ||
597 btf_type_is_var(t) ||
598 btf_type_is_decl_tag(t) ||
599 btf_type_is_datasec(t);
602 /* t->size can be used */
603 static bool btf_type_has_size(const struct btf_type *t)
605 switch (BTF_INFO_KIND(t->info)) {
607 case BTF_KIND_STRUCT:
610 case BTF_KIND_DATASEC:
618 static const char *btf_int_encoding_str(u8 encoding)
622 else if (encoding == BTF_INT_SIGNED)
624 else if (encoding == BTF_INT_CHAR)
626 else if (encoding == BTF_INT_BOOL)
632 static u32 btf_type_int(const struct btf_type *t)
634 return *(u32 *)(t + 1);
637 static const struct btf_array *btf_type_array(const struct btf_type *t)
639 return (const struct btf_array *)(t + 1);
642 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
644 return (const struct btf_enum *)(t + 1);
647 static const struct btf_var *btf_type_var(const struct btf_type *t)
649 return (const struct btf_var *)(t + 1);
652 static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
654 return (const struct btf_decl_tag *)(t + 1);
657 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
659 return kind_ops[BTF_INFO_KIND(t->info)];
662 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
664 if (!BTF_STR_OFFSET_VALID(offset))
667 while (offset < btf->start_str_off)
670 offset -= btf->start_str_off;
671 return offset < btf->hdr.str_len;
674 static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
676 if ((first ? !isalpha(c) :
679 ((c == '.' && !dot_ok) ||
685 static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
687 while (offset < btf->start_str_off)
690 offset -= btf->start_str_off;
691 if (offset < btf->hdr.str_len)
692 return &btf->strings[offset];
697 static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
699 /* offset must be valid */
700 const char *src = btf_str_by_offset(btf, offset);
701 const char *src_limit;
703 if (!__btf_name_char_ok(*src, true, dot_ok))
706 /* set a limit on identifier length */
707 src_limit = src + KSYM_NAME_LEN;
709 while (*src && src < src_limit) {
710 if (!__btf_name_char_ok(*src, false, dot_ok))
718 /* Only C-style identifier is permitted. This can be relaxed if
721 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
723 return __btf_name_valid(btf, offset, false);
726 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
728 return __btf_name_valid(btf, offset, true);
731 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
738 name = btf_str_by_offset(btf, offset);
739 return name ?: "(invalid-name-offset)";
742 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
744 return btf_str_by_offset(btf, offset);
747 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
749 while (type_id < btf->start_id)
752 type_id -= btf->start_id;
753 if (type_id >= btf->nr_types)
755 return btf->types[type_id];
759 * Regular int is not a bit field and it must be either
760 * u8/u16/u32/u64 or __int128.
762 static bool btf_type_int_is_regular(const struct btf_type *t)
764 u8 nr_bits, nr_bytes;
767 int_data = btf_type_int(t);
768 nr_bits = BTF_INT_BITS(int_data);
769 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
770 if (BITS_PER_BYTE_MASKED(nr_bits) ||
771 BTF_INT_OFFSET(int_data) ||
772 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
773 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
774 nr_bytes != (2 * sizeof(u64)))) {
782 * Check that given struct member is a regular int with expected
785 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
786 const struct btf_member *m,
787 u32 expected_offset, u32 expected_size)
789 const struct btf_type *t;
794 t = btf_type_id_size(btf, &id, NULL);
795 if (!t || !btf_type_is_int(t))
798 int_data = btf_type_int(t);
799 nr_bits = BTF_INT_BITS(int_data);
800 if (btf_type_kflag(s)) {
801 u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
802 u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
804 /* if kflag set, int should be a regular int and
805 * bit offset should be at byte boundary.
807 return !bitfield_size &&
808 BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
809 BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
812 if (BTF_INT_OFFSET(int_data) ||
813 BITS_PER_BYTE_MASKED(m->offset) ||
814 BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
815 BITS_PER_BYTE_MASKED(nr_bits) ||
816 BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
822 /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
823 static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
826 const struct btf_type *t = btf_type_by_id(btf, id);
828 while (btf_type_is_modifier(t) &&
829 BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
830 t = btf_type_by_id(btf, t->type);
836 #define BTF_SHOW_MAX_ITER 10
838 #define BTF_KIND_BIT(kind) (1ULL << kind)
841 * Populate show->state.name with type name information.
842 * Format of type name is
844 * [.member_name = ] (type_name)
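 *
 * Hypothetical examples of the resulting name strings (member names are
 * made up, not taken from real output): ".refcount = (int)" for an int
 * member named "refcount", or ".next = (struct list_head *)" for a
 * pointer member named "next".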
846 static const char *btf_show_name(struct btf_show *show)
848 /* BTF_SHOW_MAX_ITER array suffixes "[]" */
849 const char *array_suffixes = "[][][][][][][][][][]";
850 const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
851 /* BTF_SHOW_MAX_ITER pointer suffixes "*" */
852 const char *ptr_suffixes = "**********";
853 const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
854 const char *name = NULL, *prefix = "", *parens = "";
855 const struct btf_member *m = show->state.member;
856 const struct btf_type *t;
857 const struct btf_array *array;
858 u32 id = show->state.type_id;
859 const char *member = NULL;
860 bool show_member = false;
864 show->state.name[0] = '\0';
867 * Don't show type name if we're showing an array member;
868 * in that case we show the array type so we don't need to repeat
869 * ourselves for each member.
871 if (show->state.array_member)
874 /* Retrieve member name, if any. */
876 member = btf_name_by_offset(show->btf, m->name_off);
877 show_member = strlen(member) > 0;
882 * Start with type_id, as we have resolved the struct btf_type *
883 * via btf_modifier_show() past the parent typedef to the child
884 * struct, int etc it is defined as. In such cases, the type_id
885 * still represents the starting type while the struct btf_type *
886 * in our show->state points at the resolved type of the typedef.
888 t = btf_type_by_id(show->btf, id);
893 * The goal here is to build up the right number of pointer and
894 * array suffixes while ensuring the type name for a typedef
895 * is represented. Along the way we accumulate a list of
896 * BTF kinds we have encountered, since these will inform later
897 * display; for example, a pointer to a struct will not require an
898 * opening "{"; we will just display the pointer value.
900 * We also want to accumulate the right number of pointer or array
901 * indices in the format string while iterating until we get to
902 * the typedef/pointee/array member target type.
904 * We start by pointing at the end of pointer and array suffix
905 * strings; as we accumulate pointers and arrays we move the pointer
906 * or array string backwards so it will show the expected number of
907 * '*' or '[]' for the type. BTF_SHOW_MAX_ITER levels of nesting of pointers
908 * and/or arrays and typedefs are supported as a precaution.
910 * We also want to get the typedef name while proceeding to resolve
911 * the type it points to so that we can add parentheses if it is a
912 * "typedef struct" etc.
914 for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
916 switch (BTF_INFO_KIND(t->info)) {
917 case BTF_KIND_TYPEDEF:
919 name = btf_name_by_offset(show->btf,
921 kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
925 kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
929 array = btf_type_array(t);
930 if (array_suffix > array_suffixes)
935 kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
936 if (ptr_suffix > ptr_suffixes)
946 t = btf_type_skip_qualifiers(show->btf, id);
948 /* We may not be able to represent this type; bail to be safe */
949 if (i == BTF_SHOW_MAX_ITER)
953 name = btf_name_by_offset(show->btf, t->name_off);
955 switch (BTF_INFO_KIND(t->info)) {
956 case BTF_KIND_STRUCT:
958 prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
960 /* if it's an array of struct/union, parens is already set */
961 if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
971 /* pointer does not require parens */
972 if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
974 /* typedef does not require struct/union/enum prefix */
975 if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
981 /* Even if we don't want type name info, we want parentheses etc */
982 if (show->flags & BTF_SHOW_NONAME)
983 snprintf(show->state.name, sizeof(show->state.name), "%s",
986 snprintf(show->state.name, sizeof(show->state.name),
987 "%s%s%s(%s%s%s%s%s%s)%s",
988 /* first 3 strings comprise ".member = " */
989 show_member ? "." : "",
990 show_member ? member : "",
991 show_member ? " = " : "",
992 /* ...next is our prefix (struct, enum, etc) */
994 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
995 /* ...this is the type name itself */
997 /* ...suffixed by the appropriate '*', '[]' suffixes */
998 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
999 array_suffix, parens);
1001 return show->state.name;
1004 static const char *__btf_show_indent(struct btf_show *show)
1006 const char *indents = " ";
1007 const char *indent = &indents[strlen(indents)];
1009 if ((indent - show->state.depth) >= indents)
1010 return indent - show->state.depth;
1014 static const char *btf_show_indent(struct btf_show *show)
1016 return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
1019 static const char *btf_show_newline(struct btf_show *show)
1021 return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1024 static const char *btf_show_delim(struct btf_show *show)
1026 if (show->state.depth == 0)
1029 if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
1030 BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
1036 __printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
1040 if (!show->state.depth_check) {
1041 va_start(args, fmt);
1042 show->showfn(show, fmt, args);
1047 /* Macros are used here as btf_show_type_value[s]() prepends and appends
1048 * format specifiers to the format specifier passed in; these do the work of
1049 * adding indentation, delimiters etc while the caller simply has to specify
1050 * the type value(s) in the format specifier + value(s).
1052 #define btf_show_type_value(show, fmt, value) \
1054 if ((value) != 0 || (show->flags & BTF_SHOW_ZERO) || \
1055 show->state.depth == 0) { \
1056 btf_show(show, "%s%s" fmt "%s%s", \
1057 btf_show_indent(show), \
1058 btf_show_name(show), \
1059 value, btf_show_delim(show), \
1060 btf_show_newline(show)); \
1061 if (show->state.depth > show->state.depth_to_show) \
1062 show->state.depth_to_show = show->state.depth; \
1066 #define btf_show_type_values(show, fmt, ...) \
1068 btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show), \
1069 btf_show_name(show), \
1070 __VA_ARGS__, btf_show_delim(show), \
1071 btf_show_newline(show)); \
1072 if (show->state.depth > show->state.depth_to_show) \
1073 show->state.depth_to_show = show->state.depth; \
1076 /* How much is left to copy to safe buffer after @data? */
1077 static int btf_show_obj_size_left(struct btf_show *show, void *data)
1079 return show->obj.head + show->obj.size - data;
1082 /* Is object pointed to by @data of @size already copied to our safe buffer? */
1083 static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
1085 return data >= show->obj.data &&
1086 (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
1090 * If object pointed to by @data of @size falls within our safe buffer, return
1091 * the equivalent pointer to the same safe data. Assumes
1092 * copy_from_kernel_nofault() has already happened and our safe buffer is
1095 static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
1097 if (btf_show_obj_is_safe(show, data, size))
1098 return show->obj.safe + (data - show->obj.data);
1103 * Return a safe-to-access version of data pointed to by @data.
1104 * We do this by copying the relevant amount of information
1105 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
1107 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1108 * safe copy is needed.
1110 * Otherwise we need to determine if we have the required amount
1111 * of data (determined by the @data pointer and the size of the
1112 * largest base type we can encounter (represented by
1113 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
1114 * that we will be able to print some of the current object,
1115 * and if more is needed a copy will be triggered.
1116 * Some objects such as structs will not fit into the buffer;
1117 * in such cases additional copies may be needed when we iterate
1118 * over their members.
1120 * btf_show_obj_safe() is used to return a safe buffer for
1121 * btf_show_start_type(); this ensures that as we recurse into
1122 * nested types we always have safe data for the given type.
1123 * This approach is somewhat wasteful; it's possible for example
1124 * that when iterating over a large union we'll end up copying the
1125 * same data repeatedly, but the goal is safety not performance.
1126 * We use stack data as opposed to per-CPU buffers because the
1127 * iteration over a type can take some time, and preemption handling
1128 * would greatly complicate use of the safe buffer.
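 *
 * Concretely (a restatement of the behaviour above, not an extra rule):
 * with the 32-byte BTF_SHOW_OBJ_SAFE_SIZE buffer, a fresh
 * copy_from_kernel_nofault() is triggered roughly whenever fewer than
 * BTF_SHOW_OBJ_BASE_TYPE_SIZE (16) bytes of safe data remain for the
 * value about to be shown, so a large object such as task_struct is
 * walked at most 32 bytes at a time.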
1130 static void *btf_show_obj_safe(struct btf_show *show,
1131 const struct btf_type *t,
1134 const struct btf_type *rt;
1135 int size_left, size;
1138 if (show->flags & BTF_SHOW_UNSAFE)
1141 rt = btf_resolve_size(show->btf, t, &size);
1143 show->state.status = PTR_ERR(rt);
1148 * Is this toplevel object? If so, set total object size and
1149 * initialize pointers. Otherwise check if we still fall within
1150 * our safe object data.
1152 if (show->state.depth == 0) {
1153 show->obj.size = size;
1154 show->obj.head = data;
1157 * If the size of the current object is > our remaining
1158 * safe buffer we _may_ need to do a new copy. However
1159 * consider the case of a nested struct; its size pushes
1160 * us over the safe buffer limit, but showing any individual
1161 * struct members does not. In such cases, we don't need
1162 * to initiate a fresh copy yet; however we definitely need
1163 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
1164 * in our buffer, regardless of the current object size.
1165 * The logic here is that as we resolve types we will
1166 * hit a base type at some point, and we need to be sure
1167 * the next chunk of data is available to display
1168 * that type info safely. We cannot rely on the size of
1169 * the current object here because it may be much larger
1170 * than our current buffer (e.g. task_struct is 8k).
1171 * All we want to do here is ensure that we can print the
1172 * next basic type, which we can if either
1173 * - the current type size is within the safe buffer; or
1174 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1177 safe = __btf_show_obj_safe(show, data,
1179 BTF_SHOW_OBJ_BASE_TYPE_SIZE));
1183 * We need a new copy to our safe object, either because we haven't
1184 * yet copied and are initializing safe data, or because the data
1185 * we want falls outside the boundaries of the safe object.
1188 size_left = btf_show_obj_size_left(show, data);
1189 if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
1190 size_left = BTF_SHOW_OBJ_SAFE_SIZE;
1191 show->state.status = copy_from_kernel_nofault(show->obj.safe,
1193 if (!show->state.status) {
1194 show->obj.data = data;
1195 safe = show->obj.safe;
1203 * Set the type we are starting to show and return a safe data pointer
1204 * to be used for showing the associated data.
1206 static void *btf_show_start_type(struct btf_show *show,
1207 const struct btf_type *t,
1208 u32 type_id, void *data)
1210 show->state.type = t;
1211 show->state.type_id = type_id;
1212 show->state.name[0] = '\0';
1214 return btf_show_obj_safe(show, t, data);
1217 static void btf_show_end_type(struct btf_show *show)
1219 show->state.type = NULL;
1220 show->state.type_id = 0;
1221 show->state.name[0] = '\0';
1224 static void *btf_show_start_aggr_type(struct btf_show *show,
1225 const struct btf_type *t,
1226 u32 type_id, void *data)
1228 void *safe_data = btf_show_start_type(show, t, type_id, data);
1233 btf_show(show, "%s%s%s", btf_show_indent(show),
1234 btf_show_name(show),
1235 btf_show_newline(show));
1236 show->state.depth++;
1240 static void btf_show_end_aggr_type(struct btf_show *show,
1243 show->state.depth--;
1244 btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
1245 btf_show_delim(show), btf_show_newline(show));
1246 btf_show_end_type(show);
1249 static void btf_show_start_member(struct btf_show *show,
1250 const struct btf_member *m)
1252 show->state.member = m;
1255 static void btf_show_start_array_member(struct btf_show *show)
1257 show->state.array_member = 1;
1258 btf_show_start_member(show, NULL);
1261 static void btf_show_end_member(struct btf_show *show)
1263 show->state.member = NULL;
1266 static void btf_show_end_array_member(struct btf_show *show)
1268 show->state.array_member = 0;
1269 btf_show_end_member(show);
1272 static void *btf_show_start_array_type(struct btf_show *show,
1273 const struct btf_type *t,
1278 show->state.array_encoding = array_encoding;
1279 show->state.array_terminated = 0;
1280 return btf_show_start_aggr_type(show, t, type_id, data);
1283 static void btf_show_end_array_type(struct btf_show *show)
1285 show->state.array_encoding = 0;
1286 show->state.array_terminated = 0;
1287 btf_show_end_aggr_type(show, "]");
1290 static void *btf_show_start_struct_type(struct btf_show *show,
1291 const struct btf_type *t,
1295 return btf_show_start_aggr_type(show, t, type_id, data);
1298 static void btf_show_end_struct_type(struct btf_show *show)
1300 btf_show_end_aggr_type(show, "}");
1303 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
1304 const char *fmt, ...)
1308 va_start(args, fmt);
1309 bpf_verifier_vlog(log, fmt, args);
1313 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
1314 const char *fmt, ...)
1316 struct bpf_verifier_log *log = &env->log;
1319 if (!bpf_verifier_log_needed(log))
1322 va_start(args, fmt);
1323 bpf_verifier_vlog(log, fmt, args);
1327 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
1328 const struct btf_type *t,
1330 const char *fmt, ...)
1332 struct bpf_verifier_log *log = &env->log;
1333 u8 kind = BTF_INFO_KIND(t->info);
1334 struct btf *btf = env->btf;
1337 if (!bpf_verifier_log_needed(log))
1340 /* btf verifier prints all types it is processing via
1341 * btf_verifier_log_type(..., fmt = NULL).
1342 * Skip those prints for in-kernel BTF verification.
1344 if (log->level == BPF_LOG_KERNEL && !fmt)
1347 __btf_verifier_log(log, "[%u] %s %s%s",
1350 __btf_name_by_offset(btf, t->name_off),
1351 log_details ? " " : "");
1354 btf_type_ops(t)->log_details(env, t);
1357 __btf_verifier_log(log, " ");
1358 va_start(args, fmt);
1359 bpf_verifier_vlog(log, fmt, args);
1363 __btf_verifier_log(log, "\n");
1366 #define btf_verifier_log_type(env, t, ...) \
1367 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
1368 #define btf_verifier_log_basic(env, t, ...) \
1369 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
1372 static void btf_verifier_log_member(struct btf_verifier_env *env,
1373 const struct btf_type *struct_type,
1374 const struct btf_member *member,
1375 const char *fmt, ...)
1377 struct bpf_verifier_log *log = &env->log;
1378 struct btf *btf = env->btf;
1381 if (!bpf_verifier_log_needed(log))
1384 if (log->level == BPF_LOG_KERNEL && !fmt)
1386 /* The CHECK_META phase already did a btf dump.
1388 * If a member is logged again, it must have hit an error while
1389 * parsing this member. It is useful to print out which
1390 * struct this member belongs to.
1392 if (env->phase != CHECK_META)
1393 btf_verifier_log_type(env, struct_type, NULL);
1395 if (btf_type_kflag(struct_type))
1396 __btf_verifier_log(log,
1397 "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
1398 __btf_name_by_offset(btf, member->name_off),
1400 BTF_MEMBER_BITFIELD_SIZE(member->offset),
1401 BTF_MEMBER_BIT_OFFSET(member->offset));
1403 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
1404 __btf_name_by_offset(btf, member->name_off),
1405 member->type, member->offset);
1408 __btf_verifier_log(log, " ");
1409 va_start(args, fmt);
1410 bpf_verifier_vlog(log, fmt, args);
1414 __btf_verifier_log(log, "\n");
1418 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
1419 const struct btf_type *datasec_type,
1420 const struct btf_var_secinfo *vsi,
1421 const char *fmt, ...)
1423 struct bpf_verifier_log *log = &env->log;
1426 if (!bpf_verifier_log_needed(log))
1428 if (log->level == BPF_LOG_KERNEL && !fmt)
1430 if (env->phase != CHECK_META)
1431 btf_verifier_log_type(env, datasec_type, NULL);
1433 __btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
1434 vsi->type, vsi->offset, vsi->size);
1436 __btf_verifier_log(log, " ");
1437 va_start(args, fmt);
1438 bpf_verifier_vlog(log, fmt, args);
1442 __btf_verifier_log(log, "\n");
1445 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
1448 struct bpf_verifier_log *log = &env->log;
1449 const struct btf *btf = env->btf;
1450 const struct btf_header *hdr;
1452 if (!bpf_verifier_log_needed(log))
1455 if (log->level == BPF_LOG_KERNEL)
1458 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1459 __btf_verifier_log(log, "version: %u\n", hdr->version);
1460 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1461 __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1462 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1463 __btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1464 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1465 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
1466 __btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
1469 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
1471 struct btf *btf = env->btf;
1473 if (btf->types_size == btf->nr_types) {
1474 /* Expand 'types' array */
1476 struct btf_type **new_types;
1477 u32 expand_by, new_size;
1479 if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
1480 btf_verifier_log(env, "Exceeded max num of types");
1484 expand_by = max_t(u32, btf->types_size >> 2, 16);
1485 new_size = min_t(u32, BTF_MAX_TYPE,
1486 btf->types_size + expand_by);
1488 new_types = kvcalloc(new_size, sizeof(*new_types),
1489 GFP_KERNEL | __GFP_NOWARN);
1493 if (btf->nr_types == 0) {
1494 if (!btf->base_btf) {
1495 /* lazily init VOID type */
1496 new_types[0] = &btf_void;
1500 memcpy(new_types, btf->types,
1501 sizeof(*btf->types) * btf->nr_types);
1505 btf->types = new_types;
1506 btf->types_size = new_size;
1509 btf->types[btf->nr_types++] = t;
1514 static int btf_alloc_id(struct btf *btf)
1518 idr_preload(GFP_KERNEL);
1519 spin_lock_bh(&btf_idr_lock);
1520 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
1523 spin_unlock_bh(&btf_idr_lock);
1526 if (WARN_ON_ONCE(!id))
1529 return id > 0 ? 0 : id;
1532 static void btf_free_id(struct btf *btf)
1534 unsigned long flags;
1537 * In map-in-map, calling map_delete_elem() on outer
1538 * map will call bpf_map_put on the inner map.
1539 * It will then eventually call btf_free_id()
1540 * on the inner map. Some of the map_delete_elem()
1541 * implementation may have irq disabled, so
1542 * we need to use the _irqsave() version instead
1543 * of the _bh() version.
1545 spin_lock_irqsave(&btf_idr_lock, flags);
1546 idr_remove(&btf_idr, btf->id);
1547 spin_unlock_irqrestore(&btf_idr_lock, flags);
1550 static void btf_free_kfunc_set_tab(struct btf *btf)
1552 struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
1557 /* For module BTF, we directly assign the sets being registered, so
1558 * there is nothing to free except kfunc_set_tab.
1560 if (btf_is_module(btf))
1562 for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) {
1563 for (type = 0; type < ARRAY_SIZE(tab->sets[0]); type++)
1564 kfree(tab->sets[hook][type]);
1568 btf->kfunc_set_tab = NULL;
1571 static void btf_free(struct btf *btf)
1573 btf_free_kfunc_set_tab(btf);
1575 kvfree(btf->resolved_sizes);
1576 kvfree(btf->resolved_ids);
1581 static void btf_free_rcu(struct rcu_head *rcu)
1583 struct btf *btf = container_of(rcu, struct btf, rcu);
1588 void btf_get(struct btf *btf)
1590 refcount_inc(&btf->refcnt);
1593 void btf_put(struct btf *btf)
1595 if (btf && refcount_dec_and_test(&btf->refcnt)) {
1597 call_rcu(&btf->rcu, btf_free_rcu);
1601 static int env_resolve_init(struct btf_verifier_env *env)
1603 struct btf *btf = env->btf;
1604 u32 nr_types = btf->nr_types;
1605 u32 *resolved_sizes = NULL;
1606 u32 *resolved_ids = NULL;
1607 u8 *visit_states = NULL;
1609 resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
1610 GFP_KERNEL | __GFP_NOWARN);
1611 if (!resolved_sizes)
1614 resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
1615 GFP_KERNEL | __GFP_NOWARN);
1619 visit_states = kvcalloc(nr_types, sizeof(*visit_states),
1620 GFP_KERNEL | __GFP_NOWARN);
1624 btf->resolved_sizes = resolved_sizes;
1625 btf->resolved_ids = resolved_ids;
1626 env->visit_states = visit_states;
1631 kvfree(resolved_sizes);
1632 kvfree(resolved_ids);
1633 kvfree(visit_states);
1637 static void btf_verifier_env_free(struct btf_verifier_env *env)
1639 kvfree(env->visit_states);
1643 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
1644 const struct btf_type *next_type)
1646 switch (env->resolve_mode) {
1648 /* int, enum or void is a sink */
1649 return !btf_type_needs_resolve(next_type);
1651 /* int, enum, void, struct, array, func or func_proto is a sink
1654 return !btf_type_is_modifier(next_type) &&
1655 !btf_type_is_ptr(next_type);
1656 case RESOLVE_STRUCT_OR_ARRAY:
1657 /* int, enum, void, ptr, func or func_proto is a sink
1658 * for struct and array
1660 return !btf_type_is_modifier(next_type) &&
1661 !btf_type_is_array(next_type) &&
1662 !btf_type_is_struct(next_type);
1668 static bool env_type_is_resolved(const struct btf_verifier_env *env,
1671 /* base BTF types should be resolved by now */
1672 if (type_id < env->btf->start_id)
1675 return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
1678 static int env_stack_push(struct btf_verifier_env *env,
1679 const struct btf_type *t, u32 type_id)
1681 const struct btf *btf = env->btf;
1682 struct resolve_vertex *v;
1684 if (env->top_stack == MAX_RESOLVE_DEPTH)
1687 if (type_id < btf->start_id
1688 || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
1691 env->visit_states[type_id - btf->start_id] = VISITED;
1693 v = &env->stack[env->top_stack++];
1695 v->type_id = type_id;
1698 if (env->resolve_mode == RESOLVE_TBD) {
1699 if (btf_type_is_ptr(t))
1700 env->resolve_mode = RESOLVE_PTR;
1701 else if (btf_type_is_struct(t) || btf_type_is_array(t))
1702 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1708 static void env_stack_set_next_member(struct btf_verifier_env *env,
1711 env->stack[env->top_stack - 1].next_member = next_member;
1714 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1715 u32 resolved_type_id,
1718 u32 type_id = env->stack[--(env->top_stack)].type_id;
1719 struct btf *btf = env->btf;
1721 type_id -= btf->start_id; /* adjust to local type id */
1722 btf->resolved_sizes[type_id] = resolved_size;
1723 btf->resolved_ids[type_id] = resolved_type_id;
1724 env->visit_states[type_id] = RESOLVED;
1727 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1729 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1732 /* Resolve the size of a passed-in "type"
1734 * type: is an array (e.g. u32 array[x][y])
1735 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1736 * *type_size: (x * y * sizeof(u32)). Hence, *type_size always
1737 * corresponds to the return type.
1739 * *elem_id: id of u32
1740 * *total_nelems: (x * y). Hence, individual elem size is
1741 * (*type_size / *total_nelems)
1742 * *type_id: id of type if it's changed within the function, 0 if not
1744 * type: is not an array (e.g. const struct X)
1745 * return type: type "struct X"
1746 * *type_size: sizeof(struct X)
1747 * *elem_type: same as return type ("struct X")
1750 * *type_id: id of type if it's changed within the function, 0 if not
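 *
 * Worked example (illustrative): for "u32 arr[2][3]" the function
 * returns the ARRAY type with *type_size == 2 * 3 * sizeof(u32) == 24
 * and *total_nelems == 6, so each element is *type_size / *total_nelems
 * == 4 bytes and *elem_id identifies the u32 element type.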
1752 static const struct btf_type *
1753 __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1754 u32 *type_size, const struct btf_type **elem_type,
1755 u32 *elem_id, u32 *total_nelems, u32 *type_id)
1757 const struct btf_type *array_type = NULL;
1758 const struct btf_array *array = NULL;
1759 u32 i, size, nelems = 1, id = 0;
1761 for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1762 switch (BTF_INFO_KIND(type->info)) {
1763 /* type->size can be used */
1765 case BTF_KIND_STRUCT:
1766 case BTF_KIND_UNION:
1768 case BTF_KIND_FLOAT:
1773 size = sizeof(void *);
1777 case BTF_KIND_TYPEDEF:
1778 case BTF_KIND_VOLATILE:
1779 case BTF_KIND_CONST:
1780 case BTF_KIND_RESTRICT:
1781 case BTF_KIND_TYPE_TAG:
1783 type = btf_type_by_id(btf, type->type);
1786 case BTF_KIND_ARRAY:
1789 array = btf_type_array(type);
1790 if (nelems && array->nelems > U32_MAX / nelems)
1791 return ERR_PTR(-EINVAL);
1792 nelems *= array->nelems;
1793 type = btf_type_by_id(btf, array->type);
1796 /* type without size */
1798 return ERR_PTR(-EINVAL);
1802 return ERR_PTR(-EINVAL);
1805 if (nelems && size > U32_MAX / nelems)
1806 return ERR_PTR(-EINVAL);
1808 *type_size = nelems * size;
1810 *total_nelems = nelems;
1814 *elem_id = array ? array->type : 0;
1818 return array_type ? : type;
1821 const struct btf_type *
1822 btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1825 return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
1828 static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
1830 while (type_id < btf->start_id)
1831 btf = btf->base_btf;
1833 return btf->resolved_ids[type_id - btf->start_id];
1836 /* The input param "type_id" must point to a needs_resolve type */
1837 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
1840 *type_id = btf_resolved_type_id(btf, *type_id);
1841 return btf_type_by_id(btf, *type_id);
1844 static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
1846 while (type_id < btf->start_id)
1847 btf = btf->base_btf;
1849 return btf->resolved_sizes[type_id - btf->start_id];
1852 const struct btf_type *btf_type_id_size(const struct btf *btf,
1853 u32 *type_id, u32 *ret_size)
1855 const struct btf_type *size_type;
1856 u32 size_type_id = *type_id;
1859 size_type = btf_type_by_id(btf, size_type_id);
1860 if (btf_type_nosize_or_null(size_type))
1863 if (btf_type_has_size(size_type)) {
1864 size = size_type->size;
1865 } else if (btf_type_is_array(size_type)) {
1866 size = btf_resolved_type_size(btf, size_type_id);
1867 } else if (btf_type_is_ptr(size_type)) {
1868 size = sizeof(void *);
1870 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
1871 !btf_type_is_var(size_type)))
1874 size_type_id = btf_resolved_type_id(btf, size_type_id);
1875 size_type = btf_type_by_id(btf, size_type_id);
1876 if (btf_type_nosize_or_null(size_type))
1878 else if (btf_type_has_size(size_type))
1879 size = size_type->size;
1880 else if (btf_type_is_array(size_type))
1881 size = btf_resolved_type_size(btf, size_type_id);
1882 else if (btf_type_is_ptr(size_type))
1883 size = sizeof(void *);
1888 *type_id = size_type_id;
1895 static int btf_df_check_member(struct btf_verifier_env *env,
1896 const struct btf_type *struct_type,
1897 const struct btf_member *member,
1898 const struct btf_type *member_type)
1900 btf_verifier_log_basic(env, struct_type,
1901 "Unsupported check_member");
1905 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1906 const struct btf_type *struct_type,
1907 const struct btf_member *member,
1908 const struct btf_type *member_type)
1910 btf_verifier_log_basic(env, struct_type,
1911 "Unsupported check_kflag_member");
1915 /* Used for ptr, array, struct/union and float type members.
1916 * int, enum and modifier types have their specific callback functions.
1918 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1919 const struct btf_type *struct_type,
1920 const struct btf_member *member,
1921 const struct btf_type *member_type)
1923 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1924 btf_verifier_log_member(env, struct_type, member,
1925 "Invalid member bitfield_size");
1929 /* bitfield size is 0, so member->offset represents bit offset only.
1930 * It is safe to call non kflag check_member variants.
1932 return btf_type_ops(member_type)->check_member(env, struct_type,
1937 static int btf_df_resolve(struct btf_verifier_env *env,
1938 const struct resolve_vertex *v)
1940 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
1944 static void btf_df_show(const struct btf *btf, const struct btf_type *t,
1945 u32 type_id, void *data, u8 bits_offsets,
1946 struct btf_show *show)
1948 btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
1951 static int btf_int_check_member(struct btf_verifier_env *env,
1952 const struct btf_type *struct_type,
1953 const struct btf_member *member,
1954 const struct btf_type *member_type)
1956 u32 int_data = btf_type_int(member_type);
1957 u32 struct_bits_off = member->offset;
1958 u32 struct_size = struct_type->size;
1962 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
1963 btf_verifier_log_member(env, struct_type, member,
1964 "bits_offset exceeds U32_MAX");
1968 struct_bits_off += BTF_INT_OFFSET(int_data);
1969 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1970 nr_copy_bits = BTF_INT_BITS(int_data) +
1971 BITS_PER_BYTE_MASKED(struct_bits_off);
1973 if (nr_copy_bits > BITS_PER_U128) {
1974 btf_verifier_log_member(env, struct_type, member,
1975 "nr_copy_bits exceeds 128");
1979 if (struct_size < bytes_offset ||
1980 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1981 btf_verifier_log_member(env, struct_type, member,
1982 "Member exceeds struct_size");
1989 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
1990 const struct btf_type *struct_type,
1991 const struct btf_member *member,
1992 const struct btf_type *member_type)
1994 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
1995 u32 int_data = btf_type_int(member_type);
1996 u32 struct_size = struct_type->size;
1999 /* a regular int type is required for the kflag int member */
2000 if (!btf_type_int_is_regular(member_type)) {
2001 btf_verifier_log_member(env, struct_type, member,
2002 "Invalid member base type");
2006 /* check sanity of bitfield size */
2007 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2008 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2009 nr_int_data_bits = BTF_INT_BITS(int_data);
2011 /* Not a bitfield member, member offset must be at byte
2014 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2015 btf_verifier_log_member(env, struct_type, member,
2016 "Invalid member offset");
2020 nr_bits = nr_int_data_bits;
2021 } else if (nr_bits > nr_int_data_bits) {
2022 btf_verifier_log_member(env, struct_type, member,
2023 "Invalid member bitfield_size");
2027 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2028 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
2029 if (nr_copy_bits > BITS_PER_U128) {
2030 btf_verifier_log_member(env, struct_type, member,
2031 "nr_copy_bits exceeds 128");
2035 if (struct_size < bytes_offset ||
2036 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2037 btf_verifier_log_member(env, struct_type, member,
2038 "Member exceeds struct_size");
2045 static s32 btf_int_check_meta(struct btf_verifier_env *env,
2046 const struct btf_type *t,
2049 u32 int_data, nr_bits, meta_needed = sizeof(int_data);
2052 if (meta_left < meta_needed) {
2053 btf_verifier_log_basic(env, t,
2054 "meta_left:%u meta_needed:%u",
2055 meta_left, meta_needed);
2059 if (btf_type_vlen(t)) {
2060 btf_verifier_log_type(env, t, "vlen != 0");
2064 if (btf_type_kflag(t)) {
2065 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2069 int_data = btf_type_int(t);
2070 if (int_data & ~BTF_INT_MASK) {
2071 btf_verifier_log_basic(env, t, "Invalid int_data:%x",
2076 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
2078 if (nr_bits > BITS_PER_U128) {
2079 btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
2084 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
2085 btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
2090 * Only one of the encoding bits is allowed and it
2091 * should be sufficient for the pretty print purpose (i.e. decoding).
2092 * Multiple bits can be allowed later if it is found
2093 * to be insufficient.
2095 encoding = BTF_INT_ENCODING(int_data);
2097 encoding != BTF_INT_SIGNED &&
2098 encoding != BTF_INT_CHAR &&
2099 encoding != BTF_INT_BOOL) {
2100 btf_verifier_log_type(env, t, "Unsupported encoding");
2104 btf_verifier_log_type(env, t, NULL);
2109 static void btf_int_log(struct btf_verifier_env *env,
2110 const struct btf_type *t)
2112 int int_data = btf_type_int(t);
2114 btf_verifier_log(env,
2115 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
2116 t->size, BTF_INT_OFFSET(int_data),
2117 BTF_INT_BITS(int_data),
2118 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
2121 static void btf_int128_print(struct btf_show *show, void *data)
2123 /* data points to a __int128 number.
2125 * int128_num = *(__int128 *)data;
2126 * The formulas below show what upper_num and lower_num represent:
2127 * upper_num = int128_num >> 64;
2128 * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
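 *
 * For example (illustrative): if the 128-bit value is (1 << 64) + 2,
 * then upper_num == 1 and lower_num == 2, and the value is printed
 * below as "0x10000000000000002".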
2130 u64 upper_num, lower_num;
2132 #ifdef __BIG_ENDIAN_BITFIELD
2133 upper_num = *(u64 *)data;
2134 lower_num = *(u64 *)(data + 8);
2136 upper_num = *(u64 *)(data + 8);
2137 lower_num = *(u64 *)data;
2140 btf_show_type_value(show, "0x%llx", lower_num);
2142 btf_show_type_values(show, "0x%llx%016llx", upper_num,
2146 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
2147 u16 right_shift_bits)
2149 u64 upper_num, lower_num;
2151 #ifdef __BIG_ENDIAN_BITFIELD
2152 upper_num = print_num[0];
2153 lower_num = print_num[1];
2155 upper_num = print_num[1];
2156 lower_num = print_num[0];
2159 /* shake out un-needed bits by shift/or operations */
2160 if (left_shift_bits >= 64) {
2161 upper_num = lower_num << (left_shift_bits - 64);
2164 upper_num = (upper_num << left_shift_bits) |
2165 (lower_num >> (64 - left_shift_bits));
2166 lower_num = lower_num << left_shift_bits;
2169 if (right_shift_bits >= 64) {
2170 lower_num = upper_num >> (right_shift_bits - 64);
2173 lower_num = (lower_num >> right_shift_bits) |
2174 (upper_num << (64 - right_shift_bits));
2175 upper_num = upper_num >> right_shift_bits;
2178 #ifdef __BIG_ENDIAN_BITFIELD
2179 print_num[0] = upper_num;
2180 print_num[1] = lower_num;
2182 print_num[0] = lower_num;
2183 print_num[1] = upper_num;
2187 static void btf_bitfield_show(void *data, u8 bits_offset,
2188 u8 nr_bits, struct btf_show *show)
2190 u16 left_shift_bits, right_shift_bits;
2193 u64 print_num[2] = {};
2195 nr_copy_bits = nr_bits + bits_offset;
2196 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
2198 memcpy(print_num, data, nr_copy_bytes);
2200 #ifdef __BIG_ENDIAN_BITFIELD
2201 left_shift_bits = bits_offset;
2203 left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2205 right_shift_bits = BITS_PER_U128 - nr_bits;
2207 btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
2208 btf_int128_print(show, print_num);
2212 static void btf_int_bits_show(const struct btf *btf,
2213 const struct btf_type *t,
2214 void *data, u8 bits_offset,
2215 struct btf_show *show)
2217 u32 int_data = btf_type_int(t);
2218 u8 nr_bits = BTF_INT_BITS(int_data);
2219 u8 total_bits_offset;
2222 * bits_offset is at most 7.
2223 * BTF_INT_OFFSET() cannot exceed 128 bits.
2225 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
2226 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
2227 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
2228 btf_bitfield_show(data, bits_offset, nr_bits, show);
2231 static void btf_int_show(const struct btf *btf, const struct btf_type *t,
2232 u32 type_id, void *data, u8 bits_offset,
2233 struct btf_show *show)
2235 u32 int_data = btf_type_int(t);
2236 u8 encoding = BTF_INT_ENCODING(int_data);
2237 bool sign = encoding & BTF_INT_SIGNED;
2238 u8 nr_bits = BTF_INT_BITS(int_data);
2241 safe_data = btf_show_start_type(show, t, type_id, data);
2245 if (bits_offset || BTF_INT_OFFSET(int_data) ||
2246 BITS_PER_BYTE_MASKED(nr_bits)) {
2247 btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2253 btf_int128_print(show, safe_data);
2257 btf_show_type_value(show, "%lld", *(s64 *)safe_data);
2259 btf_show_type_value(show, "%llu", *(u64 *)safe_data);
2263 btf_show_type_value(show, "%d", *(s32 *)safe_data);
2265 btf_show_type_value(show, "%u", *(u32 *)safe_data);
2269 btf_show_type_value(show, "%d", *(s16 *)safe_data);
2271 btf_show_type_value(show, "%u", *(u16 *)safe_data);
2274 if (show->state.array_encoding == BTF_INT_CHAR) {
2275 /* check for null terminator */
2276 if (show->state.array_terminated)
2278 if (*(char *)data == '\0') {
2279 show->state.array_terminated = 1;
2282 if (isprint(*(char *)data)) {
2283 btf_show_type_value(show, "'%c'",
2284 *(char *)safe_data);
2289 btf_show_type_value(show, "%d", *(s8 *)safe_data);
2291 btf_show_type_value(show, "%u", *(u8 *)safe_data);
2294 btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2298 btf_show_end_type(show);
2301 static const struct btf_kind_operations int_ops = {
2302 .check_meta = btf_int_check_meta,
2303 .resolve = btf_df_resolve,
2304 .check_member = btf_int_check_member,
2305 .check_kflag_member = btf_int_check_kflag_member,
2306 .log_details = btf_int_log,
2307 .show = btf_int_show,
2310 static int btf_modifier_check_member(struct btf_verifier_env *env,
2311 const struct btf_type *struct_type,
2312 const struct btf_member *member,
2313 const struct btf_type *member_type)
2315 const struct btf_type *resolved_type;
2316 u32 resolved_type_id = member->type;
2317 struct btf_member resolved_member;
2318 struct btf *btf = env->btf;
2320 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2321 if (!resolved_type) {
2322 btf_verifier_log_member(env, struct_type, member,
2327 resolved_member = *member;
2328 resolved_member.type = resolved_type_id;
2330 return btf_type_ops(resolved_type)->check_member(env, struct_type,
2335 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
2336 const struct btf_type *struct_type,
2337 const struct btf_member *member,
2338 const struct btf_type *member_type)
2340 const struct btf_type *resolved_type;
2341 u32 resolved_type_id = member->type;
2342 struct btf_member resolved_member;
2343 struct btf *btf = env->btf;
2345 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2346 if (!resolved_type) {
2347 btf_verifier_log_member(env, struct_type, member,
2352 resolved_member = *member;
2353 resolved_member.type = resolved_type_id;
2355 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2360 static int btf_ptr_check_member(struct btf_verifier_env *env,
2361 const struct btf_type *struct_type,
2362 const struct btf_member *member,
2363 const struct btf_type *member_type)
2365 u32 struct_size, struct_bits_off, bytes_offset;
2367 struct_size = struct_type->size;
2368 struct_bits_off = member->offset;
2369 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2371 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2372 btf_verifier_log_member(env, struct_type, member,
2373 "Member is not byte aligned");
2377 if (struct_size - bytes_offset < sizeof(void *)) {
2378 btf_verifier_log_member(env, struct_type, member,
2379 "Member exceeds struct_size");
2386 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
2387 const struct btf_type *t,
2392 if (btf_type_vlen(t)) {
2393 btf_verifier_log_type(env, t, "vlen != 0");
2397 if (btf_type_kflag(t)) {
2398 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2402 if (!BTF_TYPE_ID_VALID(t->type)) {
2403 btf_verifier_log_type(env, t, "Invalid type_id");
2407 /* typedef/type_tag type must have a valid name, and other ref types,
2408 * volatile, const, restrict, should have a null name.
2410 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2412 !btf_name_valid_identifier(env->btf, t->name_off)) {
2413 btf_verifier_log_type(env, t, "Invalid name");
2416 } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
2417 value = btf_name_by_offset(env->btf, t->name_off);
2418 if (!value || !value[0]) {
2419 btf_verifier_log_type(env, t, "Invalid name");
2424 btf_verifier_log_type(env, t, "Invalid name");
2429 btf_verifier_log_type(env, t, NULL);
2434 static int btf_modifier_resolve(struct btf_verifier_env *env,
2435 const struct resolve_vertex *v)
2437 const struct btf_type *t = v->t;
2438 const struct btf_type *next_type;
2439 u32 next_type_id = t->type;
2440 struct btf *btf = env->btf;
2442 next_type = btf_type_by_id(btf, next_type_id);
2443 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2444 btf_verifier_log_type(env, v->t, "Invalid type_id");
2448 if (!env_type_is_resolve_sink(env, next_type) &&
2449 !env_type_is_resolved(env, next_type_id))
2450 return env_stack_push(env, next_type, next_type_id);
2452 /* Figure out the resolved next_type_id with size.
2453 * They will be stored in the current modifier's
2454 * resolved_ids and resolved_sizes so that they can
2455 * save us some type-following when we use them later (e.g. in
2458 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2459 if (env_type_is_resolved(env, next_type_id))
2460 next_type = btf_type_id_resolve(btf, &next_type_id);
2462 /* "typedef void new_void", "const void"...etc */
2463 if (!btf_type_is_void(next_type) &&
2464 !btf_type_is_fwd(next_type) &&
2465 !btf_type_is_func_proto(next_type)) {
2466 btf_verifier_log_type(env, v->t, "Invalid type_id");
2471 env_stack_pop_resolved(env, next_type_id, 0);
2476 static int btf_var_resolve(struct btf_verifier_env *env,
2477 const struct resolve_vertex *v)
2479 const struct btf_type *next_type;
2480 const struct btf_type *t = v->t;
2481 u32 next_type_id = t->type;
2482 struct btf *btf = env->btf;
2484 next_type = btf_type_by_id(btf, next_type_id);
2485 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2486 btf_verifier_log_type(env, v->t, "Invalid type_id");
2490 if (!env_type_is_resolve_sink(env, next_type) &&
2491 !env_type_is_resolved(env, next_type_id))
2492 return env_stack_push(env, next_type, next_type_id);
2494 if (btf_type_is_modifier(next_type)) {
2495 const struct btf_type *resolved_type;
2496 u32 resolved_type_id;
2498 resolved_type_id = next_type_id;
2499 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2501 if (btf_type_is_ptr(resolved_type) &&
2502 !env_type_is_resolve_sink(env, resolved_type) &&
2503 !env_type_is_resolved(env, resolved_type_id))
2504 return env_stack_push(env, resolved_type,
2508 /* We must resolve to something concrete at this point, no
2509 * forward types or similar that would resolve to size of
2512 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2513 btf_verifier_log_type(env, v->t, "Invalid type_id");
2517 env_stack_pop_resolved(env, next_type_id, 0);
2522 static int btf_ptr_resolve(struct btf_verifier_env *env,
2523 const struct resolve_vertex *v)
2525 const struct btf_type *next_type;
2526 const struct btf_type *t = v->t;
2527 u32 next_type_id = t->type;
2528 struct btf *btf = env->btf;
2530 next_type = btf_type_by_id(btf, next_type_id);
2531 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2532 btf_verifier_log_type(env, v->t, "Invalid type_id");
2536 if (!env_type_is_resolve_sink(env, next_type) &&
2537 !env_type_is_resolved(env, next_type_id))
2538 return env_stack_push(env, next_type, next_type_id);
2540 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2541 * the modifier may have stopped resolving when it was resolved
2542 * to a ptr (last-resolved-ptr).
2544 * We now need to continue from the last-resolved-ptr to
2545 * ensure the last-resolved-ptr will not refer back to
2546 * the current ptr (t).
2548 if (btf_type_is_modifier(next_type)) {
2549 const struct btf_type *resolved_type;
2550 u32 resolved_type_id;
2552 resolved_type_id = next_type_id;
2553 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2555 if (btf_type_is_ptr(resolved_type) &&
2556 !env_type_is_resolve_sink(env, resolved_type) &&
2557 !env_type_is_resolved(env, resolved_type_id))
2558 return env_stack_push(env, resolved_type,
2562 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2563 if (env_type_is_resolved(env, next_type_id))
2564 next_type = btf_type_id_resolve(btf, &next_type_id);
2566 if (!btf_type_is_void(next_type) &&
2567 !btf_type_is_fwd(next_type) &&
2568 !btf_type_is_func_proto(next_type)) {
2569 btf_verifier_log_type(env, v->t, "Invalid type_id");
2574 env_stack_pop_resolved(env, next_type_id, 0);
2579 static void btf_modifier_show(const struct btf *btf,
2580 const struct btf_type *t,
2581 u32 type_id, void *data,
2582 u8 bits_offset, struct btf_show *show)
2584 if (btf->resolved_ids)
2585 t = btf_type_id_resolve(btf, &type_id);
2587 t = btf_type_skip_modifiers(btf, type_id, NULL);
2589 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2592 static void btf_var_show(const struct btf *btf, const struct btf_type *t,
2593 u32 type_id, void *data, u8 bits_offset,
2594 struct btf_show *show)
2596 t = btf_type_id_resolve(btf, &type_id);
2598 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2601 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
2602 u32 type_id, void *data, u8 bits_offset,
2603 struct btf_show *show)
2607 safe_data = btf_show_start_type(show, t, type_id, data);
2611 /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
2612 if (show->flags & BTF_SHOW_PTR_RAW)
2613 btf_show_type_value(show, "0x%px", *(void **)safe_data);
2615 btf_show_type_value(show, "0x%p", *(void **)safe_data);
2616 btf_show_end_type(show);
2619 static void btf_ref_type_log(struct btf_verifier_env *env,
2620 const struct btf_type *t)
2622 btf_verifier_log(env, "type_id=%u", t->type);
2625 static struct btf_kind_operations modifier_ops = {
2626 .check_meta = btf_ref_type_check_meta,
2627 .resolve = btf_modifier_resolve,
2628 .check_member = btf_modifier_check_member,
2629 .check_kflag_member = btf_modifier_check_kflag_member,
2630 .log_details = btf_ref_type_log,
2631 .show = btf_modifier_show,
2634 static struct btf_kind_operations ptr_ops = {
2635 .check_meta = btf_ref_type_check_meta,
2636 .resolve = btf_ptr_resolve,
2637 .check_member = btf_ptr_check_member,
2638 .check_kflag_member = btf_generic_check_kflag_member,
2639 .log_details = btf_ref_type_log,
2640 .show = btf_ptr_show,
2643 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
2644 const struct btf_type *t,
2647 if (btf_type_vlen(t)) {
2648 btf_verifier_log_type(env, t, "vlen != 0");
2653 btf_verifier_log_type(env, t, "type != 0");
2657 /* fwd type must have a valid name */
2659 !btf_name_valid_identifier(env->btf, t->name_off)) {
2660 btf_verifier_log_type(env, t, "Invalid name");
2664 btf_verifier_log_type(env, t, NULL);
2669 static void btf_fwd_type_log(struct btf_verifier_env *env,
2670 const struct btf_type *t)
2672 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
2675 static struct btf_kind_operations fwd_ops = {
2676 .check_meta = btf_fwd_check_meta,
2677 .resolve = btf_df_resolve,
2678 .check_member = btf_df_check_member,
2679 .check_kflag_member = btf_df_check_kflag_member,
2680 .log_details = btf_fwd_type_log,
2681 .show = btf_df_show,
2684 static int btf_array_check_member(struct btf_verifier_env *env,
2685 const struct btf_type *struct_type,
2686 const struct btf_member *member,
2687 const struct btf_type *member_type)
2689 u32 struct_bits_off = member->offset;
2690 u32 struct_size, bytes_offset;
2691 u32 array_type_id, array_size;
2692 struct btf *btf = env->btf;
2694 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2695 btf_verifier_log_member(env, struct_type, member,
2696 "Member is not byte aligned");
2700 array_type_id = member->type;
2701 btf_type_id_size(btf, &array_type_id, &array_size);
2702 struct_size = struct_type->size;
2703 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2704 if (struct_size - bytes_offset < array_size) {
2705 btf_verifier_log_member(env, struct_type, member,
2706 "Member exceeds struct_size");
2713 static s32 btf_array_check_meta(struct btf_verifier_env *env,
2714 const struct btf_type *t,
2717 const struct btf_array *array = btf_type_array(t);
2718 u32 meta_needed = sizeof(*array);
2720 if (meta_left < meta_needed) {
2721 btf_verifier_log_basic(env, t,
2722 "meta_left:%u meta_needed:%u",
2723 meta_left, meta_needed);
2727 /* array type should not have a name */
2729 btf_verifier_log_type(env, t, "Invalid name");
2733 if (btf_type_vlen(t)) {
2734 btf_verifier_log_type(env, t, "vlen != 0");
2738 if (btf_type_kflag(t)) {
2739 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2744 btf_verifier_log_type(env, t, "size != 0");
2748 /* Array elem type and index type cannot be in type void,
2749 * so !array->type and !array->index_type are not allowed.
2751 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2752 btf_verifier_log_type(env, t, "Invalid elem");
2756 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2757 btf_verifier_log_type(env, t, "Invalid index");
2761 btf_verifier_log_type(env, t, NULL);
2766 static int btf_array_resolve(struct btf_verifier_env *env,
2767 const struct resolve_vertex *v)
2769 const struct btf_array *array = btf_type_array(v->t);
2770 const struct btf_type *elem_type, *index_type;
2771 u32 elem_type_id, index_type_id;
2772 struct btf *btf = env->btf;
2775 /* Check array->index_type */
2776 index_type_id = array->index_type;
2777 index_type = btf_type_by_id(btf, index_type_id);
2778 if (btf_type_nosize_or_null(index_type) ||
2779 btf_type_is_resolve_source_only(index_type)) {
2780 btf_verifier_log_type(env, v->t, "Invalid index");
2784 if (!env_type_is_resolve_sink(env, index_type) &&
2785 !env_type_is_resolved(env, index_type_id))
2786 return env_stack_push(env, index_type, index_type_id);
2788 index_type = btf_type_id_size(btf, &index_type_id, NULL);
2789 if (!index_type || !btf_type_is_int(index_type) ||
2790 !btf_type_int_is_regular(index_type)) {
2791 btf_verifier_log_type(env, v->t, "Invalid index");
2795 /* Check array->type */
2796 elem_type_id = array->type;
2797 elem_type = btf_type_by_id(btf, elem_type_id);
2798 if (btf_type_nosize_or_null(elem_type) ||
2799 btf_type_is_resolve_source_only(elem_type)) {
2800 btf_verifier_log_type(env, v->t,
2805 if (!env_type_is_resolve_sink(env, elem_type) &&
2806 !env_type_is_resolved(env, elem_type_id))
2807 return env_stack_push(env, elem_type, elem_type_id);
2809 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2811 btf_verifier_log_type(env, v->t, "Invalid elem");
2815 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2816 btf_verifier_log_type(env, v->t, "Invalid array of int");
2820 if (array->nelems && elem_size > U32_MAX / array->nelems) {
2821 btf_verifier_log_type(env, v->t,
2822 "Array size overflows U32_MAX");
2826 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
2831 static void btf_array_log(struct btf_verifier_env *env,
2832 const struct btf_type *t)
2834 const struct btf_array *array = btf_type_array(t);
2836 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
2837 array->type, array->index_type, array->nelems);
2840 static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
2841 u32 type_id, void *data, u8 bits_offset,
2842 struct btf_show *show)
2844 const struct btf_array *array = btf_type_array(t);
2845 const struct btf_kind_operations *elem_ops;
2846 const struct btf_type *elem_type;
2847 u32 i, elem_size = 0, elem_type_id;
2850 elem_type_id = array->type;
2851 elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
2852 if (elem_type && btf_type_has_size(elem_type))
2853 elem_size = elem_type->size;
2855 if (elem_type && btf_type_is_int(elem_type)) {
2856 u32 int_type = btf_type_int(elem_type);
2858 encoding = BTF_INT_ENCODING(int_type);
2861 * BTF_INT_CHAR encoding never seems to be set for
2862 * char arrays, so if size is 1 and element is
2863 * printable as a char, we'll do that.
2866 encoding = BTF_INT_CHAR;
2869 if (!btf_show_start_array_type(show, t, type_id, encoding, data))
2874 elem_ops = btf_type_ops(elem_type);
2876 for (i = 0; i < array->nelems; i++) {
2878 btf_show_start_array_member(show);
2880 elem_ops->show(btf, elem_type, elem_type_id, data,
2884 btf_show_end_array_member(show);
2886 if (show->state.array_terminated)
2890 btf_show_end_array_type(show);
2893 static void btf_array_show(const struct btf *btf, const struct btf_type *t,
2894 u32 type_id, void *data, u8 bits_offset,
2895 struct btf_show *show)
2897 const struct btf_member *m = show->state.member;
2900 * First check if any members would be shown (are non-zero).
2901 * See comments above "struct btf_show" definition for more
2902 * details on how this works at a high level.
2904 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
2905 if (!show->state.depth_check) {
2906 show->state.depth_check = show->state.depth + 1;
2907 show->state.depth_to_show = 0;
2909 __btf_array_show(btf, t, type_id, data, bits_offset, show);
2910 show->state.member = m;
2912 if (show->state.depth_check != show->state.depth + 1)
2914 show->state.depth_check = 0;
2916 if (show->state.depth_to_show <= show->state.depth)
2919 * Reaching here indicates we have recursed and found
2920 * non-zero array member(s).
2923 __btf_array_show(btf, t, type_id, data, bits_offset, show);
2926 static struct btf_kind_operations array_ops = {
2927 .check_meta = btf_array_check_meta,
2928 .resolve = btf_array_resolve,
2929 .check_member = btf_array_check_member,
2930 .check_kflag_member = btf_generic_check_kflag_member,
2931 .log_details = btf_array_log,
2932 .show = btf_array_show,
2935 static int btf_struct_check_member(struct btf_verifier_env *env,
2936 const struct btf_type *struct_type,
2937 const struct btf_member *member,
2938 const struct btf_type *member_type)
2940 u32 struct_bits_off = member->offset;
2941 u32 struct_size, bytes_offset;
2943 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2944 btf_verifier_log_member(env, struct_type, member,
2945 "Member is not byte aligned");
2949 struct_size = struct_type->size;
2950 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2951 if (struct_size - bytes_offset < member_type->size) {
2952 btf_verifier_log_member(env, struct_type, member,
2953 "Member exceeds struct_size");
2960 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
2961 const struct btf_type *t,
2964 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
2965 const struct btf_member *member;
2966 u32 meta_needed, last_offset;
2967 struct btf *btf = env->btf;
2968 u32 struct_size = t->size;
2972 meta_needed = btf_type_vlen(t) * sizeof(*member);
2973 if (meta_left < meta_needed) {
2974 btf_verifier_log_basic(env, t,
2975 "meta_left:%u meta_needed:%u",
2976 meta_left, meta_needed);
2980 /* struct type has either no name or a valid one */
2982 !btf_name_valid_identifier(env->btf, t->name_off)) {
2983 btf_verifier_log_type(env, t, "Invalid name");
2987 btf_verifier_log_type(env, t, NULL);
2990 for_each_member(i, t, member) {
2991 if (!btf_name_offset_valid(btf, member->name_off)) {
2992 btf_verifier_log_member(env, t, member,
2993 "Invalid member name_offset:%u",
2998 /* struct member has either no name or a valid one */
2999 if (member->name_off &&
3000 !btf_name_valid_identifier(btf, member->name_off)) {
3001 btf_verifier_log_member(env, t, member, "Invalid name");
3004 /* A member cannot be in type void */
3005 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
3006 btf_verifier_log_member(env, t, member,
3011 offset = __btf_member_bit_offset(t, member);
3012 if (is_union && offset) {
3013 btf_verifier_log_member(env, t, member,
3014 "Invalid member bits_offset");
3019 * ">" instead of ">=" because the last member could be
3022 if (last_offset > offset) {
3023 btf_verifier_log_member(env, t, member,
3024 "Invalid member bits_offset");
3028 if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
3029 btf_verifier_log_member(env, t, member,
3030 "Member bits_offset exceeds its struct size");
3034 btf_verifier_log_member(env, t, member, NULL);
3035 last_offset = offset;
3041 static int btf_struct_resolve(struct btf_verifier_env *env,
3042 const struct resolve_vertex *v)
3044 const struct btf_member *member;
3048 /* Before continuing to resolve the next_member,
3049 * ensure the last member is indeed resolved to a
3050 * type with size info.
3052 if (v->next_member) {
3053 const struct btf_type *last_member_type;
3054 const struct btf_member *last_member;
3055 u16 last_member_type_id;
3057 last_member = btf_type_member(v->t) + v->next_member - 1;
3058 last_member_type_id = last_member->type;
3059 if (WARN_ON_ONCE(!env_type_is_resolved(env,
3060 last_member_type_id)))
3063 last_member_type = btf_type_by_id(env->btf,
3064 last_member_type_id);
3065 if (btf_type_kflag(v->t))
3066 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3070 err = btf_type_ops(last_member_type)->check_member(env, v->t,
3077 for_each_member_from(i, v->next_member, v->t, member) {
3078 u32 member_type_id = member->type;
3079 const struct btf_type *member_type = btf_type_by_id(env->btf,
3082 if (btf_type_nosize_or_null(member_type) ||
3083 btf_type_is_resolve_source_only(member_type)) {
3084 btf_verifier_log_member(env, v->t, member,
3089 if (!env_type_is_resolve_sink(env, member_type) &&
3090 !env_type_is_resolved(env, member_type_id)) {
3091 env_stack_set_next_member(env, i + 1);
3092 return env_stack_push(env, member_type, member_type_id);
3095 if (btf_type_kflag(v->t))
3096 err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3100 err = btf_type_ops(member_type)->check_member(env, v->t,
3107 env_stack_pop_resolved(env, 0, 0);
3112 static void btf_struct_log(struct btf_verifier_env *env,
3113 const struct btf_type *t)
3115 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3118 static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t,
3119 const char *name, int sz, int align)
3121 const struct btf_member *member;
3122 u32 i, off = -ENOENT;
3124 for_each_member(i, t, member) {
3125 const struct btf_type *member_type = btf_type_by_id(btf,
3127 if (!__btf_type_is_struct(member_type))
3129 if (member_type->size != sz)
3131 if (strcmp(__btf_name_by_offset(btf, member_type->name_off), name))
3134 /* only one such field is allowed */
3136 off = __btf_member_bit_offset(t, member);
3138 /* valid C code cannot generate such BTF */
3147 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
3148 const char *name, int sz, int align)
3150 const struct btf_var_secinfo *vsi;
3151 u32 i, off = -ENOENT;
3153 for_each_vsi(i, t, vsi) {
3154 const struct btf_type *var = btf_type_by_id(btf, vsi->type);
3155 const struct btf_type *var_type = btf_type_by_id(btf, var->type);
3157 if (!__btf_type_is_struct(var_type))
3159 if (var_type->size != sz)
3161 if (vsi->size != sz)
3163 if (strcmp(__btf_name_by_offset(btf, var_type->name_off), name))
3166 /* only one such field is allowed */
3175 static int btf_find_field(const struct btf *btf, const struct btf_type *t,
3176 const char *name, int sz, int align)
3179 if (__btf_type_is_struct(t))
3180 return btf_find_struct_field(btf, t, name, sz, align);
3181 else if (btf_type_is_datasec(t))
3182 return btf_find_datasec_var(btf, t, name, sz, align);
3186 /* find 'struct bpf_spin_lock' in map value.
3187 * return >= 0 offset if found
3188 * and < 0 in case of error
3190 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
3192 return btf_find_field(btf, t, "bpf_spin_lock",
3193 sizeof(struct bpf_spin_lock),
3194 __alignof__(struct bpf_spin_lock));
3197 int btf_find_timer(const struct btf *btf, const struct btf_type *t)
3199 return btf_find_field(btf, t, "bpf_timer",
3200 sizeof(struct bpf_timer),
3201 __alignof__(struct bpf_timer));
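/* Usage sketch for btf_find_spin_lock()/btf_find_timer() (illustrative;
 * the struct below is hypothetical, not from the original source): for a
 * map whose value type is
 *
 *	struct map_value {
 *		int counter;
 *		struct bpf_spin_lock lock;
 *	};
 *
 * btf_find_spin_lock(btf, value_type) is expected to return 4, the byte
 * offset of 'lock'.  It returns -ENOENT when no such field exists and a
 * negative error when more than one is present (only one is allowed).
 */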
3204 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
3205 u32 type_id, void *data, u8 bits_offset,
3206 struct btf_show *show)
3208 const struct btf_member *member;
3212 safe_data = btf_show_start_struct_type(show, t, type_id, data);
3216 for_each_member(i, t, member) {
3217 const struct btf_type *member_type = btf_type_by_id(btf,
3219 const struct btf_kind_operations *ops;
3220 u32 member_offset, bitfield_size;
3224 btf_show_start_member(show, member);
3226 member_offset = __btf_member_bit_offset(t, member);
3227 bitfield_size = __btf_member_bitfield_size(t, member);
3228 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
3229 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
3230 if (bitfield_size) {
3231 safe_data = btf_show_start_type(show, member_type,
3233 data + bytes_offset);
3235 btf_bitfield_show(safe_data,
3237 bitfield_size, show);
3238 btf_show_end_type(show);
3240 ops = btf_type_ops(member_type);
3241 ops->show(btf, member_type, member->type,
3242 data + bytes_offset, bits8_offset, show);
3245 btf_show_end_member(show);
3248 btf_show_end_struct_type(show);
3251 static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
3252 u32 type_id, void *data, u8 bits_offset,
3253 struct btf_show *show)
3255 const struct btf_member *m = show->state.member;
3258 * First check if any members would be shown (are non-zero).
3259 * See comments above "struct btf_show" definition for more
3260 * details on how this works at a high level.
3262 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3263 if (!show->state.depth_check) {
3264 show->state.depth_check = show->state.depth + 1;
3265 show->state.depth_to_show = 0;
3267 __btf_struct_show(btf, t, type_id, data, bits_offset, show);
3268 /* Restore saved member data here */
3269 show->state.member = m;
3270 if (show->state.depth_check != show->state.depth + 1)
3272 show->state.depth_check = 0;
3274 if (show->state.depth_to_show <= show->state.depth)
3277 * Reaching here indicates we have recursed and found
3278 * non-zero child values.
3282 __btf_struct_show(btf, t, type_id, data, bits_offset, show);
3285 static struct btf_kind_operations struct_ops = {
3286 .check_meta = btf_struct_check_meta,
3287 .resolve = btf_struct_resolve,
3288 .check_member = btf_struct_check_member,
3289 .check_kflag_member = btf_generic_check_kflag_member,
3290 .log_details = btf_struct_log,
3291 .show = btf_struct_show,
3294 static int btf_enum_check_member(struct btf_verifier_env *env,
3295 const struct btf_type *struct_type,
3296 const struct btf_member *member,
3297 const struct btf_type *member_type)
3299 u32 struct_bits_off = member->offset;
3300 u32 struct_size, bytes_offset;
3302 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3303 btf_verifier_log_member(env, struct_type, member,
3304 "Member is not byte aligned");
3308 struct_size = struct_type->size;
3309 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3310 if (struct_size - bytes_offset < member_type->size) {
3311 btf_verifier_log_member(env, struct_type, member,
3312 "Member exceeds struct_size");
3319 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
3320 const struct btf_type *struct_type,
3321 const struct btf_member *member,
3322 const struct btf_type *member_type)
3324 u32 struct_bits_off, nr_bits, bytes_end, struct_size;
3325 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
3327 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
3328 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
3330 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3331 btf_verifier_log_member(env, struct_type, member,
3332 "Member is not byte aligned");
3336 nr_bits = int_bitsize;
3337 } else if (nr_bits > int_bitsize) {
3338 btf_verifier_log_member(env, struct_type, member,
3339 "Invalid member bitfield_size");
3343 struct_size = struct_type->size;
3344 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
3345 if (struct_size < bytes_end) {
3346 btf_verifier_log_member(env, struct_type, member,
3347 "Member exceeds struct_size");
3354 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
3355 const struct btf_type *t,
3358 const struct btf_enum *enums = btf_type_enum(t);
3359 struct btf *btf = env->btf;
3363 nr_enums = btf_type_vlen(t);
3364 meta_needed = nr_enums * sizeof(*enums);
3366 if (meta_left < meta_needed) {
3367 btf_verifier_log_basic(env, t,
3368 "meta_left:%u meta_needed:%u",
3369 meta_left, meta_needed);
3373 if (btf_type_kflag(t)) {
3374 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3378 if (t->size > 8 || !is_power_of_2(t->size)) {
3379 btf_verifier_log_type(env, t, "Unexpected size");
3383 /* enum type has either no name or a valid one */
3385 !btf_name_valid_identifier(env->btf, t->name_off)) {
3386 btf_verifier_log_type(env, t, "Invalid name");
3390 btf_verifier_log_type(env, t, NULL);
3392 for (i = 0; i < nr_enums; i++) {
3393 if (!btf_name_offset_valid(btf, enums[i].name_off)) {
3394 btf_verifier_log(env, "\tInvalid name_offset:%u",
3399 /* enum member must have a valid name */
3400 if (!enums[i].name_off ||
3401 !btf_name_valid_identifier(btf, enums[i].name_off)) {
3402 btf_verifier_log_type(env, t, "Invalid name");
3406 if (env->log.level == BPF_LOG_KERNEL)
3408 btf_verifier_log(env, "\t%s val=%d\n",
3409 __btf_name_by_offset(btf, enums[i].name_off),
3416 static void btf_enum_log(struct btf_verifier_env *env,
3417 const struct btf_type *t)
3419 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3422 static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
3423 u32 type_id, void *data, u8 bits_offset,
3424 struct btf_show *show)
3426 const struct btf_enum *enums = btf_type_enum(t);
3427 u32 i, nr_enums = btf_type_vlen(t);
3431 safe_data = btf_show_start_type(show, t, type_id, data);
3435 v = *(int *)safe_data;
3437 for (i = 0; i < nr_enums; i++) {
3438 if (v != enums[i].val)
3441 btf_show_type_value(show, "%s",
3442 __btf_name_by_offset(btf,
3443 enums[i].name_off));
3445 btf_show_end_type(show);
3449 btf_show_type_value(show, "%d", v);
3450 btf_show_end_type(show);
3453 static struct btf_kind_operations enum_ops = {
3454 .check_meta = btf_enum_check_meta,
3455 .resolve = btf_df_resolve,
3456 .check_member = btf_enum_check_member,
3457 .check_kflag_member = btf_enum_check_kflag_member,
3458 .log_details = btf_enum_log,
3459 .show = btf_enum_show,
3462 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
3463 const struct btf_type *t,
3466 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
3468 if (meta_left < meta_needed) {
3469 btf_verifier_log_basic(env, t,
3470 "meta_left:%u meta_needed:%u",
3471 meta_left, meta_needed);
3476 btf_verifier_log_type(env, t, "Invalid name");
3480 if (btf_type_kflag(t)) {
3481 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3485 btf_verifier_log_type(env, t, NULL);
3490 static void btf_func_proto_log(struct btf_verifier_env *env,
3491 const struct btf_type *t)
3493 const struct btf_param *args = (const struct btf_param *)(t + 1);
3494 u16 nr_args = btf_type_vlen(t), i;
3496 btf_verifier_log(env, "return=%u args=(", t->type);
3498 btf_verifier_log(env, "void");
3502 if (nr_args == 1 && !args[0].type) {
3503 /* Only one vararg */
3504 btf_verifier_log(env, "vararg");
3508 btf_verifier_log(env, "%u %s", args[0].type,
3509 __btf_name_by_offset(env->btf,
3511 for (i = 1; i < nr_args - 1; i++)
3512 btf_verifier_log(env, ", %u %s", args[i].type,
3513 __btf_name_by_offset(env->btf,
3517 const struct btf_param *last_arg = &args[nr_args - 1];
3520 btf_verifier_log(env, ", %u %s", last_arg->type,
3521 __btf_name_by_offset(env->btf,
3522 last_arg->name_off));
3524 btf_verifier_log(env, ", vararg");
3528 btf_verifier_log(env, ")");
3531 static struct btf_kind_operations func_proto_ops = {
3532 .check_meta = btf_func_proto_check_meta,
3533 .resolve = btf_df_resolve,
3535 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
3536 * a struct's member.
3538 * It should be a function pointer instead.
3539 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
3541 * Hence, there is no btf_func_check_member() (see the sketch below).
3543 .check_member = btf_df_check_member,
3544 .check_kflag_member = btf_df_check_kflag_member,
3545 .log_details = btf_func_proto_log,
3546 .show = btf_df_show,
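/* Illustrative encoding for the comment above (type ids and log wording
 * are made up for the example): a callback member such as
 *
 *	struct ops { int (*handler)(int x); };
 *
 * would be described roughly as
 *
 *	[1] INT int size=4
 *	[2] FUNC_PROTO (anon) return=1 args=(1 x)
 *	[3] PTR (anon) type_id=2
 *	[4] STRUCT ops size=8 vlen=1
 *		handler type_id=3 bits_offset=0
 *
 * i.e. the struct member refers to the PTR, never to the FUNC_PROTO
 * directly.
 */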
3549 static s32 btf_func_check_meta(struct btf_verifier_env *env,
3550 const struct btf_type *t,
3554 !btf_name_valid_identifier(env->btf, t->name_off)) {
3555 btf_verifier_log_type(env, t, "Invalid name");
3559 if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
3560 btf_verifier_log_type(env, t, "Invalid func linkage");
3564 if (btf_type_kflag(t)) {
3565 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3569 btf_verifier_log_type(env, t, NULL);
3574 static struct btf_kind_operations func_ops = {
3575 .check_meta = btf_func_check_meta,
3576 .resolve = btf_df_resolve,
3577 .check_member = btf_df_check_member,
3578 .check_kflag_member = btf_df_check_kflag_member,
3579 .log_details = btf_ref_type_log,
3580 .show = btf_df_show,
3583 static s32 btf_var_check_meta(struct btf_verifier_env *env,
3584 const struct btf_type *t,
3587 const struct btf_var *var;
3588 u32 meta_needed = sizeof(*var);
3590 if (meta_left < meta_needed) {
3591 btf_verifier_log_basic(env, t,
3592 "meta_left:%u meta_needed:%u",
3593 meta_left, meta_needed);
3597 if (btf_type_vlen(t)) {
3598 btf_verifier_log_type(env, t, "vlen != 0");
3602 if (btf_type_kflag(t)) {
3603 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3608 !__btf_name_valid(env->btf, t->name_off, true)) {
3609 btf_verifier_log_type(env, t, "Invalid name");
3613 /* A var cannot be in type void */
3614 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
3615 btf_verifier_log_type(env, t, "Invalid type_id");
3619 var = btf_type_var(t);
3620 if (var->linkage != BTF_VAR_STATIC &&
3621 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
3622 btf_verifier_log_type(env, t, "Linkage not supported");
3626 btf_verifier_log_type(env, t, NULL);
3631 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
3633 const struct btf_var *var = btf_type_var(t);
3635 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
3638 static const struct btf_kind_operations var_ops = {
3639 .check_meta = btf_var_check_meta,
3640 .resolve = btf_var_resolve,
3641 .check_member = btf_df_check_member,
3642 .check_kflag_member = btf_df_check_kflag_member,
3643 .log_details = btf_var_log,
3644 .show = btf_var_show,
3647 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
3648 const struct btf_type *t,
3651 const struct btf_var_secinfo *vsi;
3652 u64 last_vsi_end_off = 0, sum = 0;
3655 meta_needed = btf_type_vlen(t) * sizeof(*vsi);
3656 if (meta_left < meta_needed) {
3657 btf_verifier_log_basic(env, t,
3658 "meta_left:%u meta_needed:%u",
3659 meta_left, meta_needed);
3664 btf_verifier_log_type(env, t, "size == 0");
3668 if (btf_type_kflag(t)) {
3669 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3674 !btf_name_valid_section(env->btf, t->name_off)) {
3675 btf_verifier_log_type(env, t, "Invalid name");
3679 btf_verifier_log_type(env, t, NULL);
3681 for_each_vsi(i, t, vsi) {
3682 /* A var cannot be in type void */
3683 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
3684 btf_verifier_log_vsi(env, t, vsi,
3689 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
3690 btf_verifier_log_vsi(env, t, vsi,
3695 if (!vsi->size || vsi->size > t->size) {
3696 btf_verifier_log_vsi(env, t, vsi,
3701 last_vsi_end_off = vsi->offset + vsi->size;
3702 if (last_vsi_end_off > t->size) {
3703 btf_verifier_log_vsi(env, t, vsi,
3704 "Invalid offset+size");
3708 btf_verifier_log_vsi(env, t, vsi, NULL);
3712 if (t->size < sum) {
3713 btf_verifier_log_type(env, t, "Invalid btf_info size");
3720 static int btf_datasec_resolve(struct btf_verifier_env *env,
3721 const struct resolve_vertex *v)
3723 const struct btf_var_secinfo *vsi;
3724 struct btf *btf = env->btf;
3727 for_each_vsi_from(i, v->next_member, v->t, vsi) {
3728 u32 var_type_id = vsi->type, type_id, type_size = 0;
3729 const struct btf_type *var_type = btf_type_by_id(env->btf,
3731 if (!var_type || !btf_type_is_var(var_type)) {
3732 btf_verifier_log_vsi(env, v->t, vsi,
3733 "Not a VAR kind member");
3737 if (!env_type_is_resolve_sink(env, var_type) &&
3738 !env_type_is_resolved(env, var_type_id)) {
3739 env_stack_set_next_member(env, i + 1);
3740 return env_stack_push(env, var_type, var_type_id);
3743 type_id = var_type->type;
3744 if (!btf_type_id_size(btf, &type_id, &type_size)) {
3745 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
3749 if (vsi->size < type_size) {
3750 btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
3755 env_stack_pop_resolved(env, 0, 0);
3759 static void btf_datasec_log(struct btf_verifier_env *env,
3760 const struct btf_type *t)
3762 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3765 static void btf_datasec_show(const struct btf *btf,
3766 const struct btf_type *t, u32 type_id,
3767 void *data, u8 bits_offset,
3768 struct btf_show *show)
3770 const struct btf_var_secinfo *vsi;
3771 const struct btf_type *var;
3774 if (!btf_show_start_type(show, t, type_id, data))
3777 btf_show_type_value(show, "section (\"%s\") = {",
3778 __btf_name_by_offset(btf, t->name_off));
3779 for_each_vsi(i, t, vsi) {
3780 var = btf_type_by_id(btf, vsi->type);
3782 btf_show(show, ",");
3783 btf_type_ops(var)->show(btf, var, vsi->type,
3784 data + vsi->offset, bits_offset, show);
3786 btf_show_end_type(show);
3789 static const struct btf_kind_operations datasec_ops = {
3790 .check_meta = btf_datasec_check_meta,
3791 .resolve = btf_datasec_resolve,
3792 .check_member = btf_df_check_member,
3793 .check_kflag_member = btf_df_check_kflag_member,
3794 .log_details = btf_datasec_log,
3795 .show = btf_datasec_show,
3798 static s32 btf_float_check_meta(struct btf_verifier_env *env,
3799 const struct btf_type *t,
3802 if (btf_type_vlen(t)) {
3803 btf_verifier_log_type(env, t, "vlen != 0");
3807 if (btf_type_kflag(t)) {
3808 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3812 if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
3814 btf_verifier_log_type(env, t, "Invalid type_size");
3818 btf_verifier_log_type(env, t, NULL);
3823 static int btf_float_check_member(struct btf_verifier_env *env,
3824 const struct btf_type *struct_type,
3825 const struct btf_member *member,
3826 const struct btf_type *member_type)
3828 u64 start_offset_bytes;
3829 u64 end_offset_bytes;
3834 /* Different architectures have different alignment requirements, so
3835 * here we check only for the reasonable minimum. This way we ensure
3836 * that types after CO-RE can pass the kernel BTF verifier.
3838 align_bytes = min_t(u64, sizeof(void *), member_type->size);
3839 align_bits = align_bytes * BITS_PER_BYTE;
3840 div64_u64_rem(member->offset, align_bits, &misalign_bits);
3841 if (misalign_bits) {
3842 btf_verifier_log_member(env, struct_type, member,
3843 "Member is not properly aligned");
3847 start_offset_bytes = member->offset / BITS_PER_BYTE;
3848 end_offset_bytes = start_offset_bytes + member_type->size;
3849 if (end_offset_bytes > struct_type->size) {
3850 btf_verifier_log_member(env, struct_type, member,
3851 "Member exceeds struct_size");
3858 static void btf_float_log(struct btf_verifier_env *env,
3859 const struct btf_type *t)
3861 btf_verifier_log(env, "size=%u", t->size);
3864 static const struct btf_kind_operations float_ops = {
3865 .check_meta = btf_float_check_meta,
3866 .resolve = btf_df_resolve,
3867 .check_member = btf_float_check_member,
3868 .check_kflag_member = btf_generic_check_kflag_member,
3869 .log_details = btf_float_log,
3870 .show = btf_df_show,
3873 static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
3874 const struct btf_type *t,
3877 const struct btf_decl_tag *tag;
3878 u32 meta_needed = sizeof(*tag);
3882 if (meta_left < meta_needed) {
3883 btf_verifier_log_basic(env, t,
3884 "meta_left:%u meta_needed:%u",
3885 meta_left, meta_needed);
3889 value = btf_name_by_offset(env->btf, t->name_off);
3890 if (!value || !value[0]) {
3891 btf_verifier_log_type(env, t, "Invalid value");
3895 if (btf_type_vlen(t)) {
3896 btf_verifier_log_type(env, t, "vlen != 0");
3900 if (btf_type_kflag(t)) {
3901 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3905 component_idx = btf_type_decl_tag(t)->component_idx;
3906 if (component_idx < -1) {
3907 btf_verifier_log_type(env, t, "Invalid component_idx");
3911 btf_verifier_log_type(env, t, NULL);
3916 static int btf_decl_tag_resolve(struct btf_verifier_env *env,
3917 const struct resolve_vertex *v)
3919 const struct btf_type *next_type;
3920 const struct btf_type *t = v->t;
3921 u32 next_type_id = t->type;
3922 struct btf *btf = env->btf;
3926 next_type = btf_type_by_id(btf, next_type_id);
3927 if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
3928 btf_verifier_log_type(env, v->t, "Invalid type_id");
3932 if (!env_type_is_resolve_sink(env, next_type) &&
3933 !env_type_is_resolved(env, next_type_id))
3934 return env_stack_push(env, next_type, next_type_id);
3936 component_idx = btf_type_decl_tag(t)->component_idx;
3937 if (component_idx != -1) {
3938 if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
3939 btf_verifier_log_type(env, v->t, "Invalid component_idx");
3943 if (btf_type_is_struct(next_type)) {
3944 vlen = btf_type_vlen(next_type);
3946 /* next_type should be a function */
3947 next_type = btf_type_by_id(btf, next_type->type);
3948 vlen = btf_type_vlen(next_type);
3951 if ((u32)component_idx >= vlen) {
3952 btf_verifier_log_type(env, v->t, "Invalid component_idx");
3957 env_stack_pop_resolved(env, next_type_id, 0);
3962 static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
3964 btf_verifier_log(env, "type=%u component_idx=%d", t->type,
3965 btf_type_decl_tag(t)->component_idx);
3968 static const struct btf_kind_operations decl_tag_ops = {
3969 .check_meta = btf_decl_tag_check_meta,
3970 .resolve = btf_decl_tag_resolve,
3971 .check_member = btf_df_check_member,
3972 .check_kflag_member = btf_df_check_kflag_member,
3973 .log_details = btf_decl_tag_log,
3974 .show = btf_df_show,
3977 static int btf_func_proto_check(struct btf_verifier_env *env,
3978 const struct btf_type *t)
3980 const struct btf_type *ret_type;
3981 const struct btf_param *args;
3982 const struct btf *btf;
3987 args = (const struct btf_param *)(t + 1);
3988 nr_args = btf_type_vlen(t);
3990 /* Check func return type which could be "void" (t->type == 0) */
3992 u32 ret_type_id = t->type;
3994 ret_type = btf_type_by_id(btf, ret_type_id);
3996 btf_verifier_log_type(env, t, "Invalid return type");
4000 if (btf_type_needs_resolve(ret_type) &&
4001 !env_type_is_resolved(env, ret_type_id)) {
4002 err = btf_resolve(env, ret_type, ret_type_id);
4007 /* Ensure the return type is a type that has a size */
4008 if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
4009 btf_verifier_log_type(env, t, "Invalid return type");
4017 /* Last func arg type_id could be 0 if it is a vararg */
4018 if (!args[nr_args - 1].type) {
4019 if (args[nr_args - 1].name_off) {
4020 btf_verifier_log_type(env, t, "Invalid arg#%u",
4028 for (i = 0; i < nr_args; i++) {
4029 const struct btf_type *arg_type;
4032 arg_type_id = args[i].type;
4033 arg_type = btf_type_by_id(btf, arg_type_id);
4035 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4040 if (args[i].name_off &&
4041 (!btf_name_offset_valid(btf, args[i].name_off) ||
4042 !btf_name_valid_identifier(btf, args[i].name_off))) {
4043 btf_verifier_log_type(env, t,
4044 "Invalid arg#%u", i + 1);
4049 if (btf_type_needs_resolve(arg_type) &&
4050 !env_type_is_resolved(env, arg_type_id)) {
4051 err = btf_resolve(env, arg_type, arg_type_id);
4056 if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
4057 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4066 static int btf_func_check(struct btf_verifier_env *env,
4067 const struct btf_type *t)
4069 const struct btf_type *proto_type;
4070 const struct btf_param *args;
4071 const struct btf *btf;
4075 proto_type = btf_type_by_id(btf, t->type);
4077 if (!proto_type || !btf_type_is_func_proto(proto_type)) {
4078 btf_verifier_log_type(env, t, "Invalid type_id");
4082 args = (const struct btf_param *)(proto_type + 1);
4083 nr_args = btf_type_vlen(proto_type);
4084 for (i = 0; i < nr_args; i++) {
4085 if (!args[i].name_off && args[i].type) {
4086 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4094 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
4095 [BTF_KIND_INT] = &int_ops,
4096 [BTF_KIND_PTR] = &ptr_ops,
4097 [BTF_KIND_ARRAY] = &array_ops,
4098 [BTF_KIND_STRUCT] = &struct_ops,
4099 [BTF_KIND_UNION] = &struct_ops,
4100 [BTF_KIND_ENUM] = &enum_ops,
4101 [BTF_KIND_FWD] = &fwd_ops,
4102 [BTF_KIND_TYPEDEF] = &modifier_ops,
4103 [BTF_KIND_VOLATILE] = &modifier_ops,
4104 [BTF_KIND_CONST] = &modifier_ops,
4105 [BTF_KIND_RESTRICT] = &modifier_ops,
4106 [BTF_KIND_FUNC] = &func_ops,
4107 [BTF_KIND_FUNC_PROTO] = &func_proto_ops,
4108 [BTF_KIND_VAR] = &var_ops,
4109 [BTF_KIND_DATASEC] = &datasec_ops,
4110 [BTF_KIND_FLOAT] = &float_ops,
4111 [BTF_KIND_DECL_TAG] = &decl_tag_ops,
4112 [BTF_KIND_TYPE_TAG] = &modifier_ops,
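/* Dispatch sketch (illustrative): the verifier and the show paths pick
 * the per-kind callbacks through this table, roughly
 *
 *	ops = kind_ops[BTF_INFO_KIND(t->info)];
 *	ops->check_meta(env, t, meta_left);
 *
 * so, for instance, a BTF_KIND_CONST type is checked by
 * btf_ref_type_check_meta() and printed by btf_modifier_show() via
 * modifier_ops above.
 */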
4115 static s32 btf_check_meta(struct btf_verifier_env *env,
4116 const struct btf_type *t,
4119 u32 saved_meta_left = meta_left;
4122 if (meta_left < sizeof(*t)) {
4123 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
4124 env->log_type_id, meta_left, sizeof(*t));
4127 meta_left -= sizeof(*t);
4129 if (t->info & ~BTF_INFO_MASK) {
4130 btf_verifier_log(env, "[%u] Invalid btf_info:%x",
4131 env->log_type_id, t->info);
4135 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
4136 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
4137 btf_verifier_log(env, "[%u] Invalid kind:%u",
4138 env->log_type_id, BTF_INFO_KIND(t->info));
4142 if (!btf_name_offset_valid(env->btf, t->name_off)) {
4143 btf_verifier_log(env, "[%u] Invalid name_offset:%u",
4144 env->log_type_id, t->name_off);
4148 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
4149 if (var_meta_size < 0)
4150 return var_meta_size;
4152 meta_left -= var_meta_size;
4154 return saved_meta_left - meta_left;
4157 static int btf_check_all_metas(struct btf_verifier_env *env)
4159 struct btf *btf = env->btf;
4160 struct btf_header *hdr;
4164 cur = btf->nohdr_data + hdr->type_off;
4165 end = cur + hdr->type_len;
4167 env->log_type_id = btf->base_btf ? btf->start_id : 1;
4169 struct btf_type *t = cur;
4172 meta_size = btf_check_meta(env, t, end - cur);
4176 btf_add_type(env, t);
4184 static bool btf_resolve_valid(struct btf_verifier_env *env,
4185 const struct btf_type *t,
4188 struct btf *btf = env->btf;
4190 if (!env_type_is_resolved(env, type_id))
4193 if (btf_type_is_struct(t) || btf_type_is_datasec(t))
4194 return !btf_resolved_type_id(btf, type_id) &&
4195 !btf_resolved_type_size(btf, type_id);
4197 if (btf_type_is_decl_tag(t))
4198 return btf_resolved_type_id(btf, type_id) &&
4199 !btf_resolved_type_size(btf, type_id);
4201 if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
4202 btf_type_is_var(t)) {
4203 t = btf_type_id_resolve(btf, &type_id);
4205 !btf_type_is_modifier(t) &&
4206 !btf_type_is_var(t) &&
4207 !btf_type_is_datasec(t);
4210 if (btf_type_is_array(t)) {
4211 const struct btf_array *array = btf_type_array(t);
4212 const struct btf_type *elem_type;
4213 u32 elem_type_id = array->type;
4216 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
4217 return elem_type && !btf_type_is_modifier(elem_type) &&
4218 (array->nelems * elem_size ==
4219 btf_resolved_type_size(btf, type_id));
4225 static int btf_resolve(struct btf_verifier_env *env,
4226 const struct btf_type *t, u32 type_id)
4228 u32 save_log_type_id = env->log_type_id;
4229 const struct resolve_vertex *v;
4232 env->resolve_mode = RESOLVE_TBD;
4233 env_stack_push(env, t, type_id);
4234 while (!err && (v = env_stack_peak(env))) {
4235 env->log_type_id = v->type_id;
4236 err = btf_type_ops(v->t)->resolve(env, v);
4239 env->log_type_id = type_id;
4240 if (err == -E2BIG) {
4241 btf_verifier_log_type(env, t,
4242 "Exceeded max resolving depth:%u",
4244 } else if (err == -EEXIST) {
4245 btf_verifier_log_type(env, t, "Loop detected");
4248 /* Final sanity check */
4249 if (!err && !btf_resolve_valid(env, t, type_id)) {
4250 btf_verifier_log_type(env, t, "Invalid resolve state");
4254 env->log_type_id = save_log_type_id;
4258 static int btf_check_all_types(struct btf_verifier_env *env)
4260 struct btf *btf = env->btf;
4261 const struct btf_type *t;
4265 err = env_resolve_init(env);
4270 for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
4271 type_id = btf->start_id + i;
4272 t = btf_type_by_id(btf, type_id);
4274 env->log_type_id = type_id;
4275 if (btf_type_needs_resolve(t) &&
4276 !env_type_is_resolved(env, type_id)) {
4277 err = btf_resolve(env, t, type_id);
4282 if (btf_type_is_func_proto(t)) {
4283 err = btf_func_proto_check(env, t);
4288 if (btf_type_is_func(t)) {
4289 err = btf_func_check(env, t);
4298 static int btf_parse_type_sec(struct btf_verifier_env *env)
4300 const struct btf_header *hdr = &env->btf->hdr;
4303 /* Type section must align to 4 bytes */
4304 if (hdr->type_off & (sizeof(u32) - 1)) {
4305 btf_verifier_log(env, "Unaligned type_off");
4309 if (!env->btf->base_btf && !hdr->type_len) {
4310 btf_verifier_log(env, "No type found");
4314 err = btf_check_all_metas(env);
4318 return btf_check_all_types(env);
4321 static int btf_parse_str_sec(struct btf_verifier_env *env)
4323 const struct btf_header *hdr;
4324 struct btf *btf = env->btf;
4325 const char *start, *end;
4328 start = btf->nohdr_data + hdr->str_off;
4329 end = start + hdr->str_len;
4331 if (end != btf->data + btf->data_size) {
4332 btf_verifier_log(env, "String section is not at the end");
4336 btf->strings = start;
4338 if (btf->base_btf && !hdr->str_len)
4340 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
4341 btf_verifier_log(env, "Invalid string section");
4344 if (!btf->base_btf && start[0]) {
4345 btf_verifier_log(env, "Invalid string section");
4352 static const size_t btf_sec_info_offset[] = {
4353 offsetof(struct btf_header, type_off),
4354 offsetof(struct btf_header, str_off),
4357 static int btf_sec_info_cmp(const void *a, const void *b)
4359 const struct btf_sec_info *x = a;
4360 const struct btf_sec_info *y = b;
4362 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
4365 static int btf_check_sec_info(struct btf_verifier_env *env,
4368 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
4369 u32 total, expected_total, i;
4370 const struct btf_header *hdr;
4371 const struct btf *btf;
4376 /* Populate the secs from hdr */
4377 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
4378 secs[i] = *(struct btf_sec_info *)((void *)hdr +
4379 btf_sec_info_offset[i]);
4381 sort(secs, ARRAY_SIZE(btf_sec_info_offset),
4382 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
4384 /* Check for gaps and overlap among sections */
4386 expected_total = btf_data_size - hdr->hdr_len;
4387 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
4388 if (expected_total < secs[i].off) {
4389 btf_verifier_log(env, "Invalid section offset");
4392 if (total < secs[i].off) {
4394 btf_verifier_log(env, "Unsupported section found");
4397 if (total > secs[i].off) {
4398 btf_verifier_log(env, "Section overlap found");
4401 if (expected_total - total < secs[i].len) {
4402 btf_verifier_log(env,
4403 "Total section length too long");
4406 total += secs[i].len;
4409 /* There is data other than hdr and known sections */
4410 if (expected_total != total) {
4411 btf_verifier_log(env, "Unsupported section found");
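/* Worked example for the gap/overlap check above (illustrative numbers):
 * with hdr_len = 24, a type section at off=0 len=120 and a string section
 * at off=120 len=40, btf_data_size must be 24 + 160 = 184, so
 * expected_total = 160.  The sorted walk sees
 *
 *	total=0   -> sec.off=0   (no gap), total becomes 120
 *	total=120 -> sec.off=120 (no gap), total becomes 160
 *
 * and 160 == expected_total, so the layout is accepted.  A string section
 * at off=124 instead would leave a 4-byte gap and fail with
 * "Unsupported section found".
 */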
4418 static int btf_parse_hdr(struct btf_verifier_env *env)
4420 u32 hdr_len, hdr_copy, btf_data_size;
4421 const struct btf_header *hdr;
4426 btf_data_size = btf->data_size;
4429 offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
4430 btf_verifier_log(env, "hdr_len not found");
4435 hdr_len = hdr->hdr_len;
4436 if (btf_data_size < hdr_len) {
4437 btf_verifier_log(env, "btf_header not found");
4441 /* Ensure the unsupported header fields are zero */
4442 if (hdr_len > sizeof(btf->hdr)) {
4443 u8 *expected_zero = btf->data + sizeof(btf->hdr);
4444 u8 *end = btf->data + hdr_len;
4446 for (; expected_zero < end; expected_zero++) {
4447 if (*expected_zero) {
4448 btf_verifier_log(env, "Unsupported btf_header");
4454 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
4455 memcpy(&btf->hdr, btf->data, hdr_copy);
4459 btf_verifier_log_hdr(env, btf_data_size);
4461 if (hdr->magic != BTF_MAGIC) {
4462 btf_verifier_log(env, "Invalid magic");
4466 if (hdr->version != BTF_VERSION) {
4467 btf_verifier_log(env, "Unsupported version");
4472 btf_verifier_log(env, "Unsupported flags");
4476 if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
4477 btf_verifier_log(env, "No data");
4481 err = btf_check_sec_info(env, btf_data_size);
4488 static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
4489 u32 log_level, char __user *log_ubuf, u32 log_size)
4491 struct btf_verifier_env *env = NULL;
4492 struct bpf_verifier_log *log;
4493 struct btf *btf = NULL;
4497 if (btf_data_size > BTF_MAX_SIZE)
4498 return ERR_PTR(-E2BIG);
4500 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
4502 return ERR_PTR(-ENOMEM);
4505 if (log_level || log_ubuf || log_size) {
4506 /* user requested verbose verifier output
4507 * and supplied buffer to store the verification trace
4509 log->level = log_level;
4510 log->ubuf = log_ubuf;
4511 log->len_total = log_size;
4513 /* log attributes have to be sane */
4514 if (!bpf_verifier_log_attr_valid(log)) {
4520 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
4527 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
4534 btf->data_size = btf_data_size;
4536 if (copy_from_bpfptr(data, btf_data, btf_data_size)) {
4541 err = btf_parse_hdr(env);
4545 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
4547 err = btf_parse_str_sec(env);
4551 err = btf_parse_type_sec(env);
4555 if (log->level && bpf_verifier_log_full(log)) {
4560 btf_verifier_env_free(env);
4561 refcount_set(&btf->refcnt, 1);
4565 btf_verifier_env_free(env);
4568 return ERR_PTR(err);
4571 extern char __weak __start_BTF[];
4572 extern char __weak __stop_BTF[];
4573 extern struct btf *btf_vmlinux;
4575 #define BPF_MAP_TYPE(_id, _ops)
4576 #define BPF_LINK_TYPE(_id, _name)
4578 struct bpf_ctx_convert {
4579 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
4580 prog_ctx_type _id##_prog; \
4581 kern_ctx_type _id##_kern;
4582 #include <linux/bpf_types.h>
4583 #undef BPF_PROG_TYPE
4585 /* 't' is written once under lock. Read many times. */
4586 const struct btf_type *t;
4589 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
4591 #include <linux/bpf_types.h>
4592 #undef BPF_PROG_TYPE
4593 __ctx_convert_unused, /* to avoid empty enum in extreme .config */
4595 static u8 bpf_ctx_convert_map[] = {
4596 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
4597 [_id] = __ctx_convert##_id,
4598 #include <linux/bpf_types.h>
4599 #undef BPF_PROG_TYPE
4600 0, /* avoid empty array */
4603 #undef BPF_LINK_TYPE
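/* Expansion sketch (illustrative; the socket filter entry from
 * include/linux/bpf_types.h is used as an assumed example): inside
 * struct bpf_ctx_convert the BPF_PROG_TYPE() invocation expands roughly to
 *
 *	struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog;
 *	struct sk_buff   BPF_PROG_TYPE_SOCKET_FILTER_kern;
 *
 * so every program type contributes an adjacent (prog ctx, kernel ctx)
 * member pair.  btf_get_prog_ctx_type() below returns the prog-side
 * member; its kernel-side twin sits right after it, which is why
 * btf_translate_to_vmlinux() reads "prog_ctx_type + 1".
 */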
4605 static const struct btf_member *
4606 btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
4607 const struct btf_type *t, enum bpf_prog_type prog_type,
4610 const struct btf_type *conv_struct;
4611 const struct btf_type *ctx_struct;
4612 const struct btf_member *ctx_type;
4613 const char *tname, *ctx_tname;
4615 conv_struct = bpf_ctx_convert.t;
4617 bpf_log(log, "btf_vmlinux is malformed\n");
4620 t = btf_type_by_id(btf, t->type);
4621 while (btf_type_is_modifier(t))
4622 t = btf_type_by_id(btf, t->type);
4623 if (!btf_type_is_struct(t)) {
4624 /* Only pointer to struct is supported for now.
4625 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
4626 * is not supported yet.
4627 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
4631 tname = btf_name_by_offset(btf, t->name_off);
4633 bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
4636 /* prog_type is valid bpf program type. No need for bounds check. */
4637 ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
4638 /* ctx_struct is a pointer to prog_ctx_type in vmlinux.
4639 * Like 'struct __sk_buff'
4641 ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
4643 /* should not happen */
4645 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
4647 /* should not happen */
4648 bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
4651 /* only compare that the prog's ctx type name is the same as what
4652 * the kernel expects. No need to compare field by field.
4653 * It's ok for bpf prog to do:
4654 * struct __sk_buff {};
4655 * int socket_filter_bpf_prog(struct __sk_buff *skb)
4656 * { // no fields of skb are ever used }
4658 if (strcmp(ctx_tname, tname))
4663 static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = {
4664 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
4665 #define BPF_LINK_TYPE(_id, _name)
4666 #define BPF_MAP_TYPE(_id, _ops) \
4668 #include <linux/bpf_types.h>
4669 #undef BPF_PROG_TYPE
4670 #undef BPF_LINK_TYPE
4674 static int btf_vmlinux_map_ids_init(const struct btf *btf,
4675 struct bpf_verifier_log *log)
4677 const struct bpf_map_ops *ops;
4680 for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) {
4681 ops = btf_vmlinux_map_ops[i];
4682 if (!ops || (!ops->map_btf_name && !ops->map_btf_id))
4684 if (!ops->map_btf_name || !ops->map_btf_id) {
4685 bpf_log(log, "map type %d is misconfigured\n", i);
4688 btf_id = btf_find_by_name_kind(btf, ops->map_btf_name,
4692 *ops->map_btf_id = btf_id;
4698 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
4700 const struct btf_type *t,
4701 enum bpf_prog_type prog_type,
4704 const struct btf_member *prog_ctx_type, *kern_ctx_type;
4706 prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg);
4709 kern_ctx_type = prog_ctx_type + 1;
4710 return kern_ctx_type->type;
4713 BTF_ID_LIST(bpf_ctx_convert_btf_id)
4714 BTF_ID(struct, bpf_ctx_convert)
4716 struct btf *btf_parse_vmlinux(void)
4718 struct btf_verifier_env *env = NULL;
4719 struct bpf_verifier_log *log;
4720 struct btf *btf = NULL;
4723 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
4725 return ERR_PTR(-ENOMEM);
4728 log->level = BPF_LOG_KERNEL;
4730 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
4737 btf->data = __start_BTF;
4738 btf->data_size = __stop_BTF - __start_BTF;
4739 btf->kernel_btf = true;
4740 snprintf(btf->name, sizeof(btf->name), "vmlinux");
4742 err = btf_parse_hdr(env);
4746 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
4748 err = btf_parse_str_sec(env);
4752 err = btf_check_all_metas(env);
4756 /* btf_parse_vmlinux() runs under bpf_verifier_lock */
4757 bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
4759 /* find bpf map structs for map_ptr access checking */
4760 err = btf_vmlinux_map_ids_init(btf, log);
4764 bpf_struct_ops_init(btf, log);
4766 refcount_set(&btf->refcnt, 1);
4768 err = btf_alloc_id(btf);
4772 btf_verifier_env_free(env);
4776 btf_verifier_env_free(env);
4781 return ERR_PTR(err);
4784 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
4786 static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size)
4788 struct btf_verifier_env *env = NULL;
4789 struct bpf_verifier_log *log;
4790 struct btf *btf = NULL, *base_btf;
4793 base_btf = bpf_get_btf_vmlinux();
4794 if (IS_ERR(base_btf))
4797 return ERR_PTR(-EINVAL);
4799 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
4801 return ERR_PTR(-ENOMEM);
4804 log->level = BPF_LOG_KERNEL;
4806 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
4813 btf->base_btf = base_btf;
4814 btf->start_id = base_btf->nr_types;
4815 btf->start_str_off = base_btf->hdr.str_len;
4816 btf->kernel_btf = true;
4817 snprintf(btf->name, sizeof(btf->name), "%s", module_name);
4819 btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN);
4824 memcpy(btf->data, data, data_size);
4825 btf->data_size = data_size;
4827 err = btf_parse_hdr(env);
4831 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
4833 err = btf_parse_str_sec(env);
4837 err = btf_check_all_metas(env);
4841 btf_verifier_env_free(env);
4842 refcount_set(&btf->refcnt, 1);
4846 btf_verifier_env_free(env);
4852 return ERR_PTR(err);
4855 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
4857 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
4859 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
4862 return tgt_prog->aux->btf;
4864 return prog->aux->attach_btf;
4867 static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
4869 /* t comes in already as a pointer */
4870 t = btf_type_by_id(btf, t->type);
4873 if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
4874 t = btf_type_by_id(btf, t->type);
4876 return btf_type_is_int(t);
4879 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
4880 const struct bpf_prog *prog,
4881 struct bpf_insn_access_aux *info)
4883 const struct btf_type *t = prog->aux->attach_func_proto;
4884 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
4885 struct btf *btf = bpf_prog_get_target_btf(prog);
4886 const char *tname = prog->aux->attach_func_name;
4887 struct bpf_verifier_log *log = info->log;
4888 const struct btf_param *args;
4893 bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
4898 args = (const struct btf_param *)(t + 1);
4899 /* if t is NULL, fall back to a default BPF prog with
4900 * MAX_BPF_FUNC_REG_ARGS u64 arguments.
4902 nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS;
4903 if (prog->aux->attach_btf_trace) {
4904 /* skip first 'void *__data' argument in btf_trace_##name typedef */
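		/* An assumed (illustrative) shape of such a typedef, as the
		 * tracepoint infrastructure generates it:
		 *
		 *   typedef void (*btf_trace_kfree_skb)(void *__data,
		 *                                        struct sk_buff *skb,
		 *                                        void *location);
		 *
		 * The BPF program never sees __data, so it is skipped when
		 * counting and indexing arguments here.
		 */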
4909 if (arg > nr_args) {
4910 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
4915 if (arg == nr_args) {
4916 switch (prog->expected_attach_type) {
4918 case BPF_TRACE_FEXIT:
4919 /* When LSM programs are attached to void LSM hooks
4920 * they use FEXIT trampolines and when attached to
4921 * int LSM hooks, they use MODIFY_RETURN trampolines.
4923 * While the LSM programs are BPF_MODIFY_RETURN-like,
4926 * the check "if (ret_type != 'int') return -EINVAL;"
4929 * is _not_ done here. This is still safe as LSM hooks
4930 * have only void and int return types.
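 * e.g. (illustrative) bprm_committed_creds() is a void LSM hook and
 * attaches FEXIT-style, while bprm_check_security() returns int and
 * therefore attaches via a MODIFY_RETURN-style trampoline.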
4934 t = btf_type_by_id(btf, t->type);
4936 case BPF_MODIFY_RETURN:
4937 /* For now the BPF_MODIFY_RETURN can only be attached to
4938 * functions that return an int.
4943 t = btf_type_skip_modifiers(btf, t->type, NULL);
4944 if (!btf_type_is_small_int(t)) {
4946 "ret type %s not allowed for fmod_ret\n",
4947 btf_kind_str[BTF_INFO_KIND(t->info)]);
4952 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
4958 /* Default prog with MAX_BPF_FUNC_REG_ARGS args */
4960 t = btf_type_by_id(btf, args[arg].type);
4963 /* skip modifiers */
4964 while (btf_type_is_modifier(t))
4965 t = btf_type_by_id(btf, t->type);
4966 if (btf_type_is_small_int(t) || btf_type_is_enum(t))
4967 /* accessing a scalar */
4969 if (!btf_type_is_ptr(t)) {
4971 "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
4973 __btf_name_by_offset(btf, t->name_off),
4974 btf_kind_str[BTF_INFO_KIND(t->info)]);
4978 /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
4979 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
4980 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
4983 type = base_type(ctx_arg_info->reg_type);
4984 flag = type_flag(ctx_arg_info->reg_type);
4985 if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
4986 (flag & PTR_MAYBE_NULL)) {
4987 info->reg_type = ctx_arg_info->reg_type;
4993 /* This is a pointer to void.
4994 * It is the same as scalar from the verifier safety pov.
4995 * No further pointer walking is allowed.
4999 if (is_int_ptr(btf, t))
5002 /* this is a pointer to another type */
5003 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
5004 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
5006 if (ctx_arg_info->offset == off) {
5007 if (!ctx_arg_info->btf_id) {
5008 bpf_log(log, "invalid btf_id for context argument offset %u\n", off);
5012 info->reg_type = ctx_arg_info->reg_type;
5013 info->btf = btf_vmlinux;
5014 info->btf_id = ctx_arg_info->btf_id;
5019 info->reg_type = PTR_TO_BTF_ID;
5021 enum bpf_prog_type tgt_type;
5023 if (tgt_prog->type == BPF_PROG_TYPE_EXT)
5024 tgt_type = tgt_prog->aux->saved_dst_prog_type;
5026 tgt_type = tgt_prog->type;
5028 ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
5030 info->btf = btf_vmlinux;
5039 info->btf_id = t->type;
5040 t = btf_type_by_id(btf, t->type);
5041 /* skip modifiers */
5042 while (btf_type_is_modifier(t)) {
5043 info->btf_id = t->type;
5044 t = btf_type_by_id(btf, t->type);
5046 if (!btf_type_is_struct(t)) {
5048 "func '%s' arg%d type %s is not a struct\n",
5049 tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]);
5052 bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
5053 tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
5054 __btf_name_by_offset(btf, t->name_off));
5058 enum bpf_struct_walk_result {
5065 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
5066 const struct btf_type *t, int off, int size,
5069 u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
5070 const struct btf_type *mtype, *elem_type = NULL;
5071 const struct btf_member *member;
5072 const char *tname, *mname;
5073 u32 vlen, elem_id, mid;
5076 tname = __btf_name_by_offset(btf, t->name_off);
5077 if (!btf_type_is_struct(t)) {
5078 bpf_log(log, "Type '%s' is not a struct\n", tname);
5082 vlen = btf_type_vlen(t);
5083 if (off + size > t->size) {
5084 /* If the last element is a variable size array, we may
5085 * need to relax the rule.
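		/* e.g. (illustrative structs, not from this file)
		 *
		 *   struct rec  { __u64 ts; __u64 val; };
		 *   struct ring { __u32 len; struct rec data[]; };
		 *
		 * an access with off + size > sizeof(struct ring) may still
		 * be valid when it lands in the flexible "data" member; the
		 * checks below only accept this when the element type is a
		 * struct, and then rebase off into a single element.
		 */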
5087 struct btf_array *array_elem;
5092 member = btf_type_member(t) + vlen - 1;
5093 mtype = btf_type_skip_modifiers(btf, member->type,
5095 if (!btf_type_is_array(mtype))
5098 array_elem = (struct btf_array *)(mtype + 1);
5099 if (array_elem->nelems != 0)
5102 moff = __btf_member_bit_offset(t, member) / 8;
5106 /* Only allow structure for now, can be relaxed for
5107 * other types later.
5109 t = btf_type_skip_modifiers(btf, array_elem->type,
5111 if (!btf_type_is_struct(t))
5114 off = (off - moff) % t->size;
5118 bpf_log(log, "access beyond struct %s at off %u size %u\n",
5123 for_each_member(i, t, member) {
5124 /* offset of the field in bytes */
5125 moff = __btf_member_bit_offset(t, member) / 8;
5126 if (off + size <= moff)
5127 /* won't find anything, field is already too far */
5130 if (__btf_member_bitfield_size(t, member)) {
5131 u32 end_bit = __btf_member_bit_offset(t, member) +
5132 __btf_member_bitfield_size(t, member);
5134 /* off <= moff instead of off == moff because clang
5135 * does not generate a BTF member for anonymous
5136 * bitfield like the ":16" here:
 *
 * struct {
 *	int :16;
 *	int x:8;
 * };
5143 BITS_ROUNDUP_BYTES(end_bit) <= off + size)
5146 /* off may be accessing a following member
5150 * Doing partial access at either end of this
5151 * bitfield. Continue on this case also to
5152 * treat it as not accessing this bitfield
5153 * and eventually error out as field not
5154 * found to keep it simple.
5155 * It could be relaxed if there was a legit
5156 * partial access case later.
5161 /* "off" may be pointing into a hole of the struct */
5165 /* type of the field */
5167 mtype = btf_type_by_id(btf, member->type);
5168 mname = __btf_name_by_offset(btf, member->name_off);
5170 mtype = __btf_resolve_size(btf, mtype, &msize,
5171 &elem_type, &elem_id, &total_nelems,
5173 if (IS_ERR(mtype)) {
5174 bpf_log(log, "field %s doesn't have size\n", mname);
5178 mtrue_end = moff + msize;
5179 if (off >= mtrue_end)
5180 /* no overlap with member, keep iterating */
5183 if (btf_type_is_array(mtype)) {
5186 /* __btf_resolve_size() above helps to
5187 * linearize a multi-dimensional array.
5189 * The logic here is treating an array
5190 * in a struct as the following way:
5193 * struct inner array[2][2];
5199 * struct inner array_elem0;
5200 * struct inner array_elem1;
5201 * struct inner array_elem2;
5202 * struct inner array_elem3;
5205 * When accessing outer->array[1][0], it moves
5206 * moff to "array_elem2", set mtype to
5207 * "struct inner", and msize also becomes
5208 * sizeof(struct inner). Then most of the
5209 * remaining logic will fall through without
5210 * caring whether the current member is an array or not.
5213 * Unlike mtype/msize/moff, mtrue_end does not
5214 * change. The naming difference ("_true") tells
5215 * that it does not always correspond to
5216 * the current mtype/msize/moff.
5217 * It is the true end of the current
5218 * member (i.e. array in this case). That
5219 * will allow an int array to be accessed like a scratch space,
5221 * i.e. allow access beyond the size of
5222 * the array's element as long as it is
5223 * within the mtrue_end boundary.
5226 /* skip empty array */
5227 if (moff == mtrue_end)
5230 msize /= total_nelems;
5231 elem_idx = (off - moff) / msize;
5232 moff += elem_idx * msize;
5237 /* the 'off' we're looking for is either equal to start
5238 * of this field or inside of this struct
5240 if (btf_type_is_struct(mtype)) {
5241 /* our field must be inside that union or struct */
5244 /* return if the offset matches the member offset */
5250 /* adjust offset we're looking for */
5255 if (btf_type_is_ptr(mtype)) {
5256 const struct btf_type *stype;
5259 if (msize != size || off != moff) {
5261 "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
5262 mname, moff, tname, off, size);
5265 stype = btf_type_skip_modifiers(btf, mtype->type, &id);
5266 if (btf_type_is_struct(stype)) {
5272 /* Allow more flexible access within an int as long as
5273 * it is within mtrue_end.
5274 * Since mtrue_end could be the end of an array,
5275 * that also allows using an array of int as a scratch
5276 * space. e.g. skb->cb[].
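		 * For example (illustrative), with struct sk_buff's
		 * "char cb[48]" an 8-byte access at cb + 16 is accepted:
		 * it spans several 1-byte elements but stays below
		 * mtrue_end, the true end of the array.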
5278 if (off + size > mtrue_end) {
5280 "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
5281 mname, mtrue_end, tname, off, size);
5287 bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
5291 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
5292 const struct btf_type *t, int off, int size,
5293 enum bpf_access_type atype __maybe_unused,
5300 err = btf_struct_walk(log, btf, t, off, size, &id);
5304 /* If we found the pointer or scalar on t+off,
5308 return PTR_TO_BTF_ID;
5310 return SCALAR_VALUE;
5312 /* We found nested struct, so continue the search
5313 * by diving in it. At this point the offset is
5314 * aligned with the new type, so set it to 0.
5316 t = btf_type_by_id(btf, id);
5320 /* It's either error or unknown return value..
5323 if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
5332 /* Check that two BTF types, each specified as an BTF object + id, are exactly
5333 * the same. Trivial ID check is not enough due to module BTFs, because we can
5334 * end up with two different module BTFs, but IDs point to the common type in
5337 static bool btf_types_are_same(const struct btf *btf1, u32 id1,
5338 const struct btf *btf2, u32 id2)
5344 return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
5347 bool btf_struct_ids_match(struct bpf_verifier_log *log,
5348 const struct btf *btf, u32 id, int off,
5349 const struct btf *need_btf, u32 need_type_id)
5351 const struct btf_type *type;
5354 /* Are we already done? */
5355 if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
5359 type = btf_type_by_id(btf, id);
5362 err = btf_struct_walk(log, btf, type, off, 1, &id);
5363 if (err != WALK_STRUCT)
5366 /* We found nested struct object. If it matches
5367 * the requested ID, we're done. Otherwise let's
5368 * continue the search with offset 0 in the new
5371 if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
5379 static int __get_type_size(struct btf *btf, u32 btf_id,
5380 const struct btf_type **bad_type)
5382 const struct btf_type *t;
5387 t = btf_type_by_id(btf, btf_id);
5388 while (t && btf_type_is_modifier(t))
5389 t = btf_type_by_id(btf, t->type);
5391 *bad_type = btf_type_by_id(btf, 0);
5394 if (btf_type_is_ptr(t))
5395 /* kernel size of pointer. Not BPF's size of pointer */
5396 return sizeof(void *);
5397 if (btf_type_is_int(t) || btf_type_is_enum(t))
5403 int btf_distill_func_proto(struct bpf_verifier_log *log,
5405 const struct btf_type *func,
5407 struct btf_func_model *m)
5409 const struct btf_param *args;
5410 const struct btf_type *t;
5415 /* BTF function prototype doesn't match the verifier types.
5416 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args.
5418 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++)
5421 m->nr_args = MAX_BPF_FUNC_REG_ARGS;
5424 args = (const struct btf_param *)(func + 1);
5425 nargs = btf_type_vlen(func);
5426 if (nargs >= MAX_BPF_FUNC_ARGS) {
5428 "The function %s has %d arguments. Too many.\n",
5432 ret = __get_type_size(btf, func->type, &t);
5435 "The function %s return type %s is unsupported.\n",
5436 tname, btf_kind_str[BTF_INFO_KIND(t->info)]);
5441 for (i = 0; i < nargs; i++) {
5442 if (i == nargs - 1 && args[i].type == 0) {
5444 "The function %s with variable args is unsupported.\n",
5448 ret = __get_type_size(btf, args[i].type, &t);
5451 "The function %s arg%d type %s is unsupported.\n",
5452 tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
5457 "The function %s has malformed void argument.\n",
5461 m->arg_size[i] = ret;
5467 /* Compare BTFs of two functions assuming only scalars and pointers to context.
5468 * t1 points to BTF_KIND_FUNC in btf1
5469 * t2 points to BTF_KIND_FUNC in btf2
5471 * EINVAL - function prototype mismatch
5472 * EFAULT - verifier bug
5473 * 0 - 99% match. The last 1% is validated by the verifier.
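/* An illustrative (hypothetical) pair that would pass this check when a
 * freplace program extends a target program:
 *
 *   target:     __noinline int do_verdict(struct __sk_buff *skb, int flags);
 *   extension:  int do_verdict(struct __sk_buff *skb, int flags);
 *
 * Both must be global functions; scalars and pointers to the same
 * context struct are matched by kind/size/name only, as done below.
 */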
5475 static int btf_check_func_type_match(struct bpf_verifier_log *log,
5476 struct btf *btf1, const struct btf_type *t1,
5477 struct btf *btf2, const struct btf_type *t2)
5479 const struct btf_param *args1, *args2;
5480 const char *fn1, *fn2, *s1, *s2;
5481 u32 nargs1, nargs2, i;
5483 fn1 = btf_name_by_offset(btf1, t1->name_off);
5484 fn2 = btf_name_by_offset(btf2, t2->name_off);
5486 if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
5487 bpf_log(log, "%s() is not a global function\n", fn1);
5490 if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
5491 bpf_log(log, "%s() is not a global function\n", fn2);
5495 t1 = btf_type_by_id(btf1, t1->type);
5496 if (!t1 || !btf_type_is_func_proto(t1))
5498 t2 = btf_type_by_id(btf2, t2->type);
5499 if (!t2 || !btf_type_is_func_proto(t2))
5502 args1 = (const struct btf_param *)(t1 + 1);
5503 nargs1 = btf_type_vlen(t1);
5504 args2 = (const struct btf_param *)(t2 + 1);
5505 nargs2 = btf_type_vlen(t2);
5507 if (nargs1 != nargs2) {
5508 bpf_log(log, "%s() has %d args while %s() has %d args\n",
5509 fn1, nargs1, fn2, nargs2);
5513 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
5514 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
5515 if (t1->info != t2->info) {
5517 "Return type %s of %s() doesn't match type %s of %s()\n",
5518 btf_type_str(t1), fn1,
5519 btf_type_str(t2), fn2);
5523 for (i = 0; i < nargs1; i++) {
5524 t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
5525 t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
5527 if (t1->info != t2->info) {
5528 bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
5529 i, fn1, btf_type_str(t1),
5530 fn2, btf_type_str(t2));
5533 if (btf_type_has_size(t1) && t1->size != t2->size) {
5535 "arg%d in %s() has size %d while %s() has %d\n",
5541 /* global functions are validated with scalars and pointers
5542 * to context only. And only global functions can be replaced.
5543 * Hence type check only those types.
5545 if (btf_type_is_int(t1) || btf_type_is_enum(t1))
5547 if (!btf_type_is_ptr(t1)) {
5549 "arg%d in %s() has unrecognized type\n",
5553 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
5554 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
5555 if (!btf_type_is_struct(t1)) {
5557 "arg%d in %s() is not a pointer to context\n",
5561 if (!btf_type_is_struct(t2)) {
5563 "arg%d in %s() is not a pointer to context\n",
5567 /* This is an optional check to make program writing easier.
5568 * Compare names of structs and report an error to the user.
5569 * btf_prepare_func_args() already checked that t2 struct
5570 * is a context type. btf_prepare_func_args() will check
5571 * later that t1 struct is a context type as well.
5573 s1 = btf_name_by_offset(btf1, t1->name_off);
5574 s2 = btf_name_by_offset(btf2, t2->name_off);
5575 if (strcmp(s1, s2)) {
5577 "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
5578 i, fn1, s1, fn2, s2);
5585 /* Compare BTFs of given program with BTF of target program */
5586 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
5587 struct btf *btf2, const struct btf_type *t2)
5589 struct btf *btf1 = prog->aux->btf;
5590 const struct btf_type *t1;
5593 if (!prog->aux->func_info) {
5594 bpf_log(log, "Program extension requires BTF\n");
5598 btf_id = prog->aux->func_info[0].type_id;
5602 t1 = btf_type_by_id(btf1, btf_id);
5603 if (!t1 || !btf_type_is_func(t1))
5606 return btf_check_func_type_match(log, btf1, t1, btf2, t2);
5609 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
5611 [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
5612 [PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
5613 [PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
5617 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
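/* e.g. (illustrative) this passes:
 *
 *   struct scalars_only { int a; struct { long b; char c[8]; } n; };
 *
 * while any struct containing a pointer member, or nesting deeper than
 * the allowed depth, is rejected.
 */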
5618 static bool __btf_type_is_scalar_struct(struct bpf_verifier_log *log,
5619 const struct btf *btf,
5620 const struct btf_type *t, int rec)
5622 const struct btf_type *member_type;
5623 const struct btf_member *member;
5626 if (!btf_type_is_struct(t))
5629 for_each_member(i, t, member) {
5630 const struct btf_array *array;
5632 member_type = btf_type_skip_modifiers(btf, member->type, NULL);
5633 if (btf_type_is_struct(member_type)) {
5635 bpf_log(log, "max struct nesting depth exceeded\n");
5638 if (!__btf_type_is_scalar_struct(log, btf, member_type, rec + 1))
5642 if (btf_type_is_array(member_type)) {
5643 array = btf_type_array(member_type);
5646 member_type = btf_type_skip_modifiers(btf, array->type, NULL);
5647 if (!btf_type_is_scalar(member_type))
5651 if (!btf_type_is_scalar(member_type))
5657 static bool is_kfunc_arg_mem_size(const struct btf *btf,
5658 const struct btf_param *arg,
5659 const struct bpf_reg_state *reg)
5661 int len, sfx_len = sizeof("__sz") - 1;
5662 const struct btf_type *t;
5663 const char *param_name;
5665 t = btf_type_skip_modifiers(btf, arg->type, NULL);
5666 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
5669 /* In the future, this can be ported to use BTF tagging */
5670 param_name = btf_name_by_offset(btf, arg->name_off);
5671 if (str_is_empty(param_name))
5673 len = strlen(param_name);
5676 param_name += len - sfx_len;
5677 if (strncmp(param_name, "__sz", sfx_len))
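/* An assumed (illustrative) kfunc prototype using this convention:
 *
 *   void *bpf_dynmem_acquire(void *mem, u32 mem__sz);
 *
 * the "__sz" suffix on the scalar argument marks it as the byte size of
 * the preceding pointer, so the pair is checked as (mem, len) below.
 */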
5683 static int btf_check_func_arg_match(struct bpf_verifier_env *env,
5684 const struct btf *btf, u32 func_id,
5685 struct bpf_reg_state *regs,
5688 struct bpf_verifier_log *log = &env->log;
5689 u32 i, nargs, ref_id, ref_obj_id = 0;
5690 bool is_kfunc = btf_is_kernel(btf);
5691 const char *func_name, *ref_tname;
5692 const struct btf_type *t, *ref_t;
5693 const struct btf_param *args;
5697 t = btf_type_by_id(btf, func_id);
5698 if (!t || !btf_type_is_func(t)) {
5699 /* These checks were already done by the verifier while loading
5700 * struct bpf_func_info or in add_kfunc_call().
5702 bpf_log(log, "BTF of func_id %u doesn't point to KIND_FUNC\n",
5706 func_name = btf_name_by_offset(btf, t->name_off);
5708 t = btf_type_by_id(btf, t->type);
5709 if (!t || !btf_type_is_func_proto(t)) {
5710 bpf_log(log, "Invalid BTF of func %s\n", func_name);
5713 args = (const struct btf_param *)(t + 1);
5714 nargs = btf_type_vlen(t);
5715 if (nargs > MAX_BPF_FUNC_REG_ARGS) {
5716 bpf_log(log, "Function %s has %d > %d args\n", func_name, nargs,
5717 MAX_BPF_FUNC_REG_ARGS);
5721 /* check that BTF function arguments match actual types that the
5724 for (i = 0; i < nargs; i++) {
5726 struct bpf_reg_state *reg = &regs[regno];
5728 t = btf_type_skip_modifiers(btf, args[i].type, NULL);
5729 if (btf_type_is_scalar(t)) {
5730 if (reg->type == SCALAR_VALUE)
5732 bpf_log(log, "R%d is not a scalar\n", regno);
5736 if (!btf_type_is_ptr(t)) {
5737 bpf_log(log, "Unrecognized arg#%d type %s\n",
5738 i, btf_type_str(t));
5742 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
5743 ref_tname = btf_name_by_offset(btf, ref_t->name_off);
5744 if (btf_get_prog_ctx_type(log, btf, t,
5745 env->prog->type, i)) {
5746 /* If function expects ctx type in BTF check that caller
5747 * is passing PTR_TO_CTX.
5749 if (reg->type != PTR_TO_CTX) {
5751 "arg#%d expected pointer to ctx, but got %s\n",
5752 i, btf_type_str(t));
5755 if (check_ptr_off_reg(env, reg, regno))
5757 } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID || reg2btf_ids[reg->type])) {
5758 const struct btf_type *reg_ref_t;
5759 const struct btf *reg_btf;
5760 const char *reg_ref_tname;
5763 if (!btf_type_is_struct(ref_t)) {
5764 bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
5765 func_name, i, btf_type_str(ref_t),
5770 if (reg->type == PTR_TO_BTF_ID) {
5772 reg_ref_id = reg->btf_id;
5773 /* Ensure only one argument is referenced PTR_TO_BTF_ID */
5774 if (reg->ref_obj_id) {
5776 bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
5777 regno, reg->ref_obj_id, ref_obj_id);
5781 ref_obj_id = reg->ref_obj_id;
5784 reg_btf = btf_vmlinux;
5785 reg_ref_id = *reg2btf_ids[reg->type];
5788 reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
5790 reg_ref_tname = btf_name_by_offset(reg_btf,
5791 reg_ref_t->name_off);
5792 if (!btf_struct_ids_match(log, reg_btf, reg_ref_id,
5793 reg->off, btf, ref_id)) {
5794 bpf_log(log, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
5796 btf_type_str(ref_t), ref_tname,
5797 regno, btf_type_str(reg_ref_t),
5801 } else if (ptr_to_mem_ok) {
5802 const struct btf_type *resolve_ret;
5806 bool arg_mem_size = i + 1 < nargs && is_kfunc_arg_mem_size(btf, &args[i + 1], &regs[regno + 1]);
5808 /* Permit pointer to mem, but only when argument
5809 * type is pointer to scalar, or struct composed
5810 * (recursively) of scalars.
5811 * When arg_mem_size is true, the pointer can be
5814 if (!btf_type_is_scalar(ref_t) &&
5815 !__btf_type_is_scalar_struct(log, btf, ref_t, 0) &&
5816 (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
5818 "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
5819 i, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
5823 /* Check for mem, len pair */
5825 if (check_kfunc_mem_size_reg(env, &regs[regno + 1], regno + 1)) {
5826 bpf_log(log, "arg#%d arg#%d memory, len pair leads to invalid memory access\n",
5835 resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
5836 if (IS_ERR(resolve_ret)) {
5838 "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
5839 i, btf_type_str(ref_t), ref_tname,
5840 PTR_ERR(resolve_ret));
5844 if (check_mem_reg(env, reg, regno, type_size))
5847 bpf_log(log, "reg type unsupported for arg#%d %sfunction %s#%d\n", i,
5848 is_kfunc ? "kernel " : "", func_name, func_id);
5853 /* Either both are set, or neither */
5854 WARN_ON_ONCE((ref_obj_id && !ref_regno) || (!ref_obj_id && ref_regno));
5856 rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog),
5857 BTF_KFUNC_TYPE_RELEASE, func_id);
5858 /* We already made sure ref_obj_id is set only for one argument */
5859 if (rel && !ref_obj_id) {
5860 bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
5864 /* Allow (!rel && ref_obj_id), so that passing such referenced PTR_TO_BTF_ID to
5865 * other kfuncs works
5868 /* returns argument register number > 0 in case of reference release kfunc */
5869 return rel ? ref_regno : 0;
5872 /* Compare BTF of a function with given bpf_reg_state.
5874 * EFAULT - there is a verifier bug. Abort verification.
5875 * EINVAL - there is a type mismatch or BTF is not available.
5876 * 0 - BTF matches with what bpf_reg_state expects.
5877 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
5879 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
5880 struct bpf_reg_state *regs)
5882 struct bpf_prog *prog = env->prog;
5883 struct btf *btf = prog->aux->btf;
5888 if (!prog->aux->func_info)
5891 btf_id = prog->aux->func_info[subprog].type_id;
5895 if (prog->aux->func_info_aux[subprog].unreliable)
5898 is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
5899 err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global);
5901 /* Compiler optimizations can remove arguments from static functions
5902 * or mismatched type can be passed into a global function.
5903 * In such cases mark the function as unreliable from BTF point of view.
5906 prog->aux->func_info_aux[subprog].unreliable = true;
5910 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
5911 const struct btf *btf, u32 func_id,
5912 struct bpf_reg_state *regs)
5914 return btf_check_func_arg_match(env, btf, func_id, regs, true);
5917 /* Convert BTF of a function into bpf_reg_state if possible
5919 * EFAULT - there is a verifier bug. Abort verification.
5920 * EINVAL - cannot convert BTF.
5921 * 0 - Successfully converted BTF into bpf_reg_state
5922 * (either PTR_TO_CTX or SCALAR_VALUE).
5924 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
5925 struct bpf_reg_state *regs)
5927 struct bpf_verifier_log *log = &env->log;
5928 struct bpf_prog *prog = env->prog;
5929 enum bpf_prog_type prog_type = prog->type;
5930 struct btf *btf = prog->aux->btf;
5931 const struct btf_param *args;
5932 const struct btf_type *t, *ref_t;
5933 u32 i, nargs, btf_id;
5936 if (!prog->aux->func_info ||
5937 prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
5938 bpf_log(log, "Verifier bug\n");
5942 btf_id = prog->aux->func_info[subprog].type_id;
5944 bpf_log(log, "Global functions need valid BTF\n");
5948 t = btf_type_by_id(btf, btf_id);
5949 if (!t || !btf_type_is_func(t)) {
5950 /* These checks were already done by the verifier while loading
5951 * struct bpf_func_info
5953 bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
5957 tname = btf_name_by_offset(btf, t->name_off);
5959 if (log->level & BPF_LOG_LEVEL)
5960 bpf_log(log, "Validating %s() func#%d...\n",
5963 if (prog->aux->func_info_aux[subprog].unreliable) {
5964 bpf_log(log, "Verifier bug in function %s()\n", tname);
5967 if (prog_type == BPF_PROG_TYPE_EXT)
5968 prog_type = prog->aux->dst_prog->type;
5970 t = btf_type_by_id(btf, t->type);
5971 if (!t || !btf_type_is_func_proto(t)) {
5972 bpf_log(log, "Invalid type of function %s()\n", tname);
5975 args = (const struct btf_param *)(t + 1);
5976 nargs = btf_type_vlen(t);
5977 if (nargs > MAX_BPF_FUNC_REG_ARGS) {
5978 bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
5979 tname, nargs, MAX_BPF_FUNC_REG_ARGS);
5982 /* check that function returns int */
5983 t = btf_type_by_id(btf, t->type);
5984 while (btf_type_is_modifier(t))
5985 t = btf_type_by_id(btf, t->type);
5986 if (!btf_type_is_int(t) && !btf_type_is_enum(t)) {
5988 "Global function %s() doesn't return scalar. Only those are supported.\n",
5992 /* Convert BTF function arguments into verifier types.
5993 * Only PTR_TO_CTX and SCALAR are supported atm.
5995 for (i = 0; i < nargs; i++) {
5996 struct bpf_reg_state *reg = &regs[i + 1];
5998 t = btf_type_by_id(btf, args[i].type);
5999 while (btf_type_is_modifier(t))
6000 t = btf_type_by_id(btf, t->type);
6001 if (btf_type_is_int(t) || btf_type_is_enum(t)) {
6002 reg->type = SCALAR_VALUE;
6005 if (btf_type_is_ptr(t)) {
6006 if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
6007 reg->type = PTR_TO_CTX;
6011 t = btf_type_skip_modifiers(btf, t->type, NULL);
6013 ref_t = btf_resolve_size(btf, t, &reg->mem_size);
6014 if (IS_ERR(ref_t)) {
6016 "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
6017 i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
6022 reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
6023 reg->id = ++env->id_gen;
6027 bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
6028 i, btf_kind_str[BTF_INFO_KIND(t->info)], tname);
6034 static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
6035 struct btf_show *show)
6037 const struct btf_type *t = btf_type_by_id(btf, type_id);
6040 memset(&show->state, 0, sizeof(show->state));
6041 memset(&show->obj, 0, sizeof(show->obj));
6043 btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
6046 static void btf_seq_show(struct btf_show *show, const char *fmt,
6049 seq_vprintf((struct seq_file *)show->target, fmt, args);
6052 int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
6053 void *obj, struct seq_file *m, u64 flags)
6055 struct btf_show sseq;
6058 sseq.showfn = btf_seq_show;
6061 btf_type_show(btf, type_id, obj, &sseq);
6063 return sseq.state.status;
6066 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
6069 (void) btf_type_seq_show_flags(btf, type_id, obj, m,
6070 BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
6071 BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
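/* Typical (illustrative) caller: a map implementation's seq_show hook
 * pretty-prints a value through its BTF, roughly:
 *
 *   btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
 *
 * which backs the formatted output seen when a pinned map is read from
 * bpffs.
 */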
6074 struct btf_show_snprintf {
6075 struct btf_show show;
6076 int len_left; /* space left in string */
6077 int len; /* length we would have written */
6080 static void btf_snprintf_show(struct btf_show *show, const char *fmt,
6083 struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
6086 len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
6089 ssnprintf->len_left = 0;
6090 ssnprintf->len = len;
6091 } else if (len > ssnprintf->len_left) {
6092 /* no space, drive on to get length we would have written */
6093 ssnprintf->len_left = 0;
6094 ssnprintf->len += len;
6096 ssnprintf->len_left -= len;
6097 ssnprintf->len += len;
6098 show->target += len;
6102 int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
6103 char *buf, int len, u64 flags)
6105 struct btf_show_snprintf ssnprintf;
6107 ssnprintf.show.target = buf;
6108 ssnprintf.show.flags = flags;
6109 ssnprintf.show.showfn = btf_snprintf_show;
6110 ssnprintf.len_left = len;
6113 btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
6115 /* If we encountered an error, return it. */
6116 if (ssnprintf.show.state.status)
6117 return ssnprintf.show.state.status;
6119 /* Otherwise return length we would have written */
6120 return ssnprintf.len;
6123 #ifdef CONFIG_PROC_FS
6124 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
6126 const struct btf *btf = filp->private_data;
6128 seq_printf(m, "btf_id:\t%u\n", btf->id);
6132 static int btf_release(struct inode *inode, struct file *filp)
6134 btf_put(filp->private_data);
6138 const struct file_operations btf_fops = {
6139 #ifdef CONFIG_PROC_FS
6140 .show_fdinfo = bpf_btf_show_fdinfo,
6142 .release = btf_release,
6145 static int __btf_new_fd(struct btf *btf)
6147 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
6150 int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr)
6155 btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel),
6156 attr->btf_size, attr->btf_log_level,
6157 u64_to_user_ptr(attr->btf_log_buf),
6158 attr->btf_log_size);
6160 return PTR_ERR(btf);
6162 ret = btf_alloc_id(btf);
6169 * The BTF ID is published to the userspace.
6170 * All BTF free must go through call_rcu() from
6171 * now on (i.e. free by calling btf_put()).
6174 ret = __btf_new_fd(btf);
6181 struct btf *btf_get_by_fd(int fd)
6189 return ERR_PTR(-EBADF);
6191 if (f.file->f_op != &btf_fops) {
6193 return ERR_PTR(-EINVAL);
6196 btf = f.file->private_data;
6197 refcount_inc(&btf->refcnt);
6203 int btf_get_info_by_fd(const struct btf *btf,
6204 const union bpf_attr *attr,
6205 union bpf_attr __user *uattr)
6207 struct bpf_btf_info __user *uinfo;
6208 struct bpf_btf_info info;
6209 u32 info_copy, btf_copy;
6212 u32 uinfo_len, uname_len, name_len;
6215 uinfo = u64_to_user_ptr(attr->info.info);
6216 uinfo_len = attr->info.info_len;
6218 info_copy = min_t(u32, uinfo_len, sizeof(info));
6219 memset(&info, 0, sizeof(info));
6220 if (copy_from_user(&info, uinfo, info_copy))
6224 ubtf = u64_to_user_ptr(info.btf);
6225 btf_copy = min_t(u32, btf->data_size, info.btf_size);
6226 if (copy_to_user(ubtf, btf->data, btf_copy))
6228 info.btf_size = btf->data_size;
6230 info.kernel_btf = btf->kernel_btf;
6232 uname = u64_to_user_ptr(info.name);
6233 uname_len = info.name_len;
6234 if (!uname ^ !uname_len)
6237 name_len = strlen(btf->name);
6238 info.name_len = name_len;
6241 if (uname_len >= name_len + 1) {
6242 if (copy_to_user(uname, btf->name, name_len + 1))
6247 if (copy_to_user(uname, btf->name, uname_len - 1))
6249 if (put_user(zero, uname + uname_len - 1))
6251 /* let user-space know the buffer was too short */
6256 if (copy_to_user(uinfo, &info, info_copy) ||
6257 put_user(info_copy, &uattr->info.info_len))
6263 int btf_get_fd_by_id(u32 id)
6269 btf = idr_find(&btf_idr, id);
6270 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
6271 btf = ERR_PTR(-ENOENT);
6275 return PTR_ERR(btf);
6277 fd = __btf_new_fd(btf);
6284 u32 btf_obj_id(const struct btf *btf)
6289 bool btf_is_kernel(const struct btf *btf)
6291 return btf->kernel_btf;
6294 bool btf_is_module(const struct btf *btf)
6296 return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
6299 static int btf_id_cmp_func(const void *a, const void *b)
6301 const int *pa = a, *pb = b;
6306 bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
6308 return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
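/* Sets passed here are typically built with the btf_ids.h macros and
 * sorted at build time by resolve_btfids, e.g. (illustrative):
 *
 *   BTF_SET_START(example_allowlist)
 *   BTF_ID(func, bpf_example_kfunc)
 *   BTF_SET_END(example_allowlist)
 *
 * which is what makes the bsearch() above valid.
 */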
6312 BTF_MODULE_F_LIVE = (1 << 0),
6315 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6317 struct list_head list;
6318 struct module *module;
6320 struct bin_attribute *sysfs_attr;
6324 static LIST_HEAD(btf_modules);
6325 static DEFINE_MUTEX(btf_module_mutex);
6328 btf_module_read(struct file *file, struct kobject *kobj,
6329 struct bin_attribute *bin_attr,
6330 char *buf, loff_t off, size_t len)
6332 const struct btf *btf = bin_attr->private;
6334 memcpy(buf, btf->data + off, len);
6338 static void purge_cand_cache(struct btf *btf);
6340 static int btf_module_notify(struct notifier_block *nb, unsigned long op,
6343 struct btf_module *btf_mod, *tmp;
6344 struct module *mod = module;
6348 if (mod->btf_data_size == 0 ||
6349 (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
6350 op != MODULE_STATE_GOING))
6354 case MODULE_STATE_COMING:
6355 btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
6360 btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size);
6362 pr_warn("failed to validate module [%s] BTF: %ld\n",
6363 mod->name, PTR_ERR(btf));
6368 err = btf_alloc_id(btf);
6375 purge_cand_cache(NULL);
6376 mutex_lock(&btf_module_mutex);
6377 btf_mod->module = module;
6379 list_add(&btf_mod->list, &btf_modules);
6380 mutex_unlock(&btf_module_mutex);
6382 if (IS_ENABLED(CONFIG_SYSFS)) {
6383 struct bin_attribute *attr;
6385 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
6389 sysfs_bin_attr_init(attr);
6390 attr->attr.name = btf->name;
6391 attr->attr.mode = 0444;
6392 attr->size = btf->data_size;
6393 attr->private = btf;
6394 attr->read = btf_module_read;
6396 err = sysfs_create_bin_file(btf_kobj, attr);
6398 pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
6405 btf_mod->sysfs_attr = attr;
6409 case MODULE_STATE_LIVE:
6410 mutex_lock(&btf_module_mutex);
6411 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
6412 if (btf_mod->module != module)
6415 btf_mod->flags |= BTF_MODULE_F_LIVE;
6418 mutex_unlock(&btf_module_mutex);
6420 case MODULE_STATE_GOING:
6421 mutex_lock(&btf_module_mutex);
6422 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
6423 if (btf_mod->module != module)
6426 list_del(&btf_mod->list);
6427 if (btf_mod->sysfs_attr)
6428 sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
6429 purge_cand_cache(btf_mod->btf);
6430 btf_put(btf_mod->btf);
6431 kfree(btf_mod->sysfs_attr);
6435 mutex_unlock(&btf_module_mutex);
6439 return notifier_from_errno(err);
6442 static struct notifier_block btf_module_nb = {
6443 .notifier_call = btf_module_notify,
6446 static int __init btf_module_init(void)
6448 register_module_notifier(&btf_module_nb);
6452 fs_initcall(btf_module_init);
6453 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
6455 struct module *btf_try_get_module(const struct btf *btf)
6457 struct module *res = NULL;
6458 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6459 struct btf_module *btf_mod, *tmp;
6461 mutex_lock(&btf_module_mutex);
6462 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
6463 if (btf_mod->btf != btf)
6466 /* We must only consider module whose __init routine has
6467 * finished, hence we must check for BTF_MODULE_F_LIVE flag,
6468 * which is set from the notifier callback for
6469 * MODULE_STATE_LIVE.
6471 if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
6472 res = btf_mod->module;
6476 mutex_unlock(&btf_module_mutex);
6482 /* Returns struct btf corresponding to the struct module
6484 * This function can return NULL or ERR_PTR. Note that caller must
6485 * release reference for struct btf iff btf_is_module is true.
6487 static struct btf *btf_get_module_btf(const struct module *module)
6489 struct btf *btf = NULL;
6490 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6491 struct btf_module *btf_mod, *tmp;
6495 return bpf_get_btf_vmlinux();
6496 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6497 mutex_lock(&btf_module_mutex);
6498 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
6499 if (btf_mod->module != module)
6502 btf_get(btf_mod->btf);
6506 mutex_unlock(&btf_module_mutex);
6512 BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
6520 if (name_sz <= 1 || name[name_sz - 1])
6523 btf = bpf_get_btf_vmlinux();
6525 return PTR_ERR(btf);
6527 ret = btf_find_by_name_kind(btf, name, kind);
6528 /* ret is never zero, since btf_find_by_name_kind returns
6529 * positive btf_id or negative error.
6532 struct btf *mod_btf;
6535 /* If name is not found in vmlinux's BTF then search in module's BTFs */
6536 spin_lock_bh(&btf_idr_lock);
6537 idr_for_each_entry(&btf_idr, mod_btf, id) {
6538 if (!btf_is_module(mod_btf))
6540 /* linear search could be slow hence unlock/lock
6541 * the IDR to avoid holding it for too long
6544 spin_unlock_bh(&btf_idr_lock);
6545 ret = btf_find_by_name_kind(mod_btf, name, kind);
6549 btf_obj_fd = __btf_new_fd(mod_btf);
6550 if (btf_obj_fd < 0) {
6554 return ret | (((u64)btf_obj_fd) << 32);
6556 spin_lock_bh(&btf_idr_lock);
6559 spin_unlock_bh(&btf_idr_lock);
6564 const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
6565 .func = bpf_btf_find_by_name_kind,
6567 .ret_type = RET_INTEGER,
6568 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6569 .arg2_type = ARG_CONST_SIZE,
6570 .arg3_type = ARG_ANYTHING,
6571 .arg4_type = ARG_ANYTHING,
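/* Illustrative (assumed) use from a BPF_PROG_TYPE_SYSCALL program:
 *
 *   long r = bpf_btf_find_by_name_kind("task_struct",
 *                                      sizeof("task_struct"),
 *                                      BTF_KIND_STRUCT, 0);
 *   int btf_id = (int)r;        // low 32 bits: type id or -errno
 *   int btf_obj_fd = r >> 32;   // high 32 bits: module BTF fd, 0 for vmlinux
 */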
6574 BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
6575 #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
6576 BTF_TRACING_TYPE_xxx
6577 #undef BTF_TRACING_TYPE
6579 /* Kernel Function (kfunc) BTF ID set registration API */
6581 static int __btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
6582 enum btf_kfunc_type type,
6583 struct btf_id_set *add_set, bool vmlinux_set)
6585 struct btf_kfunc_set_tab *tab;
6586 struct btf_id_set *set;
6590 if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX) {
6598 tab = btf->kfunc_set_tab;
6600 tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
6603 btf->kfunc_set_tab = tab;
6606 set = tab->sets[hook][type];
6607 /* Warn when register_btf_kfunc_id_set is called twice for the same hook
6610 if (WARN_ON_ONCE(set && !vmlinux_set)) {
6615 /* We don't need to allocate, concatenate, and sort module sets, because
6616 * only one is allowed per hook. Hence, we can directly assign the
6617 * pointer and return.
6620 tab->sets[hook][type] = add_set;
6624 /* In case of vmlinux sets, there may be more than one set being
6625 * registered per hook. To create a unified set, we allocate a new set
6626 * and concatenate all individual sets being registered. While each set
6627 * is individually sorted, they may become unsorted when concatenated,
6628 * hence re-sorting the final set again is required to make binary
6629 * searching the set using btf_id_set_contains function work.
6631 set_cnt = set ? set->cnt : 0;
6633 if (set_cnt > U32_MAX - add_set->cnt) {
6638 if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
6644 set = krealloc(tab->sets[hook][type],
6645 offsetof(struct btf_id_set, ids[set_cnt + add_set->cnt]),
6646 GFP_KERNEL | __GFP_NOWARN);
6652 /* For newly allocated set, initialize set->cnt to 0 */
6653 if (!tab->sets[hook][type])
6655 tab->sets[hook][type] = set;
6657 /* Concatenate the two sets */
6658 memcpy(set->ids + set->cnt, add_set->ids, add_set->cnt * sizeof(set->ids[0]));
6659 set->cnt += add_set->cnt;
6661 sort(set->ids, set->cnt, sizeof(set->ids[0]), btf_id_cmp_func, NULL);
6665 btf_free_kfunc_set_tab(btf);
6669 static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
6670 const struct btf_kfunc_id_set *kset)
6672 bool vmlinux_set = !btf_is_module(btf);
6675 for (type = 0; type < ARRAY_SIZE(kset->sets); type++) {
6676 if (!kset->sets[type])
6679 ret = __btf_populate_kfunc_set(btf, hook, type, kset->sets[type], vmlinux_set);
6686 static bool __btf_kfunc_id_set_contains(const struct btf *btf,
6687 enum btf_kfunc_hook hook,
6688 enum btf_kfunc_type type,
6691 struct btf_id_set *set;
6693 if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX)
6695 if (!btf->kfunc_set_tab)
6697 set = btf->kfunc_set_tab->sets[hook][type];
6700 return btf_id_set_contains(set, kfunc_btf_id);
6703 static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
6705 switch (prog_type) {
6706 case BPF_PROG_TYPE_XDP:
6707 return BTF_KFUNC_HOOK_XDP;
6708 case BPF_PROG_TYPE_SCHED_CLS:
6709 return BTF_KFUNC_HOOK_TC;
6710 case BPF_PROG_TYPE_STRUCT_OPS:
6711 return BTF_KFUNC_HOOK_STRUCT_OPS;
6713 return BTF_KFUNC_HOOK_MAX;
6718 * Reference to the module (obtained using btf_try_get_module) corresponding to
6719 * the struct btf *MUST* be held when calling this function from verifier
6720 * context. This is usually true as we stash references in prog's kfunc_btf_tab;
6721 * keeping the reference for the duration of the call provides the necessary
6722 * protection for looking up a well-formed btf->kfunc_set_tab.
6724 bool btf_kfunc_id_set_contains(const struct btf *btf,
6725 enum bpf_prog_type prog_type,
6726 enum btf_kfunc_type type, u32 kfunc_btf_id)
6728 enum btf_kfunc_hook hook;
6730 hook = bpf_prog_type_to_kfunc_hook(prog_type);
6731 return __btf_kfunc_id_set_contains(btf, hook, type, kfunc_btf_id);
6734 /* This function must be invoked only from initcalls/module init functions */
6735 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
6736 const struct btf_kfunc_id_set *kset)
6738 enum btf_kfunc_hook hook;
6742 btf = btf_get_module_btf(kset->owner);
6743 if (IS_ERR_OR_NULL(btf))
6744 return btf ? PTR_ERR(btf) : -ENOENT;
6746 hook = bpf_prog_type_to_kfunc_hook(prog_type);
6747 ret = btf_populate_kfunc_set(btf, hook, kset);
6748 /* reference is only taken for module BTF */
6749 if (btf_is_module(btf))
6753 EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
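/* Illustrative registration from a module init (assumed identifiers),
 * mirroring how in-tree users populate a btf_kfunc_id_set:
 *
 *   BTF_SET_START(demo_check_kfunc_ids)
 *   BTF_ID(func, bpf_demo_kfunc)
 *   BTF_SET_END(demo_check_kfunc_ids)
 *
 *   static const struct btf_kfunc_id_set demo_kfunc_set = {
 *           .owner     = THIS_MODULE,
 *           .check_set = &demo_check_kfunc_ids,
 *   };
 *
 *   static int __init demo_init(void)
 *   {
 *           return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
 *                                            &demo_kfunc_set);
 *   }
 */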
6755 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
6756 const struct btf *targ_btf, __u32 targ_id)
6761 static bool bpf_core_is_flavor_sep(const char *s)
6763 /* check X___Y name pattern, where X and Y are not underscores */
6764 return s[0] != '_' && /* X */
6765 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
6766 s[4] != '_'; /* Y */
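/* e.g. for a program-local flavor "task_struct___with_extra_field" the
 * separator is found right before "with_extra_field", so the essential
 * name used for candidate matching below is just "task_struct".
 */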
6769 size_t bpf_core_essential_name_len(const char *name)
6771 size_t n = strlen(name);
6774 for (i = n - 5; i >= 0; i--) {
6775 if (bpf_core_is_flavor_sep(name + i))
6781 struct bpf_cand_cache {
6787 const struct btf *btf;
6792 static void bpf_free_cands(struct bpf_cand_cache *cands)
6795 /* empty candidate array was allocated on stack */
6800 static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
6806 #define VMLINUX_CAND_CACHE_SIZE 31
6807 static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];
6809 #define MODULE_CAND_CACHE_SIZE 31
6810 static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];
6812 static DEFINE_MUTEX(cand_cache_mutex);
6814 static void __print_cand_cache(struct bpf_verifier_log *log,
6815 struct bpf_cand_cache **cache,
6818 struct bpf_cand_cache *cc;
6821 for (i = 0; i < cache_size; i++) {
6825 bpf_log(log, "[%d]%s(", i, cc->name);
6826 for (j = 0; j < cc->cnt; j++) {
6827 bpf_log(log, "%d", cc->cands[j].id);
6828 if (j < cc->cnt - 1)
6831 bpf_log(log, "), ");
6835 static void print_cand_cache(struct bpf_verifier_log *log)
6837 mutex_lock(&cand_cache_mutex);
6838 bpf_log(log, "vmlinux_cand_cache:");
6839 __print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
6840 bpf_log(log, "\nmodule_cand_cache:");
6841 __print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
6843 mutex_unlock(&cand_cache_mutex);
6846 static u32 hash_cands(struct bpf_cand_cache *cands)
6848 return jhash(cands->name, cands->name_len, 0);
6851 static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
6852 struct bpf_cand_cache **cache,
6855 struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];
6857 if (cc && cc->name_len == cands->name_len &&
6858 !strncmp(cc->name, cands->name, cands->name_len))
6863 static size_t sizeof_cands(int cnt)
6865 return offsetof(struct bpf_cand_cache, cands[cnt]);
6868 static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
6869 struct bpf_cand_cache **cache,
6872 struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;
6875 bpf_free_cands_from_cache(*cc);
6878 new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
6880 bpf_free_cands(cands);
6881 return ERR_PTR(-ENOMEM);
6883 /* strdup the name, since it will stay in cache.
6884 * the cands->name points to strings in prog's BTF and the prog can be unloaded.
6886 new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
6887 bpf_free_cands(cands);
6888 if (!new_cands->name) {
6890 return ERR_PTR(-ENOMEM);
6896 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6897 static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
6900 struct bpf_cand_cache *cc;
6903 for (i = 0; i < cache_size; i++) {
6908 /* when new module is loaded purge all of module_cand_cache,
6909 * since new module might have candidates with the name
6910 * that matches cached cands.
6912 bpf_free_cands_from_cache(cc);
6916 /* when module is unloaded purge cache entries
6917 * that match module's btf
6919 for (j = 0; j < cc->cnt; j++)
6920 if (cc->cands[j].btf == btf) {
6921 bpf_free_cands_from_cache(cc);
6929 static void purge_cand_cache(struct btf *btf)
6931 mutex_lock(&cand_cache_mutex);
6932 __purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
6933 mutex_unlock(&cand_cache_mutex);
6937 static struct bpf_cand_cache *
6938 bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
6941 struct bpf_cand_cache *new_cands;
6942 const struct btf_type *t;
6943 const char *targ_name;
6944 size_t targ_essent_len;
6947 n = btf_nr_types(targ_btf);
6948 for (i = targ_start_id; i < n; i++) {
6949 t = btf_type_by_id(targ_btf, i);
6950 if (btf_kind(t) != cands->kind)
6953 targ_name = btf_name_by_offset(targ_btf, t->name_off);
6957 /* the resched point is before strncmp to make sure that search
6958 * for non-existing name will have a chance to schedule().
6962 if (strncmp(cands->name, targ_name, cands->name_len) != 0)
6965 targ_essent_len = bpf_core_essential_name_len(targ_name);
6966 if (targ_essent_len != cands->name_len)
6969 /* most of the time there is only one candidate for a given kind+name pair */
6970 new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
6972 bpf_free_cands(cands);
6973 return ERR_PTR(-ENOMEM);
6976 memcpy(new_cands, cands, sizeof_cands(cands->cnt));
6977 bpf_free_cands(cands);
6979 cands->cands[cands->cnt].btf = targ_btf;
6980 cands->cands[cands->cnt].id = i;
6986 static struct bpf_cand_cache *
6987 bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
6989 struct bpf_cand_cache *cands, *cc, local_cand = {};
6990 const struct btf *local_btf = ctx->btf;
6991 const struct btf_type *local_type;
6992 const struct btf *main_btf;
6993 size_t local_essent_len;
6994 struct btf *mod_btf;
6998 main_btf = bpf_get_btf_vmlinux();
6999 if (IS_ERR(main_btf))
7000 return ERR_CAST(main_btf);
7002 local_type = btf_type_by_id(local_btf, local_type_id);
7004 return ERR_PTR(-EINVAL);
7006 name = btf_name_by_offset(local_btf, local_type->name_off);
7007 if (str_is_empty(name))
7008 return ERR_PTR(-EINVAL);
7009 local_essent_len = bpf_core_essential_name_len(name);
7011 cands = &local_cand;
7013 cands->kind = btf_kind(local_type);
7014 cands->name_len = local_essent_len;
7016 cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
7017 /* cands is a pointer to stack here */
7024 /* Attempt to find target candidates in vmlinux BTF first */
7025 cands = bpf_core_add_cands(cands, main_btf, 1);
7027 return ERR_CAST(cands);
7029 /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
7031 /* populate cache even when cands->cnt == 0 */
7032 cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
7034 return ERR_CAST(cc);
7036 /* if vmlinux BTF has any candidate, don't go for module BTFs */
7041 /* cands is a pointer to stack here and cands->cnt == 0 */
7042 cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
7044 /* if cache has it return it even if cc->cnt == 0 */
7047 /* If candidate is not found in vmlinux's BTF then search in module's BTFs */
7048 spin_lock_bh(&btf_idr_lock);
7049 idr_for_each_entry(&btf_idr, mod_btf, id) {
7050 if (!btf_is_module(mod_btf))
7052 /* linear search could be slow hence unlock/lock
7053 * the IDR to avoid holding it for too long
7056 spin_unlock_bh(&btf_idr_lock);
7057 cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
7058 if (IS_ERR(cands)) {
7060 return ERR_CAST(cands);
7062 spin_lock_bh(&btf_idr_lock);
7065 spin_unlock_bh(&btf_idr_lock);
7066 /* cands is a pointer to kmalloced memory here if cands->cnt > 0
7067 * or pointer to stack if cands->cnt == 0.
7068 * Copy it into the cache even when cands->cnt == 0 and
7069 * return the result.
7071 return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
7074 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
7075 int relo_idx, void *insn)
7077 bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
7078 struct bpf_core_cand_list cands = {};
7079 struct bpf_core_spec *specs;
7082 /* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
7083 * into arrays of btf_ids of struct fields and array indices.
7085 specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
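	/* An illustrative reading of such an access string, assuming
	 *
	 *   struct s { int a; struct { int b; int c[8]; } inner[2]; };
	 *
	 * the relocation for s->inner[1].c[5] is recorded as "0:1:1:1:5":
	 * initial deref index, member #1 (inner), array index 1, member #1
	 * of the anonymous struct (c), array index 5.
	 */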
7090 struct bpf_cand_cache *cc;
7093 mutex_lock(&cand_cache_mutex);
7094 cc = bpf_core_find_cands(ctx, relo->type_id);
7096 bpf_log(ctx->log, "target candidate search failed for %d\n",
7102 cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
7108 for (i = 0; i < cc->cnt; i++) {
7110 "CO-RE relocating %s %s: found target candidate [%d]\n",
7111 btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
7112 cands.cands[i].btf = cc->cands[i].btf;
7113 cands.cands[i].id = cc->cands[i].id;
7115 cands.len = cc->cnt;
7116 /* cand_cache_mutex needs to span the cache lookup and
7117 * copy of btf pointer into bpf_core_cand_list,
7118 * since module can be unloaded while bpf_core_apply_relo_insn
7119 * is working with module's btf.
7123 err = bpf_core_apply_relo_insn((void *)ctx->log, insn, relo->insn_off / 8,
7124 relo, relo_idx, ctx->btf, &cands, specs);
7129 mutex_unlock(&cand_cache_mutex);
7130 if (ctx->log->level & BPF_LOG_LEVEL2)
7131 print_cand_cache(ctx->log);