1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2019 Facebook */
7 #include <linux/string.h>
8 #include <linux/bpf_verifier.h>
11 static const char *btf_kind_str(const struct btf_type *t)
13 return btf_type_str(t);
16 static bool is_ldimm64_insn(struct bpf_insn *insn)
18 return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
21 static const struct btf_type *
22 skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id)
24 return btf_type_skip_modifiers(btf, id, res_id);
27 static const char *btf__name_by_offset(const struct btf *btf, u32 offset)
29 return btf_name_by_offset(btf, offset);
32 static s64 btf__resolve_size(const struct btf *btf, u32 type_id)
34 const struct btf_type *t;
37 t = btf_type_by_id(btf, type_id);
38 t = btf_resolve_size(btf, t, &size);
44 enum libbpf_print_level {
53 #define pr_warn(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
54 #define pr_info(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
55 #define pr_debug(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
56 #define libbpf_print(level, fmt, ...) bpf_log((void *)prog_name, fmt, ##__VA_ARGS__)
62 #include <linux/err.h>
67 #include "str_error.h"
68 #include "libbpf_internal.h"
71 static bool is_flex_arr(const struct btf *btf,
72 const struct bpf_core_accessor *acc,
73 const struct btf_array *arr)
75 const struct btf_type *t;
77 /* not a flexible array if it's not a named struct member or has non-zero size */
78 if (!acc->name || arr->nelems > 0)
81 /* has to be the last member of enclosing struct */
82 t = btf_type_by_id(btf, acc->type_id);
83 return acc->idx == btf_vlen(t) - 1;
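/* Illustrative sketch of what the check above recognizes (hypothetical struct,
 * an assumption for exposition, not taken from kernel BTF): a flexible array
 * is a zero-length array that is the last member of its enclosing struct:
 *
 *     struct record {
 *         unsigned int len;
 *         char data[];   // nelems == 0 and last member => flexible array
 *     };
 *
 * For such arrays an out-of-bounds index in the access spec is tolerated,
 * since the real element count is only known at runtime.
 */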
86 static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
89 case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off";
90 case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz";
91 case BPF_CORE_FIELD_EXISTS: return "field_exists";
92 case BPF_CORE_FIELD_SIGNED: return "signed";
93 case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64";
94 case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64";
95 case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
96 case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
97 case BPF_CORE_TYPE_EXISTS: return "type_exists";
98 case BPF_CORE_TYPE_SIZE: return "type_size";
99 case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
100 case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
101 default: return "unknown";
105 static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
108 case BPF_CORE_FIELD_BYTE_OFFSET:
109 case BPF_CORE_FIELD_BYTE_SIZE:
110 case BPF_CORE_FIELD_EXISTS:
111 case BPF_CORE_FIELD_SIGNED:
112 case BPF_CORE_FIELD_LSHIFT_U64:
113 case BPF_CORE_FIELD_RSHIFT_U64:
120 static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
123 case BPF_CORE_TYPE_ID_LOCAL:
124 case BPF_CORE_TYPE_ID_TARGET:
125 case BPF_CORE_TYPE_EXISTS:
126 case BPF_CORE_TYPE_SIZE:
133 static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
136 case BPF_CORE_ENUMVAL_EXISTS:
137 case BPF_CORE_ENUMVAL_VALUE:
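/* Illustrative mapping (an assumption for exposition, not part of this file):
 * on the BPF program side these relocation kinds are typically emitted via
 * libbpf's bpf_core_read.h helpers, which wrap clang's preserve_*_info
 * builtins, e.g.:
 *
 *     bpf_core_field_exists(s->fld)     -> BPF_CORE_FIELD_EXISTS
 *     bpf_core_field_size(s->fld)       -> BPF_CORE_FIELD_BYTE_SIZE
 *     bpf_core_type_exists(struct foo)  -> BPF_CORE_TYPE_EXISTS
 *     bpf_core_type_size(struct foo)    -> BPF_CORE_TYPE_SIZE
 *     bpf_core_enum_value(enum e, V)    -> BPF_CORE_ENUMVAL_VALUE
 */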
145 * Turn bpf_core_relo into a low- and high-level spec representation,
146 * validating correctness along the way, as well as calculating resulting
147 * field bit offset, specified by accessor string. Low-level spec captures
148 * every single level of nesting, including traversing anonymous
149 * struct/union members. The high-level one captures only semantically meaningful
150 * "turning points": named fields and array indices.
151 * E.g., for this case:
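 *
 *   (representative layout; the exact field names here are illustrative)
 *
 *   struct sample {
 *       int __unimportant;
 *       struct {
 *           int __1;
 *           int __2;
 *           int a[7];
 *       };
 *   };
 *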
162 * struct sample *s = ...;
164 * int *x = &s->a[3]; // access string = '0:1:2:3'
166 * Low-level spec has 1:1 mapping with each element of access string (it's
167 * just a parsed access string representation): [0, 1, 2, 3].
169 * High-level spec will capture only 3 points:
170 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
171 * - field 'a' access (corresponds to '2' in low-level spec);
172 * - array element #3 access (corresponds to '3' in low-level spec).
174 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
175 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
176 * spec and raw_spec are kept empty.
178 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the access
179 * string to specify the index of the enumerator whose value needs to be relocated.
181 static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
182 const struct bpf_core_relo *relo,
183 struct bpf_core_spec *spec)
185 int access_idx, parsed_len, i;
186 struct bpf_core_accessor *acc;
187 const struct btf_type *t;
188 const char *name, *spec_str;
192 spec_str = btf__name_by_offset(btf, relo->access_str_off);
193 if (str_is_empty(spec_str) || *spec_str == ':')
196 memset(spec, 0, sizeof(*spec));
198 spec->root_type_id = relo->type_id;
199 spec->relo_kind = relo->kind;
201 /* type-based relocations don't have a field access string */
202 if (core_relo_is_type_based(relo->kind)) {
203 if (strcmp(spec_str, "0"))
208 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
210 if (*spec_str == ':')
212 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
214 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
216 spec_str += parsed_len;
217 spec->raw_spec[spec->raw_len++] = access_idx;
220 if (spec->raw_len == 0)
223 t = skip_mods_and_typedefs(btf, relo->type_id, &id);
227 access_idx = spec->raw_spec[0];
228 acc = &spec->spec[0];
230 acc->idx = access_idx;
233 if (core_relo_is_enumval_based(relo->kind)) {
234 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
237 /* record enumerator name in the first accessor */
238 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
242 if (!core_relo_is_field_based(relo->kind))
245 sz = btf__resolve_size(btf, id);
248 spec->bit_offset = access_idx * sz * 8;
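/* e.g. if the access spec started with index 2 for a 'struct foo *' root
 * type, the accessed object would be s[2], so the running bit offset starts
 * at 2 * sizeof(struct foo) * 8 (illustrative; this first index is usually 0
 * for a plain pointer dereference).
 */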
250 for (i = 1; i < spec->raw_len; i++) {
251 t = skip_mods_and_typedefs(btf, id, &id);
255 access_idx = spec->raw_spec[i];
256 acc = &spec->spec[spec->len];
258 if (btf_is_composite(t)) {
259 const struct btf_member *m;
262 if (access_idx >= btf_vlen(t))
265 bit_offset = btf_member_bit_offset(t, access_idx);
266 spec->bit_offset += bit_offset;
268 m = btf_members(t) + access_idx;
270 name = btf__name_by_offset(btf, m->name_off);
271 if (str_is_empty(name))
275 acc->idx = access_idx;
281 } else if (btf_is_array(t)) {
282 const struct btf_array *a = btf_array(t);
285 t = skip_mods_and_typedefs(btf, a->type, &id);
289 flex = is_flex_arr(btf, acc - 1, a);
290 if (!flex && access_idx >= a->nelems)
293 spec->spec[spec->len].type_id = id;
294 spec->spec[spec->len].idx = access_idx;
297 sz = btf__resolve_size(btf, id);
300 spec->bit_offset += access_idx * sz * 8;
302 pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
303 prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t));
311 /* Check two types for compatibility for the purpose of field access
312 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
313 * are relocating semantically compatible entities:
314 * - any two STRUCTs/UNIONs are compatible and can be mixed;
315 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
316 * - any two PTRs are always compatible;
317 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
318 * least one of enums should be anonymous;
319 * - for ENUMs, check sizes, names are ignored;
320 * - for INT, size and signedness are ignored;
321 * - any two FLOATs are always compatible;
322 * - for ARRAY, dimensionality is ignored, element types are checked for
323 * compatibility recursively;
324 * - everything else shouldn't ever be a target of relocation.
325 * These rules are not set in stone and probably will be adjusted as we get
326 * more experience with using BPF CO-RE relocations.
328 static int bpf_core_fields_are_compat(const struct btf *local_btf,
330 const struct btf *targ_btf,
333 const struct btf_type *local_type, *targ_type;
336 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
337 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
338 if (!local_type || !targ_type)
341 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
343 if (btf_kind(local_type) != btf_kind(targ_type))
346 switch (btf_kind(local_type)) {
351 case BTF_KIND_ENUM: {
352 const char *local_name, *targ_name;
353 size_t local_len, targ_len;
355 local_name = btf__name_by_offset(local_btf,
356 local_type->name_off);
357 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
358 local_len = bpf_core_essential_name_len(local_name);
359 targ_len = bpf_core_essential_name_len(targ_name);
360 /* one of them is anonymous or both w/ same flavor-less names */
361 return local_len == 0 || targ_len == 0 ||
362 (local_len == targ_len &&
363 strncmp(local_name, targ_name, local_len) == 0);
366 /* just reject deprecated bitfield-like integers; all other
367 * integers are by default compatible with each other
369 return btf_int_offset(local_type) == 0 &&
370 btf_int_offset(targ_type) == 0;
372 local_id = btf_array(local_type)->type;
373 targ_id = btf_array(targ_type)->type;
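/* Illustrative example of the rules above (hypothetical types, an assumption
 * for exposition):
 *
 *     // local (BPF-side) definition      // target kernel BTF
 *     struct sk_buff___local {            struct sk_buff {
 *         unsigned short len;                 unsigned int len;
 *     };                                      ...
 *                                         };
 *
 * Accessing 'len' is allowed here: both fields resolve to non-bitfield INTs,
 * whose size and signedness are deliberately ignored at this stage (any
 * unsafe load-size mismatch is dealt with later in bpf_core_calc_relo()).
 */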
381 * Given a single high-level named field accessor in the local type, find the
382 * corresponding high-level accessor for a target type. Along the way,
383 * maintain the low-level spec for the target and keep its bit offset updated.
386 * Searching is performed through recursive exhaustive enumeration of all
387 * fields of a struct/union. If there are any anonymous (embedded)
388 * structs/unions, they are recursively searched as well. If field with
389 * desired name is found, check compatibility between local and target types,
390 * before returning result.
392 * 1 is returned if the field is found.
393 * 0 is returned if no compatible field is found.
394 * <0 is returned on error.
396 static int bpf_core_match_member(const struct btf *local_btf,
397 const struct bpf_core_accessor *local_acc,
398 const struct btf *targ_btf,
400 struct bpf_core_spec *spec,
403 const struct btf_type *local_type, *targ_type;
404 const struct btf_member *local_member, *m;
405 const char *local_name, *targ_name;
409 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
412 if (!btf_is_composite(targ_type))
415 local_id = local_acc->type_id;
416 local_type = btf_type_by_id(local_btf, local_id);
417 local_member = btf_members(local_type) + local_acc->idx;
418 local_name = btf__name_by_offset(local_btf, local_member->name_off);
420 n = btf_vlen(targ_type);
421 m = btf_members(targ_type);
422 for (i = 0; i < n; i++, m++) {
425 bit_offset = btf_member_bit_offset(targ_type, i);
427 /* too deep struct/union/array nesting */
428 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
431 /* speculate this member will be the good one */
432 spec->bit_offset += bit_offset;
433 spec->raw_spec[spec->raw_len++] = i;
435 targ_name = btf__name_by_offset(targ_btf, m->name_off);
436 if (str_is_empty(targ_name)) {
437 /* embedded struct/union, we need to go deeper */
438 found = bpf_core_match_member(local_btf, local_acc,
441 if (found) /* either found or error */
443 } else if (strcmp(local_name, targ_name) == 0) {
444 /* matching named field */
445 struct bpf_core_accessor *targ_acc;
447 targ_acc = &spec->spec[spec->len++];
448 targ_acc->type_id = targ_id;
450 targ_acc->name = targ_name;
452 *next_targ_id = m->type;
453 found = bpf_core_fields_are_compat(local_btf,
457 spec->len--; /* pop accessor */
460 /* member turned out not to be what we looked for */
461 spec->bit_offset -= bit_offset;
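/* Illustrative sketch of the anonymous-member descent above (hypothetical
 * types, an assumption for exposition):
 *
 *     // local definition                 // target (e.g. newer kernel)
 *     struct s {                          struct s {
 *         int x;                              struct {
 *     };                                          int x;
 *                                             };
 *                                         };
 *
 * Matching the named accessor 'x' recurses into the target's embedded
 * anonymous struct, appending that member's index to the target's raw spec,
 * so both sides still resolve to the correct bit offset of 'x'.
 */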
469 * Try to match local spec to a target type and, if successful, produce full
470 * target spec (high-level, low-level + bit offset).
472 static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
473 const struct btf *targ_btf, __u32 targ_id,
474 struct bpf_core_spec *targ_spec)
476 const struct btf_type *targ_type;
477 const struct bpf_core_accessor *local_acc;
478 struct bpf_core_accessor *targ_acc;
481 memset(targ_spec, 0, sizeof(*targ_spec));
482 targ_spec->btf = targ_btf;
483 targ_spec->root_type_id = targ_id;
484 targ_spec->relo_kind = local_spec->relo_kind;
486 if (core_relo_is_type_based(local_spec->relo_kind)) {
487 return bpf_core_types_are_compat(local_spec->btf,
488 local_spec->root_type_id,
492 local_acc = &local_spec->spec[0];
493 targ_acc = &targ_spec->spec[0];
495 if (core_relo_is_enumval_based(local_spec->relo_kind)) {
496 size_t local_essent_len, targ_essent_len;
497 const struct btf_enum *e;
498 const char *targ_name;
500 /* has to resolve to an enum */
501 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
502 if (!btf_is_enum(targ_type))
505 local_essent_len = bpf_core_essential_name_len(local_acc->name);
507 for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
508 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
509 targ_essent_len = bpf_core_essential_name_len(targ_name);
510 if (targ_essent_len != local_essent_len)
512 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
513 targ_acc->type_id = targ_id;
515 targ_acc->name = targ_name;
517 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
518 targ_spec->raw_len++;
525 if (!core_relo_is_field_based(local_spec->relo_kind))
528 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
529 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
534 if (local_acc->name) {
535 matched = bpf_core_match_member(local_spec->btf,
538 targ_spec, &targ_id);
542 /* for i=0, targ_id is already treated as the array element
543 * type (because it's the original struct); for others
544 * we should find the array element type first
547 const struct btf_array *a;
550 if (!btf_is_array(targ_type))
553 a = btf_array(targ_type);
554 flex = is_flex_arr(targ_btf, targ_acc - 1, a);
555 if (!flex && local_acc->idx >= a->nelems)
557 if (!skip_mods_and_typedefs(targ_btf, a->type,
562 /* too deep struct/union/array nesting */
563 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
566 targ_acc->type_id = targ_id;
567 targ_acc->idx = local_acc->idx;
568 targ_acc->name = NULL;
570 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
571 targ_spec->raw_len++;
573 sz = btf__resolve_size(targ_btf, targ_id);
576 targ_spec->bit_offset += local_acc->idx * sz * 8;
583 static int bpf_core_calc_field_relo(const char *prog_name,
584 const struct bpf_core_relo *relo,
585 const struct bpf_core_spec *spec,
586 __u32 *val, __u32 *field_sz, __u32 *type_id,
589 const struct bpf_core_accessor *acc;
590 const struct btf_type *t;
591 __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
592 const struct btf_member *m;
593 const struct btf_type *mt;
599 if (relo->kind == BPF_CORE_FIELD_EXISTS) {
605 return -EUCLEAN; /* request instruction poisoning */
607 acc = &spec->spec[spec->len - 1];
608 t = btf_type_by_id(spec->btf, acc->type_id);
610 /* a[n] accessor needs special handling */
612 if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
613 *val = spec->bit_offset / 8;
614 /* remember field size for load/store mem size */
615 sz = btf__resolve_size(spec->btf, acc->type_id);
619 *type_id = acc->type_id;
620 } else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) {
621 sz = btf__resolve_size(spec->btf, acc->type_id);
626 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
627 prog_name, relo->kind, relo->insn_off / 8);
635 m = btf_members(t) + acc->idx;
636 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
637 bit_off = spec->bit_offset;
638 bit_sz = btf_member_bitfield_size(t, acc->idx);
640 bitfield = bit_sz > 0;
643 byte_off = bit_off / 8 / byte_sz * byte_sz;
644 /* figure out smallest int size necessary for bitfield load */
645 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
647 /* bitfield can't be read with 64-bit read */
648 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
649 prog_name, relo->kind, relo->insn_off / 8);
653 byte_off = bit_off / 8 / byte_sz * byte_sz;
656 sz = btf__resolve_size(spec->btf, field_type_id);
660 byte_off = spec->bit_offset / 8;
661 bit_sz = byte_sz * 8;
664 /* for bitfields, all the relocatable aspects are ambiguous and we
665 * might disagree with the compiler, so turn off validation of the
666 * expected value, except for signedness
669 *validate = !bitfield;
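/* Worked example (illustrative numbers): for a bitfield 'int f:5' placed at
 * bit_off = 30 (e.g. in a __attribute__((packed)) struct), the loop above
 * starts from the member type's 4-byte size: byte_off = 30/8/4*4 = 0, but
 * 30 + 5 - 0 = 35 bits don't fit into 32, so byte_sz doubles to 8 while
 * byte_off stays 0. The resulting 8-byte load is then combined with the
 * shifts computed below: a left shift of 64 - 35 = 29 on little-endian and
 * a right shift of 64 - bit_sz = 59 to extract the field's value.
 */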
671 switch (relo->kind) {
672 case BPF_CORE_FIELD_BYTE_OFFSET:
676 *type_id = field_type_id;
679 case BPF_CORE_FIELD_BYTE_SIZE:
682 case BPF_CORE_FIELD_SIGNED:
683 /* enums will be assumed unsigned */
684 *val = btf_is_enum(mt) ||
685 (btf_int_encoding(mt) & BTF_INT_SIGNED);
687 *validate = true; /* signedness is never ambiguous */
689 case BPF_CORE_FIELD_LSHIFT_U64:
690 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
691 *val = 64 - (bit_off + bit_sz - byte_off * 8);
693 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
696 case BPF_CORE_FIELD_RSHIFT_U64:
699 *validate = true; /* right shift is never ambiguous */
701 case BPF_CORE_FIELD_EXISTS:
709 static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
710 const struct bpf_core_spec *spec,
711 __u32 *val, bool *validate)
715 /* by default, always check expected value in bpf_insn */
719 /* type-based relos return zero when target type is not found */
725 switch (relo->kind) {
726 case BPF_CORE_TYPE_ID_TARGET:
727 *val = spec->root_type_id;
728 /* type ID, embedded in bpf_insn, might change during linking,
729 * so enforcing it is pointless
734 case BPF_CORE_TYPE_EXISTS:
737 case BPF_CORE_TYPE_SIZE:
738 sz = btf__resolve_size(spec->btf, spec->root_type_id);
743 case BPF_CORE_TYPE_ID_LOCAL:
744 /* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */
752 static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
753 const struct bpf_core_spec *spec,
756 const struct btf_type *t;
757 const struct btf_enum *e;
759 switch (relo->kind) {
760 case BPF_CORE_ENUMVAL_EXISTS:
763 case BPF_CORE_ENUMVAL_VALUE:
765 return -EUCLEAN; /* request instruction poisoning */
766 t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
767 e = btf_enum(t) + spec->spec[0].idx;
777 /* Calculate original and target relocation values, given local and target
778 * specs and relocation kind. These values are calculated for each candidate.
779 * If there are multiple candidates, resulting values should all be consistent
780 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
781 * If the instruction has to be poisoned, res->poison will be set to true.
783 static int bpf_core_calc_relo(const char *prog_name,
784 const struct bpf_core_relo *relo,
786 const struct bpf_core_spec *local_spec,
787 const struct bpf_core_spec *targ_spec,
788 struct bpf_core_relo_res *res)
790 int err = -EOPNOTSUPP;
795 res->validate = true;
796 res->fail_memsz_adjust = false;
797 res->orig_sz = res->new_sz = 0;
798 res->orig_type_id = res->new_type_id = 0;
800 if (core_relo_is_field_based(relo->kind)) {
801 err = bpf_core_calc_field_relo(prog_name, relo, local_spec,
802 &res->orig_val, &res->orig_sz,
803 &res->orig_type_id, &res->validate);
804 err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec,
805 &res->new_val, &res->new_sz,
806 &res->new_type_id, NULL);
809 /* Validate if it's safe to adjust load/store memory size.
810 * Adjustments are performed only if the original and new memory sizes differ.
813 res->fail_memsz_adjust = false;
814 if (res->orig_sz != res->new_sz) {
815 const struct btf_type *orig_t, *new_t;
817 orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
818 new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);
820 /* There are two use cases in which it's safe to
821 * adjust load/store's mem size:
822 * - reading a 32-bit kernel pointer, while on the BPF
823 * side pointers are always 64-bit; in this case
824 * it's safe to "downsize" the instruction size, since the
825 * pointer is treated as an unsigned integer with
826 * zero-extended upper 32 bits;
827 * - reading unsigned integers, where zero-extension
828 * again preserves the value correctly.
830 * In all other cases it's incorrect to attempt to
831 * load/store field because read value will be
832 * incorrect, so we poison relocated instruction.
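 *
 * Illustrative example of the pointer case (hypothetical field, an
 * assumption for exposition): a BPF program reads 'mm->owner', a
 * 'struct task_struct *' member. Against local BTF this compiles into an
 * 8-byte (BPF_DW) load, but on a 32-bit target kernel the field is only
 * 4 bytes; the load is downsized to BPF_W and the destination register's
 * upper 32 bits are zero-filled, which still yields a correct pointer value.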
834 if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
836 if (btf_is_int(orig_t) && btf_is_int(new_t) &&
837 btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
838 btf_int_encoding(new_t) != BTF_INT_SIGNED)
841 /* mark as invalid mem size adjustment, but this will
842 * only be checked for LDX/STX/ST insns
844 res->fail_memsz_adjust = true;
846 } else if (core_relo_is_type_based(relo->kind)) {
847 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
848 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
849 } else if (core_relo_is_enumval_based(relo->kind)) {
850 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
851 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
855 if (err == -EUCLEAN) {
856 /* EUCLEAN is used to signal instruction poisoning request */
859 } else if (err == -EOPNOTSUPP) {
860 /* EOPNOTSUPP means unknown/unsupported relocation */
861 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
862 prog_name, relo_idx, core_relo_kind_str(relo->kind),
863 relo->kind, relo->insn_off / 8);
870 * Turn an instruction for which CO-RE relocation failed into an invalid one
871 * with a distinct signature.
873 static void bpf_core_poison_insn(const char *prog_name, int relo_idx,
874 int insn_idx, struct bpf_insn *insn)
876 pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
877 prog_name, relo_idx, insn_idx);
878 insn->code = BPF_JMP | BPF_CALL;
882 /* if this instruction is reachable (not dead code),
883 * the verifier will complain with the following message:
884 * invalid func unknown#195896080
886 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
889 static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
891 switch (BPF_SIZE(insn->code)) {
892 case BPF_DW: return 8;
893 case BPF_W: return 4;
894 case BPF_H: return 2;
895 case BPF_B: return 1;
900 static int insn_bytes_to_bpf_size(__u32 sz)
903 case 8: return BPF_DW;
904 case 4: return BPF_W;
905 case 2: return BPF_H;
906 case 1: return BPF_B;
912 * Patch relocatable BPF instruction.
914 * Patched value is determined by relocation kind and target specification.
915 * For existence relocations, target spec will be NULL if the field/type is not found.
916 * Expected insn->imm value is determined using relocation kind and local
917 * spec, and is checked before patching instruction. If actual insn->imm value
918 * is wrong, bail out with error.
920 * Currently supported classes of BPF instruction are:
921 * 1. rX = <imm> (assignment with immediate operand);
922 * 2. rX += <imm> (arithmetic operations with immediate operand);
923 * 3. rX = <imm64> (load with 64-bit immediate value);
924 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
925 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
926 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
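 *
 * For example (illustrative): with a local type marked
 * __attribute__((preserve_access_index)), a plain dereference such as
 *
 *     pid = task->pid;
 *
 * compiles into class 4 above, rX = *(u32 *)(rY + <off>), where <off> is the
 * field offset recorded against local BTF; patching rewrites <off> (and, if
 * the field size differs, the load size) to match the target kernel's layout.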
928 int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
929 int insn_idx, const struct bpf_core_relo *relo,
930 int relo_idx, const struct bpf_core_relo_res *res)
932 __u32 orig_val, new_val;
935 class = BPF_CLASS(insn->code);
939 /* poison second part of ldimm64 to avoid confusing error from
940 * verifier about "unknown opcode 00"
942 if (is_ldimm64_insn(insn))
943 bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1);
944 bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn);
948 orig_val = res->orig_val;
949 new_val = res->new_val;
954 if (BPF_SRC(insn->code) != BPF_K)
956 if (res->validate && insn->imm != orig_val) {
957 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
959 insn_idx, insn->imm, orig_val, new_val);
962 orig_val = insn->imm;
964 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
965 prog_name, relo_idx, insn_idx,
971 if (res->validate && insn->off != orig_val) {
972 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
973 prog_name, relo_idx, insn_idx, insn->off, orig_val, new_val);
976 if (new_val > SHRT_MAX) {
977 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
978 prog_name, relo_idx, insn_idx, new_val);
981 if (res->fail_memsz_adjust) {
982 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
983 "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
984 prog_name, relo_idx, insn_idx);
988 orig_val = insn->off;
990 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
991 prog_name, relo_idx, insn_idx, orig_val, new_val);
993 if (res->new_sz != res->orig_sz) {
994 int insn_bytes_sz, insn_bpf_sz;
996 insn_bytes_sz = insn_bpf_size_to_bytes(insn);
997 if (insn_bytes_sz != res->orig_sz) {
998 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
999 prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
1003 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
1004 if (insn_bpf_sz < 0) {
1005 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
1006 prog_name, relo_idx, insn_idx, res->new_sz);
1010 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
1011 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
1012 prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
1018 if (!is_ldimm64_insn(insn) ||
1019 insn[0].src_reg != 0 || insn[0].off != 0 ||
1020 insn[1].code != 0 || insn[1].dst_reg != 0 ||
1021 insn[1].src_reg != 0 || insn[1].off != 0) {
1022 pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
1023 prog_name, relo_idx, insn_idx);
1027 imm = insn[0].imm + ((__u64)insn[1].imm << 32);
1028 if (res->validate && imm != orig_val) {
1029 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
1030 prog_name, relo_idx,
1031 insn_idx, (unsigned long long)imm,
1036 insn[0].imm = new_val;
1037 insn[1].imm = 0; /* currently only 32-bit values are supported */
1038 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
1039 prog_name, relo_idx, insn_idx,
1040 (unsigned long long)imm, new_val);
1044 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
1045 prog_name, relo_idx, insn_idx, insn->code,
1046 insn->src_reg, insn->dst_reg, insn->off, insn->imm);
1053 /* Output spec definition in the format:
1054 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
1055 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
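 *
 * For instance, a byte-offset relocation against the 'struct sample' example
 * earlier in this file might be rendered as (type ID and offset illustrative):
 *
 *   <byte_off> [7] struct sample.a[4] (0:1:2:4 @ offset 28)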
1057 static int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec)
1059 const struct btf_type *t;
1060 const struct btf_enum *e;
1065 #define append_buf(fmt, args...) \
1068 r = snprintf(buf, buf_sz, fmt, ##args); \
1076 type_id = spec->root_type_id;
1077 t = btf_type_by_id(spec->btf, type_id);
1078 s = btf__name_by_offset(spec->btf, t->name_off);
1080 append_buf("<%s> [%u] %s %s",
1081 core_relo_kind_str(spec->relo_kind),
1082 type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
1084 if (core_relo_is_type_based(spec->relo_kind))
1087 if (core_relo_is_enumval_based(spec->relo_kind)) {
1088 t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
1089 e = btf_enum(t) + spec->raw_spec[0];
1090 s = btf__name_by_offset(spec->btf, e->name_off);
1092 append_buf("::%s = %u", s, e->val);
1096 if (core_relo_is_field_based(spec->relo_kind)) {
1097 for (i = 0; i < spec->len; i++) {
1098 if (spec->spec[i].name)
1099 append_buf(".%s", spec->spec[i].name);
1100 else if (i > 0 || spec->spec[i].idx > 0)
1101 append_buf("[%u]", spec->spec[i].idx);
1105 for (i = 0; i < spec->raw_len; i++)
1106 append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
1108 if (spec->bit_offset % 8)
1109 append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8);
1111 append_buf(" @ offset %u)", spec->bit_offset / 8);
1120 * Calculate CO-RE relocation target result.
1122 * The outline and important points of the algorithm:
1123 * 1. For given local type, find corresponding candidate target types.
1124 * Candidate type is a type with the same "essential" name, ignoring
1125 * everything after last triple underscore (___). E.g., `sample`,
1126 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
1127 * for each other. Names with triple underscore are referred to as
1128 * "flavors" and are useful, among other things, to allow to
1129 * specify/support incompatible variations of the same kernel struct, which
1130 * might differ between different kernel versions and/or build
1133 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
1134 * converter, when deduplicated BTF of a kernel still contains more than
1135 * one distinct type with the same name. In that case, ___2, ___3, etc.
1136 * are appended starting from the second name conflict. But struct flavors are
1137 * also useful when defined "locally", in the BPF program, to extract the same
1138 * data from layouts that changed incompatibly between kernel
1139 * versions/configurations. For instance, to handle field renames between
1140 * kernel versions, one can define two flavors of the struct with the
1141 * same essential name and use conditional relocations to extract the field,
1142 * depending on the target kernel version (see the example at the end of this comment).
1143 * 2. For each candidate type, try to match local specification to this
1144 * candidate target type. Matching involves finding corresponding
1145 * high-level spec accessors, meaning that all named fields should match,
1146 * as well as all array accesses should be within the actual bounds. Also,
1147 * types should be compatible (see bpf_core_fields_are_compat for details).
1148 * 3. It is supported and expected that there might be multiple flavors
1149 * matching the spec. As long as all the specs resolve to the same set of
1150 * offsets across all candidates, there is no error. If there is any
1151 * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
1152 * imperfection of BTF deduplication, which can cause slight duplication of
1153 * the same BTF type, if some directly or indirectly referenced (by
1154 * pointer) type gets resolved to different actual types in different
1155 * object files. If such situation occurs, deduplicated BTF will end up
1156 * with two (or more) structurally identical types, which differ only in
1157 * types they refer to through pointer. This should be OK in most cases and is not a problem in practice.
1159 * 4. Candidate types search is performed by linearly scanning through all
1160 * types in target BTF. It is anticipated that this is overall more
1161 * efficient memory-wise and not significantly worse (if not better)
1162 * CPU-wise compared to prebuilding a map from all local type names to
1163 * a list of candidate type names. It's also sped up by caching the resolved
1164 * list of matching candidates for each local "root" type ID that has at
1165 * least one bpf_core_relo associated with it. This list is shared
1166 * between multiple relocations for the same type ID and is updated as some
1167 * of the candidates are pruned due to structural incompatibility.
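 *
 * As an illustration of the flavor mechanism from point 1 (hypothetical
 * snippet using libbpf's bpf_core_field_exists()/BPF_CORE_READ() helpers,
 * an assumption for exposition, not part of this file):
 *
 *     struct task_struct___pre514 {
 *         long state;
 *     } __attribute__((preserve_access_index));
 *
 *     long st;
 *
 *     if (bpf_core_field_exists(((struct task_struct___pre514 *)task)->state))
 *         st = BPF_CORE_READ((struct task_struct___pre514 *)task, state);
 *     else
 *         st = BPF_CORE_READ(task, __state);
 *
 * The flavor shares the essential name 'task_struct' with the kernel type,
 * so it is matched against the same candidates; the branch whose field does
 * not exist on the running kernel is poisoned and eliminated as dead code.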
1169 int bpf_core_calc_relo_insn(const char *prog_name,
1170 const struct bpf_core_relo *relo,
1172 const struct btf *local_btf,
1173 struct bpf_core_cand_list *cands,
1174 struct bpf_core_spec *specs_scratch,
1175 struct bpf_core_relo_res *targ_res)
1177 struct bpf_core_spec *local_spec = &specs_scratch[0];
1178 struct bpf_core_spec *cand_spec = &specs_scratch[1];
1179 struct bpf_core_spec *targ_spec = &specs_scratch[2];
1180 struct bpf_core_relo_res cand_res;
1181 const struct btf_type *local_type;
1182 const char *local_name;
1187 local_id = relo->type_id;
1188 local_type = btf_type_by_id(local_btf, local_id);
1189 local_name = btf__name_by_offset(local_btf, local_type->name_off);
1193 err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec);
1195 const char *spec_str;
1197 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
1198 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
1199 prog_name, relo_idx, local_id, btf_kind_str(local_type),
1200 str_is_empty(local_name) ? "<anon>" : local_name,
1201 spec_str ?: "<?>", err);
1205 bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec);
1206 pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf);
1208 /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
1209 if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
1210 /* bpf_insn's imm value could get out of sync during linking */
1211 memset(targ_res, 0, sizeof(*targ_res));
1212 targ_res->validate = false;
1213 targ_res->poison = false;
1214 targ_res->orig_val = local_spec->root_type_id;
1215 targ_res->new_val = local_spec->root_type_id;
1219 /* libbpf doesn't support candidate search for anonymous types */
1220 if (str_is_empty(local_name)) {
1221 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
1222 prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
1226 for (i = 0, j = 0; i < cands->len; i++) {
1227 err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
1228 cands->cands[i].id, cand_spec);
1230 bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
1231 pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
1232 prog_name, relo_idx, i, spec_buf, err);
1236 bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
1237 pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name,
1238 relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf);
1243 err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res);
1248 *targ_res = cand_res;
1249 *targ_spec = *cand_spec;
1250 } else if (cand_spec->bit_offset != targ_spec->bit_offset) {
1251 /* if there are many field relo candidates, they
1252 * should all resolve to the same bit offset
1254 pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
1255 prog_name, relo_idx, cand_spec->bit_offset,
1256 targ_spec->bit_offset);
1258 } else if (cand_res.poison != targ_res->poison ||
1259 cand_res.new_val != targ_res->new_val) {
1260 /* all candidates should result in the same relocation
1261 * decision and value, otherwise it's dangerous to
1262 * proceed due to ambiguity
1264 pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
1265 prog_name, relo_idx,
1266 cand_res.poison ? "failure" : "success", cand_res.new_val,
1267 targ_res->poison ? "failure" : "success", targ_res->new_val);
1271 cands->cands[j++] = cands->cands[i];
1275 * For a BPF_CORE_FIELD_EXISTS relo, or when the BPF program has field
1276 * existence checks or kernel version/config checks, it's expected
1277 * that we might not find any candidates. In this case, if the field
1278 * wasn't found in any candidate, the list of candidates shouldn't
1279 * change at all; we'll just handle the relocation appropriately,
1280 * depending on the relo's kind.
1286 * If no candidates were found, it might be either a programmer error
1287 * or an expected case, depending on whether the instruction with the
1288 * relocation is guarded in some way that makes it unreachable (dead
1289 * code) if the relocation can't be resolved. This is handled uniformly in
1290 * bpf_core_patch_insn() by replacing that instruction with a
1291 * BPF helper call insn (using an invalid helper ID). If that instruction
1292 * is indeed unreachable, it will be ignored and eliminated by the
1293 * verifier. If it was an error, the verifier will complain and point
1294 * to a specific instruction number in its log.
1297 pr_debug("prog '%s': relo #%d: no matching targets found\n",
1298 prog_name, relo_idx);
1300 /* calculate single target relo result explicitly */
1301 err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res);