bpf: Search for kptrs in prog BTF structs
author: Dave Marchevsky <davemarchevsky@fb.com>
Tue, 13 Aug 2024 21:24:21 +0000 (21:24 +0000)
committer: Alexei Starovoitov <ast@kernel.org>
Fri, 23 Aug 2024 18:39:33 +0000 (11:39 -0700)
Currently btf_parse_fields is used in two places to create struct
btf_record's for structs: when looking at mapval type, and when looking
at any struct in program BTF. The former looks for kptr fields while the
latter does not. This patch modifies the btf_parse_fields call made when
looking at prog BTF struct types to search for kptrs as well.

Before this series there was no reason to search for kptrs in non-mapval
types: a referenced kptr needs some owner to guarantee resource cleanup,
and map values were the only owner that supported this. If a struct with
a kptr field were to have some non-kptr-aware owner, the kptr field
might not be properly cleaned up and result in resources leaking. Only
searching for kptr fields in mapval was a simple way to avoid this
problem.

In practice, though, searching for BPF_KPTR when populating
struct_meta_tab does not expose us to this risk, as struct_meta_tab is
only accessed through btf_find_struct_meta helper, and that helper is
only called in contexts where recognizing the kptr field is safe:

  * PTR_TO_BTF_ID reg w/ MEM_ALLOC flag
    * Such a reg is a local kptr and must be freed via bpf_obj_drop,
      which will correctly handle kptr field

  * When handling specific kfuncs which either expect MEM_ALLOC input or
    return MEM_ALLOC output (obj_{new,drop}, percpu_obj_{new,drop},
    list+rbtree funcs, refcount_acquire)
    * Will correctly handle kptr field for the same reasons as above

  * When looking at kptr pointee type
     * Called by functions which implement "correct kptr resource
       handling"

  * In btf_check_and_fixup_fields
     * Helper that ensures no ownership loops for lists and rbtrees,
       doesn't care about kptr field existence

So we should be able to find BPF_KPTR fields in all prog BTF structs
without leaking resources.

Further patches in the series will build on this change to support
kptr_xchg into non-mapval local kptr. Without this change there would be
no kptr field found in such a type.

Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Acked-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Signed-off-by: Amery Hung <amery.hung@bytedance.com>
Link: https://lore.kernel.org/r/20240813212424.2871455-3-amery.hung@bytedance.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/btf.c

index a4c6538..edad152 100644 (file)
@@ -5512,36 +5512,70 @@ static const char *alloc_obj_fields[] = {
 static struct btf_struct_metas *
 btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
 {
-       union {
-               struct btf_id_set set;
-               struct {
-                       u32 _cnt;
-                       u32 _ids[ARRAY_SIZE(alloc_obj_fields)];
-               } _arr;
-       } aof;
        struct btf_struct_metas *tab = NULL;
+       struct btf_id_set *aof;
        int i, n, id, ret;
 
        BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
        BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32));
 
-       memset(&aof, 0, sizeof(aof));
+       aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN);
+       if (!aof)
+               return ERR_PTR(-ENOMEM);
+       aof->cnt = 0;
+
        for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) {
                /* Try to find whether this special type exists in user BTF, and
                 * if so remember its ID so we can easily find it among members
                 * of structs that we iterate in the next loop.
                 */
+               struct btf_id_set *new_aof;
+
                id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
                if (id < 0)
                        continue;
-               aof.set.ids[aof.set.cnt++] = id;
+
+               new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
+                                  GFP_KERNEL | __GFP_NOWARN);
+               if (!new_aof) {
+                       ret = -ENOMEM;
+                       goto free_aof;
+               }
+               aof = new_aof;
+               aof->ids[aof->cnt++] = id;
+       }
+
+       n = btf_nr_types(btf);
+       for (i = 1; i < n; i++) {
+               /* Try to find if there are kptrs in user BTF and remember their ID */
+               struct btf_id_set *new_aof;
+               struct btf_field_info tmp;
+               const struct btf_type *t;
+
+               t = btf_type_by_id(btf, i);
+               if (!t) {
+                       ret = -EINVAL;
+                       goto free_aof;
+               }
+
+               ret = btf_find_kptr(btf, t, 0, 0, &tmp);
+               if (ret != BTF_FIELD_FOUND)
+                       continue;
+
+               new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
+                                  GFP_KERNEL | __GFP_NOWARN);
+               if (!new_aof) {
+                       ret = -ENOMEM;
+                       goto free_aof;
+               }
+               aof = new_aof;
+               aof->ids[aof->cnt++] = i;
        }
 
-       if (!aof.set.cnt)
+       if (!aof->cnt)
                return NULL;
-       sort(&aof.set.ids, aof.set.cnt, sizeof(aof.set.ids[0]), btf_id_cmp_func, NULL);
+       sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
 
-       n = btf_nr_types(btf);
        for (i = 1; i < n; i++) {
                struct btf_struct_metas *new_tab;
                const struct btf_member *member;
@@ -5551,17 +5585,13 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
                int j, tab_cnt;
 
                t = btf_type_by_id(btf, i);
-               if (!t) {
-                       ret = -EINVAL;
-                       goto free;
-               }
                if (!__btf_type_is_struct(t))
                        continue;
 
                cond_resched();
 
                for_each_member(j, t, member) {
-                       if (btf_id_set_contains(&aof.set, member->type))
+                       if (btf_id_set_contains(aof, member->type))
                                goto parse;
                }
                continue;
@@ -5580,7 +5610,8 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
                type = &tab->types[tab->cnt];
                type->btf_id = i;
                record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
-                                                 BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT, t->size);
+                                                 BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT |
+                                                 BPF_KPTR, t->size);
                /* The record cannot be unset, treat it as an error if so */
                if (IS_ERR_OR_NULL(record)) {
                        ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
@@ -5589,9 +5620,12 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
                type->record = record;
                tab->cnt++;
        }
+       kfree(aof);
        return tab;
 free:
        btf_struct_metas_free(tab);
+free_aof:
+       kfree(aof);
        return ERR_PTR(ret);
 }