// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>

struct bpf_struct_ops_value {
	struct bpf_struct_ops_common_value common;
	char data[] ____cacheline_aligned_in_smp;
};

#define MAX_TRAMP_IMAGE_PAGES 8

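/* Each trampoline image is one page. Trampolines for the members of a
 * single map are packed back to back into a page until it is full, so a
 * map may use at most MAX_TRAMP_IMAGE_PAGES pages of trampolines in
 * total (see bpf_struct_ops_prepare_trampoline() below).
 */
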
struct bpf_struct_ops_map {
	struct bpf_map map;
	struct rcu_head rcu;
	const struct bpf_struct_ops_desc *st_ops_desc;
	/* protect map_update */
	struct mutex lock;
	/* links has all the bpf_links that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_link **links;
	u32 links_cnt;
	u32 image_pages_cnt;
	/* image_pages is an array of pages that hold all the trampolines
	 * that store the func args before calling the bpf_prog.
	 */
	void *image_pages[MAX_TRAMP_IMAGE_PAGES];
	/* The owner module's btf. */
	struct btf *btf;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue. For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

struct bpf_struct_ops_link {
	struct bpf_link link;
	struct bpf_map __rcu *map;
};

static DEFINE_MUTEX(update_mutex);

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

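/* For example, the value type of the "tcp_congestion_ops" struct_ops is
 * the BTF struct named "bpf_struct_ops_tcp_congestion_ops": the kernel
 * struct prefixed with VALUE_PREFIX, preceded by a
 * struct bpf_struct_ops_common_value (see is_valid_value_type() below).
 */
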
const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};

BTF_ID_LIST(st_ops_ids)
BTF_ID(struct, module)
BTF_ID(struct, bpf_struct_ops_common_value)

enum {
	IDX_MODULE_ID,
	IDX_ST_OPS_COMMON_VALUE_ID,
};

extern struct btf *btf_vmlinux;

static bool is_valid_value_type(struct btf *btf, s32 value_id,
				const struct btf_type *type,
				const char *value_name)
{
	const struct btf_type *common_value_type;
	const struct btf_member *member;
	const struct btf_type *vt, *mt;

	vt = btf_type_by_id(btf, value_id);
	if (btf_vlen(vt) != 2) {
		pr_warn("The number of %s's members should be 2, but we get %d\n",
			value_name, btf_vlen(vt));
		return false;
	}
	member = btf_type_member(vt);
	mt = btf_type_by_id(btf, member->type);
	common_value_type = btf_type_by_id(btf_vmlinux,
					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
	if (mt != common_value_type) {
		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
			value_name);
		return false;
	}

	member++;
	mt = btf_type_by_id(btf, member->type);
	if (mt != type) {
		pr_warn("The second member of %s should be %s\n",
			value_name, btf_name_by_offset(btf, type->name_off));
		return false;
	}

	return true;
}

static void *bpf_struct_ops_image_alloc(void)
{
	void *image;
	int err;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		return ERR_PTR(err);
	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
	if (!image) {
		bpf_jit_uncharge_modmem(PAGE_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	return image;
}

void bpf_struct_ops_image_free(void *image)
{
	if (image) {
		arch_free_bpf_trampoline(image, PAGE_SIZE);
		bpf_jit_uncharge_modmem(PAGE_SIZE);
	}
}

#define MAYBE_NULL_SUFFIX "__nullable"
#define MAX_STUB_NAME 128

/* Return the type info of a stub function, if it exists.
 *
 * The name of a stub function is made up of the name of the struct_ops and
 * the name of the function pointer member, separated by "__". For example,
 * if the struct_ops type is named "foo_ops" and the function pointer
 * member is named "bar", the stub function name would be "foo_ops__bar".
 */
static const struct btf_type *
find_stub_func_proto(const struct btf *btf, const char *st_op_name,
		     const char *member_name)
{
	char stub_func_name[MAX_STUB_NAME];
	const struct btf_type *func_type;
	s32 btf_id;
	int cp;

	cp = snprintf(stub_func_name, MAX_STUB_NAME, "%s__%s",
		      st_op_name, member_name);
	if (cp >= MAX_STUB_NAME) {
		pr_warn("Stub function name too long\n");
		return NULL;
	}
	btf_id = btf_find_by_name_kind(btf, stub_func_name, BTF_KIND_FUNC);
	if (btf_id < 0)
		return NULL;
	func_type = btf_type_by_id(btf, btf_id);
	if (!func_type)
		return NULL;

	return btf_type_by_id(btf, func_type->type); /* FUNC_PROTO */
}

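/* A subsystem declares a stub to describe a member's verifier contract.
 * As a sketch (hypothetical "foo_ops" example continuing the comment
 * above, not a stub defined in this file), marking the argument of "bar"
 * as nullable looks like:
 *
 *	static int foo_ops__bar(struct foo *f__nullable)
 *	{
 *		return 0;
 *	}
 *
 * The "__nullable" suffix is consumed by prepare_arg_info() below.
 */
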
/* Prepare argument info for every nullable argument of a member of a
 * struct_ops type.
 *
 * Initialize a struct bpf_struct_ops_arg_info according to type info of
 * the arguments of a stub function. (Check kCFI for more information about
 * stub functions.)
 *
 * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
 * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
 * the information used by the verifier to check the arguments of the
 * BPF struct_ops program assigned to the member. Here, we only care about
 * the arguments that are marked as __nullable.
 *
 * The array of struct bpf_ctx_arg_aux is eventually assigned to
 * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
 * verifier. (See check_struct_ops_btf_id())
 *
 * arg_info->info will be the list of struct bpf_ctx_arg_aux on success.
 * On failure, it is left untouched.
 */
static int prepare_arg_info(struct btf *btf,
			    const char *st_ops_name,
			    const char *member_name,
			    const struct btf_type *func_proto,
			    struct bpf_struct_ops_arg_info *arg_info)
{
	const struct btf_type *stub_func_proto, *pointed_type;
	const struct btf_param *stub_args, *args;
	struct bpf_ctx_arg_aux *info, *info_buf;
	u32 nargs, arg_no, info_cnt = 0;
	u32 arg_btf_id;
	int offset;

	stub_func_proto = find_stub_func_proto(btf, st_ops_name, member_name);
	if (!stub_func_proto)
		return 0;

	/* Check if the number of arguments of the stub function is the same
	 * as the number of arguments of the function pointer.
	 */
	nargs = btf_type_vlen(func_proto);
	if (nargs != btf_type_vlen(stub_func_proto)) {
		pr_warn("the number of arguments of the stub function %s__%s does not match the number of arguments of the member %s of struct %s\n",
			st_ops_name, member_name, member_name, st_ops_name);
		return -EINVAL;
	}

	if (!nargs)
		return 0;

	args = btf_params(func_proto);
	stub_args = btf_params(stub_func_proto);

	info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
	if (!info_buf)
		return -ENOMEM;

	/* Prepare info for every nullable argument */
	info = info_buf;
	for (arg_no = 0; arg_no < nargs; arg_no++) {
		/* Skip arguments that are not suffixed with
		 * "__nullable".
		 */
		if (!btf_param_match_suffix(btf, &stub_args[arg_no],
					    MAYBE_NULL_SUFFIX))
			continue;

		/* Should be a pointer to struct */
		pointed_type = btf_type_resolve_ptr(btf,
						    args[arg_no].type,
						    &arg_btf_id);
		if (!pointed_type ||
		    !btf_type_is_struct(pointed_type)) {
			pr_warn("stub function %s__%s has %s tagging to an unsupported type\n",
				st_ops_name, member_name, MAYBE_NULL_SUFFIX);
			goto err_out;
		}

		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
		if (offset < 0) {
			pr_warn("stub function %s__%s has an invalid trampoline ctx offset for arg#%u\n",
				st_ops_name, member_name, arg_no);
			goto err_out;
		}

		if (args[arg_no].type != stub_args[arg_no].type) {
			pr_warn("arg#%u type in stub function %s__%s does not match with its original func_proto\n",
				arg_no, st_ops_name, member_name);
			goto err_out;
		}

		/* Fill the information of the new argument */
		info->reg_type =
			PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
		info->btf_id = arg_btf_id;
		info->btf = btf;
		info->offset = offset;

		info++;
		info_cnt++;
	}

	if (info_cnt) {
		arg_info->info = info_buf;
		arg_info->cnt = info_cnt;
	} else {
		kfree(info_buf);
	}

	return 0;

err_out:
	kfree(info_buf);

	return -EINVAL;
}

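/* Since a "__nullable" argument is verified as PTR_MAYBE_NULL, the BPF
 * struct_ops program implementing the member must NULL-check it before
 * dereferencing it, or the verifier rejects the program. A sketch (using
 * the hypothetical foo_ops__bar stub above):
 *
 *	SEC("struct_ops/bar")
 *	int BPF_PROG(bar, struct foo *f)
 *	{
 *		if (!f)
 *			return 0;
 *		return f->val;
 *	}
 */
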
/* Clean up the arg_info in a struct bpf_struct_ops_desc. */
void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
{
	struct bpf_struct_ops_arg_info *arg_info;
	int i;

	arg_info = st_ops_desc->arg_info;
	for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
		kfree(arg_info[i].info);

	kfree(arg_info);
}

int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
			     struct btf *btf,
			     struct bpf_verifier_log *log)
{
	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_arg_info *arg_info;
	const struct btf_member *member;
	const struct btf_type *t;
	s32 type_id, value_id;
	char value_name[128];
	const char *mname;
	int i, err;

	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
	    sizeof(value_name)) {
		pr_warn("struct_ops name %s is too long\n",
			st_ops->name);
		return -EINVAL;
	}
	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

	if (!st_ops->cfi_stubs) {
		pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
		return -EINVAL;
	}

	type_id = btf_find_by_name_kind(btf, st_ops->name,
					BTF_KIND_STRUCT);
	if (type_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			st_ops->name, btf_get_name(btf));
		return -EINVAL;
	}
	t = btf_type_by_id(btf, type_id);
	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
		pr_warn("Cannot support #%u members in struct %s\n",
			btf_type_vlen(t), st_ops->name);
		return -EINVAL;
	}

	value_id = btf_find_by_name_kind(btf, value_name,
					 BTF_KIND_STRUCT);
	if (value_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			value_name, btf_get_name(btf));
		return -EINVAL;
	}
	if (!is_valid_value_type(btf, value_id, t, value_name))
		return -EINVAL;

	arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
			   GFP_KERNEL);
	if (!arg_info)
		return -ENOMEM;

	st_ops_desc->arg_info = arg_info;
	st_ops_desc->type = t;
	st_ops_desc->type_id = type_id;
	st_ops_desc->value_id = value_id;
	st_ops_desc->value_type = btf_type_by_id(btf, value_id);

	for_each_member(i, t, member) {
		const struct btf_type *func_proto;

		mname = btf_name_by_offset(btf, member->name_off);
		if (!*mname) {
			pr_warn("anon member in struct %s is not supported\n",
				st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (__btf_member_bitfield_size(t, member)) {
			pr_warn("bit field member %s in struct %s is not supported\n",
				mname, st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		func_proto = btf_type_resolve_func_ptr(btf,
						       member->type,
						       NULL);
		if (!func_proto)
			continue;

		if (btf_distill_func_proto(log, btf,
					   func_proto, mname,
					   &st_ops->func_models[i])) {
			pr_warn("Error in parsing func ptr %s in struct %s\n",
				mname, st_ops->name);
			err = -EINVAL;
			goto errout;
		}

		err = prepare_arg_info(btf, st_ops->name, mname,
				       func_proto, arg_info + i);
		if (err)
			goto errout;
	}

	if (st_ops->init(btf)) {
		pr_warn("Error in init bpf_struct_ops %s\n",
			st_ops->name);
		err = -EINVAL;
		goto errout;
	}

	return 0;

errout:
	bpf_struct_ops_desc_release(st_ops_desc);

	return err;
}

static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;
	s64 refcnt;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->common.state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed. state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->common.state = state;

	/* This value offers the user space a general estimate of how
	 * many sockets are still utilizing this struct_ops for TCP
	 * congestion control. The number might not be exact, but it
	 * should sufficiently meet our present goals.
	 */
	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));

	return 0;
}

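/* This is the path taken by BPF_MAP_LOOKUP_ELEM from the bpf(2) syscall;
 * e.g. "bpftool struct_ops dump" relies on it to print the prog ids
 * stored in uvalue->data together with the state and refcnt estimate
 * filled in above.
 */
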
static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->links_cnt; i++) {
		if (st_map->links[i]) {
			bpf_link_put(st_map->links[i]);
			st_map->links[i] = NULL;
		}
	}
}

static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->image_pages_cnt; i++)
		bpf_struct_ops_image_free(st_map->image_pages[i]);
	st_map->image_pages_cnt = 0;
}

static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = __btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf, member->type);
		mtype = btf_resolve_size(btf, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

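/* Rejecting non-zero holes guarantees that bytes not covered by any
 * member (padding) cannot smuggle garbage into the kernel copy, and it
 * lets bpf_struct_ops_map_update_elem() below assume that an all-zero
 * member was simply left unset by userspace.
 */
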
static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

	kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
	.release = bpf_struct_ops_link_release,
	.dealloc = bpf_struct_ops_link_dealloc,
};

int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func,
				      void **_image, u32 *_image_off,
				      bool allow_alloc)
{
	u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
	void *image = *_image;
	int size;

	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;

	if (model->ret_size > 0)
		flags |= BPF_TRAMP_F_RET_FENTRY_RET;

	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
	if (size <= 0)
		return size ? : -EFAULT;

	/* Allocate image buffer if necessary */
	if (!image || size > PAGE_SIZE - image_off) {
		if (!allow_alloc)
			return -E2BIG;

		image = bpf_struct_ops_image_alloc();
		if (IS_ERR(image))
			return PTR_ERR(image);
		image_off = 0;
	}

	size = arch_prepare_bpf_trampoline(NULL, image + image_off,
					   image + PAGE_SIZE,
					   model, flags, tlinks, stub_func);
	if (size <= 0) {
		if (image != *_image)
			bpf_struct_ops_image_free(image);
		return size ? : -EFAULT;
	}

	*_image = image;
	*_image_off = image_off + size;
	return size;
}

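/* Trampolines are appended to the current page; a fresh page is
 * allocated only when the next trampoline does not fit and allow_alloc
 * is true. The caller later adds cfi_get_offset() to the returned start
 * offset so the func ptr stored in kvalue.data points at the
 * kCFI-callable entry of the trampoline.
 */
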
static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_type *module_type;
	const struct btf_member *member;
	const struct btf_type *t = st_ops_desc->type;
	struct bpf_tramp_links *tlinks;
	void *udata, *kdata;
	int prog_fd, err;
	u32 i, trampoline_start, image_off = 0;
	void *cur_image = NULL, *image = NULL;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
	if (err)
		return err;

	uvalue = value;
	err = check_zero_holes(st_map->btf, t, uvalue->data);
	if (err)
		return err;

	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
		return -EINVAL;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;

	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		struct bpf_tramp_link *link;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here. Reject everything else.
		 */

		/* All non-func-ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(st_map->btf, member->type);
			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
		    prog->expected_attach_type != i) {
			bpf_prog_put(prog);
			err = -EINVAL;
			goto reset_unlock;
		}

		link = kzalloc(sizeof(*link), GFP_USER);
		if (!link) {
			bpf_prog_put(prog);
			err = -ENOMEM;
			goto reset_unlock;
		}
		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
			      &bpf_struct_ops_link_lops, prog);
		st_map->links[i] = &link->link;

		trampoline_start = image_off;
		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[i],
						*(void **)(st_ops->cfi_stubs + moff),
						&image, &image_off,
						st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
		if (err)
			goto reset_unlock;

		if (cur_image != image) {
			st_map->image_pages[st_map->image_pages_cnt++] = image;
			cur_image = image;
			trampoline_start = 0;
		}

		*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}

	if (st_ops->validate) {
		err = st_ops->validate(kdata);
		if (err)
			goto reset_unlock;
	}

	for (i = 0; i < st_map->image_pages_cnt; i++)
		arch_protect_bpf_trampoline(st_map->image_pages[i], PAGE_SIZE);

	if (st_map->map.map_flags & BPF_F_LINK) {
		err = 0;
		/* Let bpf_link handle registration & unregistration.
		 *
		 * Pair with smp_load_acquire() during lookup_elem().
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
		goto unlock;
	}

	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* This refcnt increment on the map here after
		 * 'st_ops->reg()' is secure since the state of the
		 * map must be set to INIT at this moment, and thus
		 * bpf_struct_ops_map_delete_elem() can't unregister
		 * or transition it to TOBEFREE concurrently.
		 */
		bpf_map_inc(map);
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
	 * verified as a whole, after all init_member() calls. Can also happen if
	 * there was a race in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */

reset_unlock:
	bpf_struct_ops_map_free_image(st_map);
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);

unlock:
	mutex_unlock(&st_map->lock);
	kfree(tlinks);
	return err;
}

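/* Userspace normally reaches this update path through libbpf. Roughly
 * (a sketch, assuming a struct_ops map named "my_cong_ops" defined in
 * the BPF object's ".struct_ops" section):
 *
 *	struct bpf_map *map;
 *	struct bpf_link *link;
 *
 *	map = bpf_object__find_map_by_name(obj, "my_cong_ops");
 *	link = bpf_map__attach_struct_ops(map);
 *
 * libbpf fills the map value with prog fds and updates the single
 * element at key 0 (driving this function); the subsystem registration
 * then happens via ->reg() here or, for BPF_F_LINK maps, via
 * bpf_struct_ops_link_create() below.
 */
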
static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	if (st_map->map.map_flags & BPF_F_LINK)
		return -EOPNOTSUPP;

	prev_state = cmpxchg(&st_map->kvalue.common.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
		bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen. Treat it as not found. */
		return -ENOENT;
	}
}

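/* kvalue.common.state forms a small state machine:
 *
 *   INIT --update_elem()--> INUSE --delete_elem()--> TOBEFREE
 *   INIT --update_elem()--> READY  (BPF_F_LINK maps; torn down through
 *                                   link release instead of delete_elem())
 *
 * The cmpxchg() above makes the INUSE -> TOBEFREE transition exclusive,
 * so ->unreg() runs exactly once.
 */
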
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(st_map->btf,
				  map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

static void __bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->links);
	bpf_struct_ops_map_free_image(st_map);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	/* st_ops->owner was acquired during map_alloc to implicitly hold
	 * the btf's refcnt. The acquire was only done when btf_is_module()
	 * is true; st_map->btf cannot be NULL here.
	 */
	if (btf_is_module(st_map->btf))
		module_put(st_map->st_ops_desc->st_ops->owner);

	/* The struct_ops's function may switch to another struct_ops.
	 *
	 * For example, bpf_tcp_cc_x->init() may switch to
	 * another tcp_cc_y by calling
	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
	 * and its refcount may reach 0 which then frees its
	 * trampoline image while tcp_cc_x is still running.
	 *
	 * A vanilla rcu gp is to wait for all bpf-tcp-cc prog
	 * to finish. bpf-tcp-cc prog is non sleepable.
	 * A rcu_tasks gp is to wait for the last few insn
	 * in the trampoline image to finish before releasing
	 * the trampoline image.
	 */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);

	__bpf_struct_ops_map_free(map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
	    !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops_desc *st_ops_desc;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct module *mod = NULL;
	struct bpf_map *map;
	struct btf *btf;
	int ret;

	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
		/* The map holds btf for its whole life time. */
		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf_is_module(btf)) {
			btf_put(btf);
			return ERR_PTR(-EINVAL);
		}

		mod = btf_try_get_module(btf);
		/* mod holds a refcnt to btf. We don't need an extra refcnt
		 * here.
		 */
		btf_put(btf);
		if (!mod)
			return ERR_PTR(-EINVAL);
	} else {
		btf = bpf_get_btf_vmlinux();
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf)
			return ERR_PTR(-ENOTSUPP);
	}

	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
	if (!st_ops_desc) {
		ret = -ENOTSUPP;
		goto errout;
	}

	vt = st_ops_desc->value_type;
	if (attr->value_size != vt->size) {
		ret = -EINVAL;
		goto errout;
	}

	t = st_ops_desc->type;

	st_map_size = sizeof(*st_map) +
		      /* kvalue stores the
		       * struct bpf_struct_ops_tcp_congestion_ops
		       */
		      (vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		ret = -ENOMEM;
		goto errout;
	}

	st_map->st_ops_desc = st_ops_desc;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->links_cnt = btf_type_vlen(t);
	st_map->links =
		bpf_map_area_alloc(st_map->links_cnt * sizeof(struct bpf_link *),
				   NUMA_NO_NODE);
	if (!st_map->uvalue || !st_map->links) {
		ret = -ENOMEM;
		goto errout_free;
	}
	st_map->btf = btf;

	mutex_init(&st_map->lock);
	bpf_map_init_from_attr(map, attr);

	return map;

errout_free:
	__bpf_struct_ops_map_free(map);
errout:
	module_put(mod);

	return ERR_PTR(ret);
}

static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct btf_type *vt = st_ops_desc->value_type;
	u64 usage;

	usage = sizeof(*st_map) +
			vt->size - sizeof(struct bpf_struct_ops_value);
	usage += vt->size;
	usage += btf_type_vlen(vt) * sizeof(struct bpf_link *);
	usage += PAGE_SIZE;
	return usage;
}

BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_mem_usage = bpf_struct_ops_map_mem_usage,
	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};

/* "const void *" because some subsystem is
 * passing a const (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	map = __bpf_map_inc_not_zero(&st_map->map, false);
	return !IS_ERR(map);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	bpf_map_put(&st_map->map);
}

static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
		map->map_flags & BPF_F_LINK &&
		/* Pair with smp_store_release() during map_update */
		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
}

static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_struct_ops_map *st_map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = (struct bpf_struct_ops_map *)
		rcu_dereference_protected(st_link->map, true);
	if (st_map) {
		/* st_link->map can be NULL if
		 * bpf_struct_ops_link_create() fails to register.
		 */
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
		bpf_map_put(&st_map->map);
	}
	kfree(st_link);
}

static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
						struct seq_file *seq)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	seq_printf(seq, "map_id:\t%d\n", map->id);
	rcu_read_unlock();
}

static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
						  struct bpf_link_info *info)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	info->struct_ops.map_id = map->id;
	rcu_read_unlock();
	return 0;
}

static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
					  struct bpf_map *expected_old_map)
{
	struct bpf_struct_ops_map *st_map, *old_st_map;
	struct bpf_map *old_map;
	struct bpf_struct_ops_link *st_link;
	int err;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = container_of(new_map, struct bpf_struct_ops_map, map);

	if (!bpf_struct_ops_valid_to_reg(new_map))
		return -EINVAL;

	if (!st_map->st_ops_desc->st_ops->update)
		return -EOPNOTSUPP;

	mutex_lock(&update_mutex);

	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (expected_old_map && old_map != expected_old_map) {
		err = -EPERM;
		goto err_out;
	}

	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
	/* The new and old struct_ops must be the same type. */
	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
		err = -EINVAL;
		goto err_out;
	}

	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
	if (err)
		goto err_out;

	bpf_map_inc(new_map);
	rcu_assign_pointer(st_link->map, new_map);
	bpf_map_put(old_map);

err_out:
	mutex_unlock(&update_mutex);

	return err;
}

static const struct bpf_link_ops bpf_struct_ops_map_lops = {
	.dealloc = bpf_struct_ops_map_link_dealloc,
	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
	.update_map = bpf_struct_ops_map_link_update,
};

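/* ->update_map enables atomic replacement of the struct_ops backing a
 * link through the BPF_LINK_UPDATE command: the new map's kvalue is
 * handed to the subsystem's ->update() while the link stays attached,
 * and readers following st_link->map under RCU see either the old or
 * the new map.
 */
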
int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	struct bpf_struct_ops_link *link = NULL;
	struct bpf_link_primer link_primer;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;
	int err;

	map = bpf_map_get(attr->link_create.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	st_map = (struct bpf_struct_ops_map *)map;

	if (!bpf_struct_ops_valid_to_reg(map)) {
		err = -EINVAL;
		goto err_out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto err_out;

	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data);
	if (err) {
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto err_out;
	}
	RCU_INIT_POINTER(link->map, map);

	return bpf_link_settle(&link_primer);

err_out:
	bpf_map_put(map);
	kfree(link);
	return err;
}

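/* A sketch of the corresponding bpf(2) usage (the map must have been
 * created with BPF_F_LINK and updated to the READY state first):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.map_fd = map_fd;
 *	attr.link_create.attach_type = BPF_STRUCT_OPS;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */
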
void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
}