// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

DEFINE_BPF_STORAGE_CACHE(sk_cache);

static int omem_charge(struct sock *sk, unsigned int size)
{
	/* same check as in sock_kmalloc() */
	if (size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static struct bpf_local_storage_data *
sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata));

	return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *sk_storage;
	bool free_sk_storage = false;
	struct hlist_node *n;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall
	 * could be modifying the sk_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the sk_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_bh(&sk_storage->lock);
	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * sk_storage.
		 */
		bpf_selem_unlink_map(selem);
		free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
								  selem, true);
	}
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

static void sk_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap);
}

static struct bpf_map *sk_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
	return &smap->map;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	/* At the syscall interface, the key is the socket's fd */
	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = sk_storage_delete(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}

static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, true);
	if (!copy_selem)
		return NULL;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding new element
		 * here can race with cleanup in bpf_local_storage_map_free.
		 * Try to grab map refcnt to make sure that it's still
		 * alive and prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem);
			if (ret) {
				kfree(copy_selem);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here, the
	 * caller is responsible to call bpf_sk_storage_free.
	 */

	return ret;
}

BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags)
{
	struct bpf_local_storage_data *sdata;

	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also other memory issues during map
	     *  destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = sk_storage_delete(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}

static int sk_storage_charge(struct bpf_local_storage_map *smap,
			     void *owner, u32 size)
{
	return omem_charge(owner, size);
}

static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
				void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}

static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = sk_storage_map_alloc,
	.map_free = sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_name = "bpf_local_storage_map",
	.map_btf_id = &sk_storage_map_btf_id,
	.map_local_storage_charge = sk_storage_charge,
	.map_local_storage_uncharge = sk_storage_uncharge,
	.map_owner_storage_ptr = sk_storage_ptr,
};

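/* Userspace access sketch (not part of this file; assumes libbpf and a
 * hypothetical "map_fd"/"struct my_val"): at the syscall interface the map
 * key is a socket fd, which the bpf_fd_sk_storage_*_elem() callbacks above
 * resolve to a socket via sockfd_lookup().
 *
 *	struct my_val val = {};
 *	int sock_fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	if (!bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_ANY))
 *		bpf_map_lookup_elem(map_fd, &sock_fd, &val);
 */
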
const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func = bpf_sk_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func = bpf_sk_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func = bpf_sk_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

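/* BPF-program-side sketch of the two helpers above (hypothetical program,
 * assuming libbpf's BTF-defined map syntax; the map_alloc_check() side
 * enforces an int key, BTF value info, and BPF_F_NO_PREALLOC):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, __u64);
 *	} pkt_cnt SEC(".maps");
 *
 *	SEC("cgroup/skb")
 *	int count_pkts(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock *sk = skb->sk;
 *		__u64 *cnt;
 *
 *		if (!sk)
 *			return 1;
 *		sk = bpf_sk_fullsock(sk);	// the helpers need a fullsock
 *		if (!sk)
 *			return 1;
 *		cnt = bpf_sk_storage_get(&pkt_cnt, sk, NULL,
 *					 BPF_SK_STORAGE_GET_F_CREATE);
 *		if (cnt)
 *			__sync_fetch_and_add(cnt, 1);
 *		return 1;
 *	}
 */
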
struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 * ....
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
	       nla_total_size_64bit(value_size);
}

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN as
	 * the map_alloc_check() side also does.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			nr_maps++;
	}

	diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps,
		       GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		struct bpf_map *map;
		int map_fd;

		if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			continue;

		map_fd = nla_get_u32(nla);
		map = bpf_map_get(map_fd);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);

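/* Request-side sketch: the nla_stgs attribute handed to
 * bpf_sk_storage_diag_alloc() is expected to nest zero or more map fds,
 * mirroring the reply layout documented above nla_value_size(), e.g. for
 * inet_diag:
 *
 * INET_DIAG_REQ_SK_BPF_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE_REQ_MAP_FD (nla_put_u32)
 *	SK_DIAG_BPF_STORAGE_REQ_MAP_FD (nla_put_u32)
 *
 * Zero fds leaves diag->nr_maps == 0, which bpf_sk_storage_diag_put()
 * below treats as "dump all maps".
 */
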
static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* It cannot exceed max nlattr's payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, false);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified.  Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	/* v == NULL means iteration completed; run the prog one last
	 * time with a NULL selem so it can see the end of the dump.
	 */
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	seq_info->map = aux->map;
	return 0;
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start = bpf_sk_storage_map_seq_start,
	.next = bpf_sk_storage_map_seq_next,
	.stop = bpf_sk_storage_map_seq_stop,
	.show = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_sk_storage_map_seq_ops,
	.init_seq_private = bpf_iter_init_sk_storage_map,
	.fini_seq_private = NULL,
	.seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target = "bpf_sk_storage_map",
	.attach_target = bpf_iter_attach_map,
	.detach_target = bpf_iter_detach_map,
	.show_fdinfo = bpf_iter_map_show_fdinfo,
	.fill_link_info = bpf_iter_map_fill_link_info,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_RDWR_BUF_OR_NULL },
	},
	.seq_info = &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);
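
/* Iterator usage sketch (hypothetical program, assuming libbpf and its
 * BPF_SEQ_PRINTF convenience macro): attach a link whose
 * bpf_iter_link_info::map.map_fd names a sk_storage map (checked by
 * bpf_iter_attach_map() above), then read(2) the resulting bpf_iter fd.
 * The ctx fields match struct bpf_iter__bpf_sk_storage_map.
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_sk_storage(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		struct sock *sk = ctx->sk;
 *		__u64 *val = ctx->value;
 *
 *		if (!sk || !val)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "sk: %p val: %llu\n", sk, *val);
 *		return 0;
 *	}
 */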