// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

#define SK_STORAGE_CREATE_FLAG_MASK \
(BPF_F_NO_PREALLOC | BPF_F_CLONE)
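
/* Each map bucket holds an hlist of elems plus a raw spinlock that
 * serializes linking/unlinking elems into/from that bucket's list
 * (see selem_link_map() and selem_unlink_map()).
 */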
struct hlist_head list;

/* The map is not the primary owner of a bpf_sk_storage_elem.
 * Instead, the sk->sk_bpf_storage is.
 *
 * The map (bpf_sk_storage_map) serves two purposes:
 * 1. Define the size of the "sk local storage". It is
 *    the map's value_size.
 *
 * 2. Maintain a list to keep track of all elems such
 *    that they can be cleaned up during the map destruction.
 *
 * When a bpf local storage is being looked up for a
 * particular sk, the "bpf_map" pointer is actually used
 * as the "key" to search in the list of elem in
 * sk->sk_bpf_storage.
 *
 * Hence, consider sk->sk_bpf_storage as a mini-map with the
 * "bpf_map" pointer as the search key.
 */
struct bpf_sk_storage_map {

/* Lookup elem does not require accessing the map.
 *
 * Updating/Deleting requires a bucket lock to
 * link/unlink the elem from the map. Multiple buckets
 * are used to reduce lock contention.
 */
struct bucket *buckets;
struct bpf_sk_storage_data {
/* smap is used as the searching key when looking up
 * from sk->sk_bpf_storage.
 *
 * Put it in the same cacheline as the data to minimize
 * the number of cacheline accesses during the cache-hit case.
 */
struct bpf_sk_storage_map __rcu *smap;
u8 data[] __aligned(8);

/* Linked to bpf_sk_storage and bpf_sk_storage_map */
struct bpf_sk_storage_elem {
struct hlist_node map_node;	/* Linked to bpf_sk_storage_map */
struct hlist_node snode;	/* Linked to bpf_sk_storage */
struct bpf_sk_storage __rcu *sk_storage;

/* The data is stored in another cacheline to minimize
 * the number of cacheline accesses during a cache hit.
 */
struct bpf_sk_storage_data sdata ____cacheline_aligned;

#define SELEM(_SDATA) container_of((_SDATA), struct bpf_sk_storage_elem, sdata)
#define SDATA(_SELEM) (&(_SELEM)->sdata)
#define BPF_SK_STORAGE_CACHE_SIZE 16

static DEFINE_SPINLOCK(cache_idx_lock);
static u64 cache_idx_usage_counts[BPF_SK_STORAGE_CACHE_SIZE];

struct bpf_sk_storage {
struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE];
struct hlist_head list;	/* List of bpf_sk_storage_elem */
struct sock *sk;	/* The sk that owns the above "list" of
			 * bpf_sk_storage_elem.
			 */
raw_spinlock_t lock;	/* Protect adding/removing from the "list" */
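
/* Pick the map bucket for an elem by hashing the elem pointer, so elems
 * of the same map spread across smap->buckets.
 */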
static struct bucket *select_bucket(struct bpf_sk_storage_map *smap,
struct bpf_sk_storage_elem *selem)
return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
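
/* Charge "size" bytes to sk->sk_omem_alloc, subject to the same
 * sysctl_optmem_max limit that sock_kmalloc() enforces. The charge is
 * undone with atomic_sub() when an elem or the sk_storage is freed.
 */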
static int omem_charge(struct sock *sk, unsigned int size)
/* same check as in sock_kmalloc() */
if (size <= sysctl_optmem_max &&
    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
atomic_add(size, &sk->sk_omem_alloc);
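
/* A selem is "linked" while its node is hashed into the corresponding
 * list; unlinking uses hlist_del_init_rcu(), so an unhashed node means
 * the selem has already been unlinked by a racing path.
 */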
static bool selem_linked_to_sk(const struct bpf_sk_storage_elem *selem)
return !hlist_unhashed(&selem->snode);

static bool selem_linked_to_map(const struct bpf_sk_storage_elem *selem)
return !hlist_unhashed(&selem->map_node);

static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap,
struct sock *sk, void *value,
struct bpf_sk_storage_elem *selem;

if (charge_omem && omem_charge(sk, smap->elem_size))

selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);

memcpy(SDATA(selem)->data, value, smap->map.value_size);

atomic_sub(smap->elem_size, &sk->sk_omem_alloc);

/* sk_storage->lock must be held and selem->sk_storage == sk_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage,
struct bpf_sk_storage_elem *selem,
struct bpf_sk_storage_map *smap;
bool free_sk_storage;

smap = rcu_dereference(SDATA(selem)->smap);

/* All uncharging on sk->sk_omem_alloc must be done first.
 * sk may be freed once the last selem is unlinked from sk_storage.
 */
atomic_sub(smap->elem_size, &sk->sk_omem_alloc);

free_sk_storage = hlist_is_singular_node(&selem->snode,
if (free_sk_storage) {
atomic_sub(sizeof(struct bpf_sk_storage), &sk->sk_omem_alloc);
sk_storage->sk = NULL;
/* After this RCU_INIT, sk may be freed and cannot be used */
RCU_INIT_POINTER(sk->sk_bpf_storage, NULL);

/* sk_storage is not freed now. sk_storage->lock is
 * still held and raw_spin_unlock_bh(&sk_storage->lock)
 * will be done by the caller.
 *
 * Although the unlock will be done under
 * rcu_read_lock(), it is more intuitive to
 * read if kfree_rcu(sk_storage, rcu) is done
 * after the raw_spin_unlock_bh(&sk_storage->lock).
 *
 * Hence, a "bool free_sk_storage" is returned
 * to the caller, which then calls the kfree_rcu()
 * after the unlock.
 */

hlist_del_init_rcu(&selem->snode);
if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) == SDATA(selem))
RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL);

kfree_rcu(selem, rcu);

return free_sk_storage;
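
/* Unlink selem from its sk_storage. The linked state is checked once
 * without the lock and re-checked under sk_storage->lock, since a
 * parallel unlink may win the race. If this was the last elem, the
 * sk_storage itself is freed with kfree_rcu() after the unlock.
 */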
static void selem_unlink_sk(struct bpf_sk_storage_elem *selem)
struct bpf_sk_storage *sk_storage;
bool free_sk_storage = false;

if (unlikely(!selem_linked_to_sk(selem)))
/* selem has already been unlinked from sk */

sk_storage = rcu_dereference(selem->sk_storage);
raw_spin_lock_bh(&sk_storage->lock);
if (likely(selem_linked_to_sk(selem)))
free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
raw_spin_unlock_bh(&sk_storage->lock);

kfree_rcu(sk_storage, rcu);

static void __selem_link_sk(struct bpf_sk_storage *sk_storage,
struct bpf_sk_storage_elem *selem)
RCU_INIT_POINTER(selem->sk_storage, sk_storage);
hlist_add_head(&selem->snode, &sk_storage->list);

static void selem_unlink_map(struct bpf_sk_storage_elem *selem)
struct bpf_sk_storage_map *smap;

if (unlikely(!selem_linked_to_map(selem)))
/* selem has already been unlinked from smap */

smap = rcu_dereference(SDATA(selem)->smap);
b = select_bucket(smap, selem);
raw_spin_lock_bh(&b->lock);
if (likely(selem_linked_to_map(selem)))
hlist_del_init_rcu(&selem->map_node);
raw_spin_unlock_bh(&b->lock);

static void selem_link_map(struct bpf_sk_storage_map *smap,
struct bpf_sk_storage_elem *selem)
struct bucket *b = select_bucket(smap, selem);

raw_spin_lock_bh(&b->lock);
RCU_INIT_POINTER(SDATA(selem)->smap, smap);
hlist_add_head_rcu(&selem->map_node, &b->list);
raw_spin_unlock_bh(&b->lock);

static void selem_unlink(struct bpf_sk_storage_elem *selem)
/* Always unlink from map before unlinking from sk_storage
 * because selem will be freed after successfully unlinked from
 * the map.
 */
selem_unlink_map(selem);
selem_unlink_sk(selem);
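
/* Look up the sdata of "smap" inside one sk's storage: first try the
 * per-sk cache slot (smap->cache_idx), then fall back to walking
 * sk_storage->list. With cacheit_lockit, a slow-path hit is promoted
 * into the cache slot under sk_storage->lock.
 */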
static struct bpf_sk_storage_data *
__sk_storage_lookup(struct bpf_sk_storage *sk_storage,
struct bpf_sk_storage_map *smap,
struct bpf_sk_storage_data *sdata;
struct bpf_sk_storage_elem *selem;

/* Fast path (cache hit) */
sdata = rcu_dereference(sk_storage->cache[smap->cache_idx]);
if (sdata && rcu_access_pointer(sdata->smap) == smap)

/* Slow path (cache miss) */
hlist_for_each_entry_rcu(selem, &sk_storage->list, snode)
if (rcu_access_pointer(SDATA(selem)->smap) == smap)

sdata = SDATA(selem);
if (cacheit_lockit) {
/* spinlock is needed to avoid racing with the
 * parallel delete. Otherwise, publishing an already
 * deleted sdata to the cache will become a use-after-free
 * problem in the next __sk_storage_lookup().
 */
raw_spin_lock_bh(&sk_storage->lock);
if (selem_linked_to_sk(selem))
rcu_assign_pointer(sk_storage->cache[smap->cache_idx],
raw_spin_unlock_bh(&sk_storage->lock);

static struct bpf_sk_storage_data *
sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
struct bpf_sk_storage *sk_storage;
struct bpf_sk_storage_map *smap;

sk_storage = rcu_dereference(sk->sk_bpf_storage);

smap = (struct bpf_sk_storage_map *)map;
return __sk_storage_lookup(sk_storage, smap, cacheit_lockit);

static int check_flags(const struct bpf_sk_storage_data *old_sdata,
if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
/* elem already exists */

if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
/* elem doesn't exist, cannot update it */
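
/* Allocate a new bpf_sk_storage for sk, link first_selem both to it and
 * to the map, and publish it to sk->sk_bpf_storage with cmpxchg(). If a
 * racing allocator published one first, first_selem is unlinked again
 * and an error is returned to the caller.
 */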
static int sk_storage_alloc(struct sock *sk,
struct bpf_sk_storage_map *smap,
struct bpf_sk_storage_elem *first_selem)
struct bpf_sk_storage *prev_sk_storage, *sk_storage;

err = omem_charge(sk, sizeof(*sk_storage));

sk_storage = kzalloc(sizeof(*sk_storage), GFP_ATOMIC | __GFP_NOWARN);

INIT_HLIST_HEAD(&sk_storage->list);
raw_spin_lock_init(&sk_storage->lock);

__selem_link_sk(sk_storage, first_selem);
selem_link_map(smap, first_selem);
/* Publish sk_storage to sk. sk->sk_lock cannot be acquired.
 * Hence, atomic ops are used to set sk->sk_bpf_storage
 * from NULL to the newly allocated sk_storage ptr.
 *
 * From now on, the sk->sk_bpf_storage pointer is protected
 * by the sk_storage->lock. Hence, when freeing
 * the sk->sk_bpf_storage, the sk_storage->lock must
 * be held before setting sk->sk_bpf_storage to NULL.
 */
prev_sk_storage = cmpxchg((struct bpf_sk_storage **)&sk->sk_bpf_storage,
if (unlikely(prev_sk_storage)) {
selem_unlink_map(first_selem);

/* Note that even though first_selem was linked to smap's
 * bucket->list, first_selem can be freed immediately
 * (instead of kfree_rcu) because
 * bpf_sk_storage_map_free() does a
 * synchronize_rcu() before walking the bucket->list.
 * Hence, no one is accessing selem from the
 * bucket->list under rcu_read_lock().
 */

atomic_sub(sizeof(*sk_storage), &sk->sk_omem_alloc);

/* sk cannot be going away because the caller is linking a new elem
 * to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and cause other memory issues
 * during map destruction).
 */
static struct bpf_sk_storage_data *sk_storage_update(struct sock *sk,
struct bpf_sk_storage_data *old_sdata = NULL;
struct bpf_sk_storage_elem *selem;
struct bpf_sk_storage *sk_storage;
struct bpf_sk_storage_map *smap;

/* BPF_EXIST and BPF_NOEXIST cannot both be set */
if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
/* BPF_F_LOCK can only be used in a value with spin_lock */
unlikely((map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
return ERR_PTR(-EINVAL);

smap = (struct bpf_sk_storage_map *)map;
sk_storage = rcu_dereference(sk->sk_bpf_storage);
if (!sk_storage || hlist_empty(&sk_storage->list)) {
/* Very first elem for this sk */
err = check_flags(NULL, map_flags);

selem = selem_alloc(smap, sk, value, true);
return ERR_PTR(-ENOMEM);

err = sk_storage_alloc(sk, smap, selem);
atomic_sub(smap->elem_size, &sk->sk_omem_alloc);

if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
/* Hoping to find an old_sdata to do an inline update
 * such that it can avoid taking the sk_storage->lock
 * and changing the lists.
 */
old_sdata = __sk_storage_lookup(sk_storage, smap, false);
err = check_flags(old_sdata, map_flags);

if (old_sdata && selem_linked_to_sk(SELEM(old_sdata))) {
copy_map_value_locked(map, old_sdata->data,

raw_spin_lock_bh(&sk_storage->lock);

/* Recheck sk_storage->list under sk_storage->lock */
if (unlikely(hlist_empty(&sk_storage->list))) {
/* A parallel del is happening and sk_storage is going
 * away. It has just been checked before, so very
 * unlikely. Return instead of retrying to keep things
 * simple.
 */

old_sdata = __sk_storage_lookup(sk_storage, smap, false);
err = check_flags(old_sdata, map_flags);

if (old_sdata && (map_flags & BPF_F_LOCK)) {
copy_map_value_locked(map, old_sdata->data, value, false);
selem = SELEM(old_sdata);

/* sk_storage->lock is held. Hence, we are sure
 * we can unlink and uncharge the old_sdata successfully
 * later. Hence, instead of charging the new selem now
 * and then uncharging the old selem later (which may cause
 * a potential but unnecessary charge failure), avoid taking
 * a charge at all here (the "!old_sdata" check) and the
 * old_sdata will not be uncharged later during __selem_unlink_sk().
 */
selem = selem_alloc(smap, sk, value, !old_sdata);

/* First, link the new selem to the map */
selem_link_map(smap, selem);

/* Second, link (and publish) the new selem to sk_storage */
__selem_link_sk(sk_storage, selem);

/* Third, remove old selem, SELEM(old_sdata) */
selem_unlink_map(SELEM(old_sdata));
__selem_unlink_sk(sk_storage, SELEM(old_sdata), false);

raw_spin_unlock_bh(&sk_storage->lock);

raw_spin_unlock_bh(&sk_storage->lock);

static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
struct bpf_sk_storage_data *sdata;

sdata = sk_storage_lookup(sk, map, false);

selem_unlink(SELEM(sdata));
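
/* Each map gets a slot in the per-sk cache array. Pick the slot with the
 * lowest usage count so that concurrently existing maps spread across the
 * BPF_SK_STORAGE_CACHE_SIZE slots; the count is dropped again in
 * cache_idx_free() when the map is destroyed.
 */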
static u16 cache_idx_get(void)
u64 min_usage = U64_MAX;

spin_lock(&cache_idx_lock);

for (i = 0; i < BPF_SK_STORAGE_CACHE_SIZE; i++) {
if (cache_idx_usage_counts[i] < min_usage) {
min_usage = cache_idx_usage_counts[i];

/* Found a free cache_idx */

cache_idx_usage_counts[res]++;

spin_unlock(&cache_idx_lock);

static void cache_idx_free(u16 idx)
spin_lock(&cache_idx_lock);
cache_idx_usage_counts[idx]--;
spin_unlock(&cache_idx_lock);

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
struct bpf_sk_storage_elem *selem;
struct bpf_sk_storage *sk_storage;
bool free_sk_storage = false;
struct hlist_node *n;

sk_storage = rcu_dereference(sk->sk_bpf_storage);

/* Neither the bpf_prog nor the bpf-map's syscall
 * could be modifying the sk_storage->list now.
 * Thus, no elem can be added-to or deleted-from the
 * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
 *
 * It is racing with bpf_sk_storage_map_free() alone
 * when unlinking elem from the sk_storage->list and
 * the map's bucket->list.
 */
raw_spin_lock_bh(&sk_storage->lock);
hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
/* Always unlink from map before unlinking from
 * sk_storage.
 */
selem_unlink_map(selem);
free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
raw_spin_unlock_bh(&sk_storage->lock);

kfree_rcu(sk_storage, rcu);

static void bpf_sk_storage_map_free(struct bpf_map *map)
struct bpf_sk_storage_elem *selem;
struct bpf_sk_storage_map *smap;

smap = (struct bpf_sk_storage_map *)map;

cache_idx_free(smap->cache_idx);

/* Note that this map might be concurrently cloned by
 * bpf_sk_storage_clone(). Wait for any existing bpf_sk_storage_clone()
 * RCU read section to finish before proceeding. New RCU
 * read sections should be prevented via bpf_map_inc_not_zero.
 */

/* bpf prog and the userspace can no longer access this map
 * now. No new selem (of this map) can be added
 * to the sk->sk_bpf_storage or to the map bucket's list.
 *
 * The elem of this map can be cleaned up here or
 * by bpf_sk_storage_free() during __sk_destruct().
 */
for (i = 0; i < (1U << smap->bucket_log); i++) {
b = &smap->buckets[i];

/* No one is adding to b->list now */
while ((selem = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(&b->list)),
struct bpf_sk_storage_elem,

/* bpf_sk_storage_free() may still need to access the map.
 * e.g. bpf_sk_storage_free() has unlinked selem from the map
 * which then made the above while ((selem = ...)) loop
 * exit immediately.
 *
 * However, the bpf_sk_storage_free() still needs to access
 * the smap->elem_size to do the uncharging in
 * __selem_unlink_sk().
 *
 * Hence, wait another rcu grace period for the
 * bpf_sk_storage_free() to finish.
 */

kvfree(smap->buckets);

/* U16_MAX is much more than enough for sk local storage
 * considering a tcp_sock is ~2k.
 */
#define MAX_VALUE_SIZE							\
	min_t(u32,							\
	      (KMALLOC_MAX_SIZE - MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem)), \
	      (U16_MAX - sizeof(struct bpf_sk_storage_elem)))
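
/* Both bounds keep smap->elem_size (elem header + value) within what a
 * single kmalloc() can provide, and small enough that a whole value still
 * fits in one netlink attribute (see the BUILD_BUG_ON() in diag_get()).
 */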
static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
if (attr->map_flags & ~SK_STORAGE_CREATE_FLAG_MASK ||
!(attr->map_flags & BPF_F_NO_PREALLOC) ||
attr->key_size != sizeof(int) || !attr->value_size ||
/* Enforce BTF for userspace sk dumping */
!attr->btf_key_type_id || !attr->btf_value_type_id)

if (attr->value_size > MAX_VALUE_SIZE)

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
struct bpf_sk_storage_map *smap;

smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&smap->map, attr);

nbuckets = roundup_pow_of_two(num_possible_cpus());
/* Use at least 2 buckets; select_bucket() is undefined behavior with 1 bucket */
nbuckets = max_t(u32, 2, nbuckets);
smap->bucket_log = ilog2(nbuckets);
cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);

ret = bpf_map_charge_init(&smap->map.memory, cost);

smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
GFP_USER | __GFP_NOWARN);
if (!smap->buckets) {
bpf_map_charge_finish(&smap->map.memory);
return ERR_PTR(-ENOMEM);

for (i = 0; i < nbuckets; i++) {
INIT_HLIST_HEAD(&smap->buckets[i].list);
raw_spin_lock_init(&smap->buckets[i].lock);

smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
smap->cache_idx = cache_idx_get();
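
/* sk storage maps are keyed by a socket rather than a user-visible key
 * space, so iterating keys from the syscall side is not supported.
 */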
static int notsupp_get_next_key(struct bpf_map *map, void *key,

static int bpf_sk_storage_map_check_btf(const struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)
if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)

int_data = *(u32 *)(key_type + 1);
if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
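
/* Syscall-side accessors: the map "key" is a socket fd, resolved with
 * sockfd_lookup() before delegating to the sk_storage_* helpers above.
 */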
static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
struct bpf_sk_storage_data *sdata;

sock = sockfd_lookup(fd, &err);
sdata = sk_storage_lookup(sock->sk, map, true);
return sdata ? sdata->data : NULL;

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags)
struct bpf_sk_storage_data *sdata;

sock = sockfd_lookup(fd, &err);
sdata = sk_storage_update(sock->sk, map, value, map_flags);
return PTR_ERR_OR_ZERO(sdata);

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)

sock = sockfd_lookup(fd, &err);
err = sk_storage_delete(sock->sk, map);
static struct bpf_sk_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
struct bpf_sk_storage_map *smap,
struct bpf_sk_storage_elem *selem)
struct bpf_sk_storage_elem *copy_selem;

copy_selem = selem_alloc(smap, newsk, NULL, true);

if (map_value_has_spin_lock(&smap->map))
copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
SDATA(selem)->data, true);
copy_map_value(&smap->map, SDATA(copy_selem)->data,

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
struct bpf_sk_storage *new_sk_storage = NULL;
struct bpf_sk_storage *sk_storage;
struct bpf_sk_storage_elem *selem;

RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

sk_storage = rcu_dereference(sk->sk_bpf_storage);

if (!sk_storage || hlist_empty(&sk_storage->list))

hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
struct bpf_sk_storage_elem *copy_selem;
struct bpf_sk_storage_map *smap;

smap = rcu_dereference(SDATA(selem)->smap);
if (!(smap->map.map_flags & BPF_F_CLONE))

/* Note that for lockless listeners adding a new element
 * here can race with cleanup in bpf_sk_storage_map_free().
 * Try to grab the map refcnt to make sure that it's still
 * alive and prevent concurrent removal.
 */
map = bpf_map_inc_not_zero(&smap->map);

copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);

if (new_sk_storage) {
selem_link_map(smap, copy_selem);
__selem_link_sk(new_sk_storage, copy_selem);
ret = sk_storage_alloc(newsk, smap, copy_selem);
atomic_sub(smap->elem_size,
&newsk->sk_omem_alloc);

new_sk_storage = rcu_dereference(copy_selem->sk_storage);

/* In case of an error, don't free anything explicitly here; the
 * caller is responsible for calling bpf_sk_storage_free().
 */
BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
void *, value, u64, flags)
struct bpf_sk_storage_data *sdata;

if (flags > BPF_SK_STORAGE_GET_F_CREATE)
return (unsigned long)NULL;

sdata = sk_storage_lookup(sk, map, true);
return (unsigned long)sdata->data;

if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
/* Cannot add a new elem to a going-away sk.
 * Otherwise, the new elem may become a leak
 * (and also cause other memory issues during map
 * destruction).
 */
refcount_inc_not_zero(&sk->sk_refcnt)) {
sdata = sk_storage_update(sk, map, value, BPF_NOEXIST);
/* sk must be a fullsock (guaranteed by verifier),
 * so sock_gen_put() is unnecessary.
 */
return IS_ERR(sdata) ?
(unsigned long)NULL : (unsigned long)sdata->data;

return (unsigned long)NULL;

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
if (refcount_inc_not_zero(&sk->sk_refcnt)) {

err = sk_storage_delete(sk, map);

static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
.map_alloc_check = bpf_sk_storage_map_alloc_check,
.map_alloc = bpf_sk_storage_map_alloc,
.map_free = bpf_sk_storage_map_free,
.map_get_next_key = notsupp_get_next_key,
.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
.map_update_elem = bpf_fd_sk_storage_update_elem,
.map_delete_elem = bpf_fd_sk_storage_delete_elem,
.map_check_btf = bpf_sk_storage_map_check_btf,
.map_btf_name = "bpf_sk_storage_map",
.map_btf_id = &sk_storage_map_btf_id,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
.func = bpf_sk_storage_get,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_SOCKET,
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.func = bpf_sk_storage_delete,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_SOCKET,
};
struct bpf_sk_storage_diag {
struct bpf_map *maps[];

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 */
static int nla_value_size(u32 value_size)
/* SK_DIAG_BPF_STORAGE (nla_nest)
 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 */
return nla_total_size(0) + nla_total_size(sizeof(u32)) +
nla_total_size_64bit(value_size);
void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)

for (i = 0; i < diag->nr_maps; i++)
bpf_map_put(diag->maps[i]);

EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
const struct bpf_map *map)

for (i = 0; i < diag->nr_maps; i++) {
if (diag->maps[i] == map)

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
struct bpf_sk_storage_diag *diag;

/* bpf_sk_storage_map is currently limited to CAP_SYS_ADMIN,
 * matching the capability check on the map_alloc_check() side.
 */
return ERR_PTR(-EPERM);

nla_for_each_nested(nla, nla_stgs, rem) {
if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)

diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps,
return ERR_PTR(-ENOMEM);

nla_for_each_nested(nla, nla_stgs, rem) {
struct bpf_map *map;

if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)

map_fd = nla_get_u32(nla);
map = bpf_map_get(map_fd);

if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {

if (diag_check_dup(diag, map)) {

diag->maps[diag->nr_maps++] = map;

bpf_sk_storage_diag_free(diag);
return ERR_PTR(err);

EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);
static int diag_get(struct bpf_sk_storage_data *sdata, struct sk_buff *skb)
struct nlattr *nla_stg, *nla_value;
struct bpf_sk_storage_map *smap;

/* The value cannot exceed the max nlattr payload */
BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < MAX_VALUE_SIZE);

nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);

smap = rcu_dereference(sdata->smap);
if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))

nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
smap->map.value_size,
SK_DIAG_BPF_STORAGE_PAD);

if (map_value_has_spin_lock(&smap->map))
copy_map_value_locked(&smap->map, nla_data(nla_value),
copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

nla_nest_end(skb, nla_stg);

nla_nest_cancel(skb, nla_stg);
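
/* Dump every sk storage attached to sk as one stg_array_type nest.
 * diag_size keeps accumulating even once the skb runs out of room, so
 * the caller still learns the buffer size it would have needed.
 */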
static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
unsigned int *res_diag_size)
/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
unsigned int diag_size = nla_total_size(0);
struct bpf_sk_storage *sk_storage;
struct bpf_sk_storage_elem *selem;
struct bpf_sk_storage_map *smap;
struct nlattr *nla_stgs;
unsigned int saved_len;

sk_storage = rcu_dereference(sk->sk_bpf_storage);
if (!sk_storage || hlist_empty(&sk_storage->list)) {

nla_stgs = nla_nest_start(skb, stg_array_type);
/* Continue to learn diag_size */

saved_len = skb->len;
hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
smap = rcu_dereference(SDATA(selem)->smap);
diag_size += nla_value_size(smap->map.value_size);

if (nla_stgs && diag_get(SDATA(selem), skb))
/* Continue to learn diag_size */

if (saved_len == skb->len)
nla_nest_cancel(skb, nla_stgs);
nla_nest_end(skb, nla_stgs);

if (diag_size == nla_total_size(0)) {

*res_diag_size = diag_size;
int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
struct sock *sk, struct sk_buff *skb,
unsigned int *res_diag_size)
/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
unsigned int diag_size = nla_total_size(0);
struct bpf_sk_storage *sk_storage;
struct bpf_sk_storage_data *sdata;
struct nlattr *nla_stgs;
unsigned int saved_len;

/* No map has been specified. Dump all. */
return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,

sk_storage = rcu_dereference(sk->sk_bpf_storage);
if (!sk_storage || hlist_empty(&sk_storage->list)) {

nla_stgs = nla_nest_start(skb, stg_array_type);
/* Continue to learn diag_size */

saved_len = skb->len;
for (i = 0; i < diag->nr_maps; i++) {
sdata = __sk_storage_lookup(sk_storage,
(struct bpf_sk_storage_map *)diag->maps[i],

diag_size += nla_value_size(diag->maps[i]->value_size);

if (nla_stgs && diag_get(sdata, skb))
/* Continue to learn diag_size */

if (saved_len == skb->len)
nla_nest_cancel(skb, nla_stgs);
nla_nest_end(skb, nla_stgs);

if (diag_size == nla_total_size(0)) {

*res_diag_size = diag_size;

EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);