/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have BPF programs attached to it, currently a program
 * used to parse packets and a program to provide a verdict and redirect
 * decision on the packet are supported. Any programs attached to a sock
 * map are inherited by sock objects when they are added to the map. If
 * no BPF programs are attached the sock object may only be used for sock
 * redirect.
 *
 * A sock object may be in multiple maps, but can only inherit a single
 * parse or verdict program. If adding a sock object to a map would result
 * in having multiple parsing programs the update will return an EBUSY error.
 *
 * For reference, this map is similar to devmap used in the XDP context;
 * reviewing these together may be useful. For an example please review
 * ./samples/bpf/sockmap/.
 */
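
/* A rough user-space flow (illustrative sketch, not part of this file;
 * assumes the usual bpf syscall wrappers from tools/lib/bpf):
 *
 *	map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(__u32),
 *				sizeof(__u32), max_entries, 0);
 *	bpf_prog_attach(parser_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *	bpf_map_update_elem(map_fd, &key, &tcp_sock_fd, BPF_ANY);
 *
 * after which the verdict program can steer skbs between the stored
 * sockets with bpf_sk_redirect_map().
 */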
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <net/strparser.h>
#include <net/tcp.h>
#include <linux/ptr_ring.h>
#include <net/inet_common.h>
#include <linux/sched/signal.h>

#define SOCK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
struct bpf_sock_progs {
	struct bpf_prog *bpf_tx_msg;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
};

struct bpf_stab {
	struct bpf_map map;
	struct sock **sock_map;
	struct bpf_sock_progs progs;
};

struct bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	atomic_t count;
	u32 n_buckets;
	u32 elem_size;
	struct bpf_sock_progs progs;
	struct rcu_head rcu;
};

struct htab_elem {
	struct rcu_head rcu;
	struct hlist_node hash_node;
	u32 hash;
	struct sock *sk;
	char key[0];
};
enum smap_psock_state {
	SMAP_TX_RUNNING,
};

struct smap_psock_map_entry {
	struct list_head list;
	struct sock **entry;
	struct htab_elem __rcu *hash_link;
	struct bpf_htab __rcu *htab;
};
struct smap_psock {
	struct rcu_head	rcu;
	refcount_t refcnt;

	/* datapath variables */
	struct sk_buff_head rxqueue;
	bool strp_enabled;

	/* datapath error path cache across tx work invocations */
	int save_rem;
	int save_off;
	struct sk_buff *save_skb;

	/* datapath variables for tx_msg ULP */
	struct sock *sk_redir;
	int apply_bytes;
	int cork_bytes;
	int sg_size;
	int eval;
	struct sk_msg_buff *cork;
	struct list_head ingress;

	struct strparser strp;
	struct bpf_prog *bpf_tx_msg;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	struct list_head maps;
	spinlock_t maps_lock;

	/* Back reference used when sock callbacks trigger sockmap operations */
	struct sock *sock;
	unsigned long state;

	struct work_struct tx_work;
	struct work_struct gc_work;

	struct proto *sk_proto;
	void (*save_close)(struct sock *sk, long timeout);
	void (*save_data_ready)(struct sock *sk);
	void (*save_write_space)(struct sock *sk);
};
static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int nonblock, int flags, int *addr_len);
static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
			    int offset, size_t size, int flags);
static void bpf_tcp_close(struct sock *sk, long timeout);
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

static bool bpf_tcp_stream_read(const struct sock *sk)
{
	struct smap_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock))
		goto out;
	empty = list_empty(&psock->ingress);
out:
	rcu_read_unlock();
	return !empty;
}
enum {
	SOCKMAP_IPV4,
	SOCKMAP_IPV6,
	SOCKMAP_NUM_PROTS,
};

enum {
	SOCKMAP_BASE,
	SOCKMAP_TX,
	SOCKMAP_NUM_CONFIGS,
};

static struct proto *saved_tcpv6_prot __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];

static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
			 struct proto *base)
{
	prot[SOCKMAP_BASE]			= *base;
	prot[SOCKMAP_BASE].close		= bpf_tcp_close;
	prot[SOCKMAP_BASE].recvmsg		= bpf_tcp_recvmsg;
	prot[SOCKMAP_BASE].stream_memory_read	= bpf_tcp_stream_read;

	prot[SOCKMAP_TX]			= prot[SOCKMAP_BASE];
	prot[SOCKMAP_TX].sendmsg		= bpf_tcp_sendmsg;
	prot[SOCKMAP_TX].sendpage		= bpf_tcp_sendpage;
}
static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
{
	int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
	int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;

	sk->sk_prot = &bpf_tcp_prots[family][conf];
}
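
/* For example, an AF_INET sock with a tx_msg program installed ends up on
 * bpf_tcp_prots[SOCKMAP_IPV4][SOCKMAP_TX], while an AF_INET6 sock with no
 * tx_msg program uses bpf_tcp_prots[SOCKMAP_IPV6][SOCKMAP_BASE].
 */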
static int bpf_tcp_init(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (unlikely(psock->sk_proto)) {
		rcu_read_unlock();
		return -EBUSY;
	}

	psock->save_close = sk->sk_prot->close;
	psock->sk_proto = sk->sk_prot;

	/* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
	if (sk->sk_family == AF_INET6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}

	update_sk_prot(sk, psock);
	rcu_read_unlock();
	return 0;
}
static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
static void bpf_tcp_release(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock))
		goto out;

	if (psock->cork) {
		free_start_sg(psock->sock, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}

	if (psock->sk_proto) {
		sk->sk_prot = psock->sk_proto;
		psock->sk_proto = NULL;
	}
out:
	rcu_read_unlock();
}
static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
					 u32 hash, void *key, u32 key_size)
{
	struct htab_elem *l;

	hlist_for_each_entry_rcu(l, head, hash_node) {
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;
	}

	return NULL;
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	atomic_dec(&htab->count);
	kfree_rcu(l, rcu);
}
static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
						  struct smap_psock *psock)
{
	struct smap_psock_map_entry *e;

	spin_lock_bh(&psock->maps_lock);
	e = list_first_entry_or_null(&psock->maps,
				     struct smap_psock_map_entry,
				     list);
	if (e)
		list_del(&e->list);
	spin_unlock_bh(&psock->maps_lock);
	return e;
}
static void bpf_tcp_close(struct sock *sk, long timeout)
{
	void (*close_fun)(struct sock *sk, long timeout);
	struct smap_psock_map_entry *e;
	struct sk_msg_buff *md, *mtmp;
	struct smap_psock *psock;
	struct sock *osk;

	lock_sock(sk);
	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		release_sock(sk);
		return sk->sk_prot->close(sk, timeout);
	}

	/* The psock may be destroyed anytime after exiting the RCU critical
	 * section so by the time we use close_fun the psock may no longer
	 * be valid. However, bpf_tcp_close is called with the sock lock
	 * held so the close hook and sk are still valid.
	 */
	close_fun = psock->save_close;

	if (psock->cork) {
		free_start_sg(psock->sock, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}

	list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
		list_del(&md->list);
		free_start_sg(psock->sock, md);
		kfree(md);
	}

	e = psock_map_pop(sk, psock);
	while (e) {
		if (e->entry) {
			osk = cmpxchg(e->entry, sk, NULL);
			if (osk == sk)
				smap_release_sock(psock, sk);
		} else {
			struct htab_elem *link = rcu_dereference(e->hash_link);
			struct bpf_htab *htab = rcu_dereference(e->htab);
			struct hlist_head *head;
			struct htab_elem *l;
			struct bucket *b;

			b = __select_bucket(htab, link->hash);
			head = &b->head;
			raw_spin_lock_bh(&b->lock);
			l = lookup_elem_raw(head,
					    link->hash, link->key,
					    htab->map.key_size);
			/* If another thread deleted this object skip deletion.
			 * The refcnt on psock may or may not be zero.
			 */
			if (l) {
				hlist_del_rcu(&link->hash_node);
				smap_release_sock(psock, link->sk);
				free_htab_elem(htab, link);
			}
			raw_spin_unlock_bh(&b->lock);
		}
		kfree(e);
		e = psock_map_pop(sk, psock);
	}
	rcu_read_unlock();
	release_sock(sk);
	close_fun(sk, timeout);
}

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};
static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
	.name		= "bpf_tcp",
	.uid		= TCP_ULP_BPF,
	.user_visible	= false,
	.owner		= NULL,
	.init		= bpf_tcp_init,
	.release	= bpf_tcp_release,
};
static int memcopy_from_iter(struct sock *sk,
			     struct sk_msg_buff *md,
			     struct iov_iter *from, int bytes)
{
	struct scatterlist *sg = md->sg_data;
	int i = md->sg_curr, rc = -ENOSPC;

	do {
		int copy;
		char *to;

		if (md->sg_copybreak >= sg[i].length) {
			md->sg_copybreak = 0;

			if (++i == MAX_SKB_FRAGS)
				i = 0;

			if (i == md->sg_end)
				break;
		}

		copy = sg[i].length - md->sg_copybreak;
		to = sg_virt(&sg[i]) + md->sg_copybreak;
		md->sg_copybreak += copy;

		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			rc = copy_from_iter_nocache(to, copy, from);
		else
			rc = copy_from_iter(to, copy, from);

		if (rc != copy) {
			rc = -EFAULT;
			goto out;
		}

		bytes -= copy;
		if (!bytes)
			break;

		md->sg_copybreak = 0;
		if (++i == MAX_SKB_FRAGS)
			i = 0;
	} while (i != md->sg_end);
out:
	md->sg_curr = i;
	return rc;
}
static int bpf_tcp_push(struct sock *sk, int apply_bytes,
			struct sk_msg_buff *md,
			int flags, bool uncharge)
{
	bool apply = apply_bytes;
	struct scatterlist *sg;
	int offset, ret = 0;
	struct page *p;
	size_t size;

	while (1) {
		sg = md->sg_data + md->sg_start;
		size = (apply && apply_bytes < sg->length) ?
			apply_bytes : sg->length;
		offset = sg->offset;

		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, flags);
		if (ret != size) {
			if (ret > 0) {
				if (apply)
					apply_bytes -= ret;

				sg->offset += ret;
				sg->length -= ret;
				size -= ret;
				offset += ret;
				if (uncharge)
					sk_mem_uncharge(sk, ret);
				goto retry;
			}

			return ret;
		}

		if (apply)
			apply_bytes -= ret;
		sg->offset += ret;
		sg->length -= ret;
		size -= ret;

		if (uncharge)
			sk_mem_uncharge(sk, ret);

		if (!sg->length) {
			put_page(p);
			md->sg_start++;
			if (md->sg_start == MAX_SKB_FRAGS)
				md->sg_start = 0;
			sg_init_table(sg, 1);

			if (md->sg_start == md->sg_end)
				break;
		}

		if (apply && !apply_bytes)
			break;
	}
	return 0;
}
static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md)
{
	struct scatterlist *sg = md->sg_data + md->sg_start;

	if (md->sg_copy[md->sg_start]) {
		md->data = md->data_end = 0;
	} else {
		md->data = sg_virt(sg);
		md->data_end = md->data + sg->length;
	}
}
static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
{
	struct scatterlist *sg = md->sg_data;
	int i = md->sg_start;

	do {
		int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		if (!bytes)
			break;
		i++;
		if (i == MAX_SKB_FRAGS)
			i = 0;
	} while (i != md->sg_end);
}
static void free_bytes_sg(struct sock *sk, int bytes,
			  struct sk_msg_buff *md, bool charge)
{
	struct scatterlist *sg = md->sg_data;
	int i = md->sg_start, free;

	while (bytes && sg[i].length) {
		free = sg[i].length;
		if (bytes < free) {
			sg[i].length -= bytes;
			sg[i].offset += bytes;
			if (charge)
				sk_mem_uncharge(sk, bytes);
			break;
		}

		if (charge)
			sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		bytes -= sg[i].length;
		sg[i].length = 0;
		sg[i].page_link = 0;
		sg[i].offset = 0;
		i++;

		if (i == MAX_SKB_FRAGS)
			i = 0;
	}
	md->sg_start = i;
}
static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
{
	struct scatterlist *sg = md->sg_data;
	int i = start, free = 0;

	while (sg[i].length) {
		free += sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		sg[i].length = 0;
		sg[i].page_link = 0;
		sg[i].offset = 0;
		i++;

		if (i == MAX_SKB_FRAGS)
			i = 0;
	}

	return free;
}

static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
{
	int free = free_sg(sk, md->sg_start, md);

	md->sg_start = md->sg_end;
	return free;
}

static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
{
	return free_sg(sk, md->sg_curr, md);
}
static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
{
	return ((_rc == SK_PASS) ?
	       (md->sk_redir ? __SK_REDIRECT : __SK_PASS) :
	       __SK_DROP);
}
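
/* The mapping implemented above, spelled out:
 *
 *	SK_PASS with md->sk_redir set	-> __SK_REDIRECT
 *	SK_PASS without md->sk_redir	-> __SK_PASS
 *	anything else (e.g. SK_DROP)	-> __SK_DROP
 */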
static unsigned int smap_do_tx_msg(struct sock *sk,
				   struct smap_psock *psock,
				   struct sk_msg_buff *md)
{
	struct bpf_prog *prog;
	unsigned int rc, _rc;

	preempt_disable();
	rcu_read_lock();

	/* If the policy was removed mid-send then default to 'accept' */
	prog = READ_ONCE(psock->bpf_tx_msg);
	if (unlikely(!prog)) {
		_rc = __SK_PASS;
		goto verdict;
	}

	bpf_compute_data_pointers_sg(md);
	md->sk = sk;
	rc = (*prog->bpf_func)(md, prog->insnsi);
	psock->apply_bytes = md->apply_bytes;

	/* Moving return codes from UAPI namespace into internal namespace */
	_rc = bpf_map_msg_verdict(rc, md);

	/* The psock has a refcount on the sock but not on the map and because
	 * we need to drop the rcu read lock here it's possible the map could
	 * be removed between here and when we need it to execute the sock
	 * redirect. So do the map lookup now for future use.
	 */
	if (_rc == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = do_msg_redirect_map(md);
		if (!psock->sk_redir) {
			_rc = __SK_DROP;
			goto verdict;
		}
		sock_hold(psock->sk_redir);
	}
verdict:
	rcu_read_unlock();
	preempt_enable();
	return _rc;
}
static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
			   struct smap_psock *psock,
			   struct sk_msg_buff *md, int flags)
{
	bool apply = apply_bytes;
	size_t size, copied = 0;
	struct sk_msg_buff *r;
	int err = 0, i;

	r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!r))
		return -EAGAIN;

	lock_sock(sk);
	r->sg_start = md->sg_start;
	i = md->sg_start;

	do {
		size = (apply && apply_bytes < md->sg_data[i].length) ?
			apply_bytes : md->sg_data[i].length;

		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				err = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		r->sg_data[i] = md->sg_data[i];
		r->sg_data[i].length = size;
		md->sg_data[i].length -= size;
		md->sg_data[i].offset += size;
		copied += size;

		if (md->sg_data[i].length) {
			get_page(sg_page(&r->sg_data[i]));
			r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
		} else {
			i++;
			if (i == MAX_SKB_FRAGS)
				i = 0;
			r->sg_end = i;
		}

		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes)
				break;
		}
	} while (i != md->sg_end);

	md->sg_start = i;

	if (!err) {
		list_add_tail(&r->list, &psock->ingress);
		sk->sk_data_ready(sk);
	} else {
		free_start_sg(sk, r);
		kfree(r);
	}

	release_sock(sk);
	return err;
}
static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
				       struct sk_msg_buff *md,
				       int flags)
{
	bool ingress = !!(md->flags & BPF_F_INGRESS);
	struct smap_psock *psock;
	int err = 0;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock))
		goto out_rcu;

	if (!refcount_inc_not_zero(&psock->refcnt))
		goto out_rcu;

	rcu_read_unlock();

	if (ingress) {
		err = bpf_tcp_ingress(sk, send, psock, md, flags);
	} else {
		lock_sock(sk);
		err = bpf_tcp_push(sk, send, md, flags, false);
		release_sock(sk);
	}
	smap_release_sock(psock, sk);
	return err;
out_rcu:
	rcu_read_unlock();
	free_bytes_sg(NULL, send, md, false);
	return 0;
}
static inline void bpf_md_init(struct smap_psock *psock)
{
	if (!psock->apply_bytes) {
		psock->eval = __SK_NONE;
		if (psock->sk_redir) {
			sock_put(psock->sk_redir);
			psock->sk_redir = NULL;
		}
	}
}

static void apply_bytes_dec(struct smap_psock *psock, int i)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < i)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= i;
	}
}
static int bpf_exec_tx_verdict(struct smap_psock *psock,
			       struct sk_msg_buff *m,
			       struct sock *sk,
			       int *copied, int flags)
{
	bool cork = false, enospc = (m->sg_start == m->sg_end);
	struct sock *redir;
	int err = 0;
	int send;

more_data:
	if (psock->eval == __SK_NONE)
		psock->eval = smap_do_tx_msg(sk, psock, m);

	if (m->cork_bytes &&
	    m->cork_bytes > psock->sg_size && !enospc) {
		psock->cork_bytes = m->cork_bytes - psock->sg_size;
		if (!psock->cork) {
			psock->cork = kcalloc(1,
					sizeof(struct sk_msg_buff),
					GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork) {
				err = -ENOMEM;
				goto out_err;
			}
		}
		memcpy(psock->cork, m, sizeof(*m));
		goto out_err;
	}

	send = psock->sg_size;
	if (psock->apply_bytes && psock->apply_bytes < send)
		send = psock->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = bpf_tcp_push(sk, send, m, flags, true);
		if (unlikely(err)) {
			*copied -= free_start_sg(sk, m);
			break;
		}

		apply_bytes_dec(psock, send);
		psock->sg_size -= send;
		break;
	case __SK_REDIRECT:
		redir = psock->sk_redir;
		apply_bytes_dec(psock, send);

		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}

		return_mem_sg(sk, send, m);
		release_sock(sk);

		err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
		lock_sock(sk);

		if (unlikely(err < 0)) {
			free_start_sg(sk, m);
			psock->sg_size = 0;
			if (!cork)
				*copied -= send;
		} else {
			psock->sg_size -= send;
		}

		if (cork) {
			free_start_sg(sk, m);
			psock->sg_size = 0;
			kfree(m);
			m = NULL;
			err = 0;
		}
		break;
	case __SK_DROP:
	default:
		free_bytes_sg(sk, send, m, true);
		apply_bytes_dec(psock, send);
		*copied -= send;
		psock->sg_size -= send;
		err = -EACCES;
		break;
	}

	if (likely(!err)) {
		bpf_md_init(psock);
		if (m &&
		    m->sg_data[m->sg_start].page_link &&
		    m->sg_data[m->sg_start].length)
			goto more_data;
	}

out_err:
	return err;
}
static int bpf_wait_data(struct sock *sk,
			 struct smap_psock *psk, int flags,
			 long timeo, int *err)
{
	int rc;

	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	rc = sk_wait_event(sk, &timeo,
			   !list_empty(&psk->ingress) ||
			   !skb_queue_empty(&sk->sk_receive_queue),
			   &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);

	return rc;
}
static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int nonblock, int flags, int *addr_len)
{
	struct iov_iter *iter = &msg->msg_iter;
	struct smap_psock *psock;
	int copied = 0;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock))
		goto out;

	if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
		goto out;
	rcu_read_unlock();

	if (!skb_queue_empty(&sk->sk_receive_queue))
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);

	lock_sock(sk);
bytes_ready:
	while (copied != len) {
		struct scatterlist *sg;
		struct sk_msg_buff *md;
		int i;

		md = list_first_entry_or_null(&psock->ingress,
					      struct sk_msg_buff, list);
		if (unlikely(!md))
			break;
		i = md->sg_start;
		do {
			struct page *page;
			int n, copy;

			sg = &md->sg_data[i];
			copy = sg->length;
			page = sg_page(sg);

			if (copied + copy > len)
				copy = len - copied;

			n = copy_page_to_iter(page, sg->offset, copy, iter);
			if (n != copy) {
				md->sg_start = i;
				release_sock(sk);
				smap_release_sock(psock, sk);
				return -EFAULT;
			}

			copied += copy;
			sg->offset += copy;
			sg->length -= copy;
			sk_mem_uncharge(sk, copy);

			if (!sg->length) {
				i++;
				if (i == MAX_SKB_FRAGS)
					i = 0;
				if (!md->skb)
					put_page(page);
			}
			if (copied == len)
				break;
		} while (i != md->sg_end);
		md->sg_start = i;

		if (!sg->length && md->sg_start == md->sg_end) {
			list_del(&md->list);
			if (md->skb)
				consume_skb(md->skb);
			kfree(md);
		}
	}

	if (!copied) {
		long timeo;
		int data;
		int err = 0;

		timeo = sock_rcvtimeo(sk, nonblock);
		data = bpf_wait_data(sk, psock, flags, timeo, &err);

		if (data) {
			if (!skb_queue_empty(&sk->sk_receive_queue)) {
				release_sock(sk);
				smap_release_sock(psock, sk);
				copied = tcp_recvmsg(sk, msg, len, nonblock,
						     flags, addr_len);
				return copied;
			}
			goto bytes_ready;
		}

		if (err)
			copied = err;
	}

	release_sock(sk);
	smap_release_sock(psock, sk);
	return copied;
out:
	rcu_read_unlock();
	return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
}
static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
	struct sk_msg_buff md = {0};
	unsigned int sg_copy = 0;
	struct smap_psock *psock;
	int copied = 0, err = 0;
	struct scatterlist *sg;
	long timeo;

	/* It's possible a sock event or user removed the psock _but_ the ops
	 * have not been reprogrammed yet so we get here. In this case fall
	 * back to tcp_sendmsg. Note this only works because we _only_ ever
	 * allow a single ULP; there is no hierarchy here.
	 */
	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		return tcp_sendmsg(sk, msg, size);
	}

	/* Increment the psock refcnt to ensure it's not released while
	 * sending a message. Required because sk lookup and bpf programs are
	 * used in separate rcu critical sections. It's OK if we lose the map
	 * entry but we can't lose the sock reference.
	 */
	if (!refcount_inc_not_zero(&psock->refcnt)) {
		rcu_read_unlock();
		return tcp_sendmsg(sk, msg, size);
	}

	sg = md.sg_data;
	sg_init_marker(sg, MAX_SKB_FRAGS);
	rcu_read_unlock();

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	while (msg_data_left(msg)) {
		struct sk_msg_buff *m;
		bool enospc = false;
		int copy;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

		m = psock->cork_bytes ? psock->cork : &md;
		m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end;
		err = sk_alloc_sg(sk, copy, m->sg_data,
				  m->sg_start, &m->sg_end, &sg_copy,
				  m->sg_end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = sg_copy;
		}

		err = memcopy_from_iter(sk, m, &msg->msg_iter, copy);
		if (err < 0) {
			free_curr_sg(sk, m);
			goto out_err;
		}

		psock->sg_size += copy;
		copied += copy;
		sg_copy = 0;

		/* When bytes are being corked skip running BPF program and
		 * applying verdict unless there is no more buffer space. In
		 * the ENOSPC case simply run BPF program with currently
		 * accumulated data. We don't have much choice at this point;
		 * we could try extending the page frags or chaining complex
		 * frags but even in these cases _eventually_ we will hit an
		 * OOM scenario. More complex recovery schemes may be
		 * implemented in the future, but BPF programs must handle
		 * the case where apply_cork requests are not honored. The
		 * canonical method to verify this is to check data length.
		 */
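		/* Illustrative sketch of the BPF side (not part of this
		 * file): a BPF_PROG_TYPE_SK_MSG program that requested
		 * corking via bpf_msg_cork_bytes(msg, 512) should re-check
		 * msg->data_end - msg->data on each invocation, because
		 * under ENOSPC it may run with fewer bytes accumulated.
		 */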
		if (psock->cork_bytes) {
			if (copy > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= copy;

			if (psock->cork_bytes && !enospc)
				goto out_cork;

			/* All cork bytes accounted for, re-run filter */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_err;
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
out_cork:
	release_sock(sk);
	smap_release_sock(psock, sk);
	return copied ? copied : err;
}
static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sk_msg_buff md = {0}, *m = NULL;
	int err = 0, copied = 0;
	struct smap_psock *psock;
	struct scatterlist *sg;
	bool enospc = false;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock))
		goto accept;

	if (!refcount_inc_not_zero(&psock->refcnt))
		goto accept;
	rcu_read_unlock();

	lock_sock(sk);

	if (psock->cork_bytes) {
		m = psock->cork;
		sg = &m->sg_data[m->sg_end];
	} else {
		m = &md;
		sg = m->sg_data;
		sg_init_marker(sg, MAX_SKB_FRAGS);
	}

	/* Catch case where ring is full and sendpage is stalled. */
	if (unlikely(m->sg_end == m->sg_start &&
	    m->sg_data[m->sg_end].length))
		goto out_err;

	psock->sg_size += size;
	sg_set_page(sg, page, size, offset);
	get_page(page);
	m->sg_copy[m->sg_end] = true;
	sk_mem_charge(sk, size);
	m->sg_end++;
	copied = size;

	if (m->sg_end == MAX_SKB_FRAGS)
		m->sg_end = 0;

	if (m->sg_end == m->sg_start)
		enospc = true;

	if (psock->cork_bytes) {
		if (size > psock->cork_bytes)
			psock->cork_bytes = 0;
		else
			psock->cork_bytes -= size;

		if (psock->cork_bytes && !enospc)
			goto out_err;

		/* All cork bytes accounted for, re-run filter */
		psock->eval = __SK_NONE;
		psock->cork_bytes = 0;
	}

	err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
out_err:
	release_sock(sk);
	smap_release_sock(psock, sk);
	return copied ? copied : err;
accept:
	rcu_read_unlock();
	return tcp_sendpage(sk, page, offset, size, flags);
}
static void bpf_tcp_msg_add(struct smap_psock *psock,
			    struct sock *sk,
			    struct bpf_prog *tx_msg)
{
	struct bpf_prog *orig_tx_msg;

	orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg);
	if (orig_tx_msg)
		bpf_prog_put(orig_tx_msg);
}
static int bpf_tcp_ulp_register(void)
{
	build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
	/* Once BPF TX ULP is registered it is never unregistered. It
	 * will be in the ULP list for the lifetime of the system. Doing
	 * duplicate registers is not a problem.
	 */
	return tcp_register_ulp(&bpf_tcp_ulp_ops);
}
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
	int rc;

	if (unlikely(!prog))
		return __SK_DROP;

	skb_orphan(skb);
	/* We need to ensure that BPF metadata for maps is also cleared
	 * when we orphan the skb so that we don't have the possibility
	 * to reference a stale map.
	 */
	TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
	skb->sk = psock->sock;
	bpf_compute_data_pointers(skb);
	preempt_disable();
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	preempt_enable();
	skb->sk = NULL;

	/* Moving return codes from UAPI namespace into internal namespace */
	return rc == SK_PASS ?
		(TCP_SKB_CB(skb)->bpf.sk_redir ? __SK_REDIRECT : __SK_PASS) :
		__SK_DROP;
}
static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sock;
	int copied = 0, num_sg;
	struct sk_msg_buff *r;

	r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
	if (unlikely(!r))
		return -EAGAIN;

	if (!sk_rmem_schedule(sk, skb, skb->len)) {
		kfree(r);
		return -EAGAIN;
	}

	sg_init_table(r->sg_data, MAX_SKB_FRAGS);
	num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		kfree(r);
		return num_sg;
	}
	sk_mem_charge(sk, skb->len);
	copied = skb->len;
	r->sg_start = 0;
	r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
	r->skb = skb;
	list_add_tail(&r->list, &psock->ingress);
	sk->sk_data_ready(sk);
	return copied;
}
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
	struct smap_psock *peer;
	struct sock *sk;
	__u32 in;
	int rc;

	rc = smap_verdict_func(psock, skb);
	switch (rc) {
	case __SK_REDIRECT:
		sk = do_sk_redirect_map(skb);
		if (!sk) {
			kfree_skb(skb);
			break;
		}

		peer = smap_psock_sk(sk);
		in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;

		if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
			     !test_bit(SMAP_TX_RUNNING, &peer->state))) {
			kfree_skb(skb);
			break;
		}

		if (!in && sock_writeable(sk)) {
			skb_set_owner_w(skb, sk);
			skb_queue_tail(&peer->rxqueue, skb);
			schedule_work(&peer->tx_work);
			break;
		} else if (in &&
			   atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
			skb_queue_tail(&peer->rxqueue, skb);
			schedule_work(&peer->tx_work);
			break;
		}
	/* Fall through and free skb otherwise */
	case __SK_DROP:
	default:
		kfree_skb(skb);
	}
}
static void smap_report_sk_error(struct smap_psock *psock, int err)
{
	struct sock *sk = psock->sock;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static void smap_read_sock_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	smap_do_verdict(psock, skb);
	rcu_read_unlock();
}
/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock)) {
		write_lock_bh(&sk->sk_callback_lock);
		strp_data_ready(&psock->strp);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	rcu_read_unlock();
}
static void smap_tx_work(struct work_struct *w)
{
	struct smap_psock *psock;
	struct sk_buff *skb;
	int rem, off, n;

	psock = container_of(w, struct smap_psock, tx_work);

	/* lock sock to avoid losing sk_socket at some point during loop */
	lock_sock(psock->sock);
	if (psock->save_skb) {
		skb = psock->save_skb;
		rem = psock->save_rem;
		off = psock->save_off;
		psock->save_skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->rxqueue))) {
		__u32 flags;

		rem = skb->len;
		off = 0;
start:
		flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
		do {
			if (likely(psock->sock->sk_socket)) {
				if (flags)
					n = smap_do_ingress(psock, skb);
				else
					n = skb_send_sock_locked(psock->sock,
								 skb, off, rem);
			} else {
				n = -EINVAL;
			}

			if (n <= 0) {
				if (n == -EAGAIN) {
					/* Retry when space is available */
					psock->save_skb = skb;
					psock->save_rem = rem;
					psock->save_off = off;
					goto out;
				}
				/* Hard errors break pipe and stop xmit */
				smap_report_sk_error(psock, n ? -n : EPIPE);
				clear_bit(SMAP_TX_RUNNING, &psock->state);
				kfree_skb(skb);
				goto out;
			}
			rem -= n;
			off += n;
		} while (rem);

		if (!flags)
			kfree_skb(skb);
	}
out:
	release_sock(psock->sock);
}
static void smap_write_space(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
		schedule_work(&psock->tx_work);
	rcu_read_unlock();
}
static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
	if (!psock->strp_enabled)
		return;
	sk->sk_data_ready = psock->save_data_ready;
	sk->sk_write_space = psock->save_write_space;
	psock->save_data_ready = NULL;
	psock->save_write_space = NULL;
	strp_stop(&psock->strp);
	psock->strp_enabled = false;
}
static void smap_destroy_psock(struct rcu_head *rcu)
{
	struct smap_psock *psock = container_of(rcu,
						struct smap_psock, rcu);

	/* Now that a grace period has passed there is no longer
	 * any reference to this sock in the sockmap so we can
	 * destroy the psock, strparser, and bpf programs. But,
	 * because we use workqueue sync operations we can not
	 * do it in rcu context.
	 */
	schedule_work(&psock->gc_work);
}
static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
	if (refcount_dec_and_test(&psock->refcnt)) {
		tcp_cleanup_ulp(sock);
		write_lock_bh(&sock->sk_callback_lock);
		smap_stop_sock(psock, sock);
		write_unlock_bh(&sock->sk_callback_lock);
		clear_bit(SMAP_TX_RUNNING, &psock->state);
		rcu_assign_sk_user_data(sock, NULL);
		call_rcu_sched(&psock->rcu, smap_destroy_psock);
	}
}
static int smap_parse_func_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;
	struct bpf_prog *prog;
	int rc;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	prog = READ_ONCE(psock->bpf_parse);

	if (unlikely(!prog)) {
		rcu_read_unlock();
		return skb->len;
	}

	/* Attach socket for bpf program to use if needed. We can do this
	 * because strparser clones the skb before handing it to an upper
	 * layer, meaning skb_orphan has been called. We NULL sk on the
	 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
	 * later and because we are not charging the memory of this skb to
	 * any socket yet.
	 */
	skb->sk = psock->sock;
	bpf_compute_data_pointers(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;
	rcu_read_unlock();
	return rc;
}
static int smap_read_sock_done(struct strparser *strp, int err)
{
	return err;
}

static int smap_init_sock(struct smap_psock *psock,
			  struct sock *sk)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= smap_read_sock_strparser,
		.parse_msg	= smap_parse_func_strparser,
		.read_sock_done	= smap_read_sock_done,
	};

	return strp_init(&psock->strp, sk, &cb);
}
static void smap_init_progs(struct smap_psock *psock,
			    struct bpf_prog *verdict,
			    struct bpf_prog *parse)
{
	struct bpf_prog *orig_parse, *orig_verdict;

	orig_parse = xchg(&psock->bpf_parse, parse);
	orig_verdict = xchg(&psock->bpf_verdict, verdict);

	if (orig_verdict)
		bpf_prog_put(orig_verdict);
	if (orig_parse)
		bpf_prog_put(orig_parse);
}

static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
	if (sk->sk_data_ready == smap_data_ready)
		return;
	psock->save_data_ready = sk->sk_data_ready;
	psock->save_write_space = sk->sk_write_space;
	sk->sk_data_ready = smap_data_ready;
	sk->sk_write_space = smap_write_space;
	psock->strp_enabled = true;
}
static void sock_map_remove_complete(struct bpf_stab *stab)
{
	bpf_map_area_free(stab->sock_map);
	kfree(stab);
}
static void smap_gc_work(struct work_struct *w)
{
	struct smap_psock_map_entry *e, *tmp;
	struct sk_msg_buff *md, *mtmp;
	struct smap_psock *psock;

	psock = container_of(w, struct smap_psock, gc_work);

	/* no callback lock needed because we already detached sockmap ops */
	if (psock->strp_enabled)
		strp_done(&psock->strp);

	cancel_work_sync(&psock->tx_work);
	__skb_queue_purge(&psock->rxqueue);

	/* At this point all strparser and xmit work must be complete */
	if (psock->bpf_parse)
		bpf_prog_put(psock->bpf_parse);
	if (psock->bpf_verdict)
		bpf_prog_put(psock->bpf_verdict);
	if (psock->bpf_tx_msg)
		bpf_prog_put(psock->bpf_tx_msg);

	if (psock->cork) {
		free_start_sg(psock->sock, psock->cork);
		kfree(psock->cork);
	}

	list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
		list_del(&md->list);
		free_start_sg(psock->sock, md);
		kfree(md);
	}

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		list_del(&e->list);
		kfree(e);
	}

	if (psock->sk_redir)
		sock_put(psock->sk_redir);

	sock_put(psock->sock);
	kfree(psock);
}
static struct smap_psock *smap_init_psock(struct sock *sock, int node)
{
	struct smap_psock *psock;

	psock = kzalloc_node(sizeof(struct smap_psock),
			     GFP_ATOMIC | __GFP_NOWARN,
			     node);
	if (!psock)
		return ERR_PTR(-ENOMEM);

	psock->eval = __SK_NONE;
	psock->sock = sock;
	skb_queue_head_init(&psock->rxqueue);
	INIT_WORK(&psock->tx_work, smap_tx_work);
	INIT_WORK(&psock->gc_work, smap_gc_work);
	INIT_LIST_HEAD(&psock->maps);
	INIT_LIST_HEAD(&psock->ingress);
	refcount_set(&psock->refcnt, 1);
	spin_lock_init(&psock->maps_lock);

	rcu_assign_sk_user_data(sock, psock);
	sock_hold(sock);
	return psock;
}
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;
	u64 cost;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	err = bpf_tcp_ulp_register();
	if (err && err != -EEXIST)
		return ERR_PTR(err);

	stab = kzalloc(sizeof(*stab), GFP_USER);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
	err = -EINVAL;
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_stab;

	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
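
	/* e.g. on a 64-bit system with 4 KiB pages, max_entries = 65536
	 * gives cost = 65536 * 8 = 512 KiB, i.e. map.pages = 128.
	 */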
	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(stab->map.pages);
	if (err)
		goto free_stab;

	err = -ENOMEM;
	stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
					    sizeof(struct sock *),
					    stab->map.numa_node);
	if (!stab->sock_map)
		goto free_stab;

	return &stab->map;
free_stab:
	kfree(stab);
	return ERR_PTR(err);
}
static void smap_list_map_remove(struct smap_psock *psock,
				 struct sock **entry)
{
	struct smap_psock_map_entry *e, *tmp;

	spin_lock_bh(&psock->maps_lock);
	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		if (e->entry == entry) {
			list_del(&e->list);
			kfree(e);
		}
	}
	spin_unlock_bh(&psock->maps_lock);
}

static void smap_list_hash_remove(struct smap_psock *psock,
				  struct htab_elem *hash_link)
{
	struct smap_psock_map_entry *e, *tmp;

	spin_lock_bh(&psock->maps_lock);
	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		struct htab_elem *c = rcu_dereference(e->hash_link);

		if (c == hash_link) {
			list_del(&e->list);
			kfree(e);
		}
	}
	spin_unlock_bh(&psock->maps_lock);
}
static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state event updates and
	 * data ready callbacks that reference the psock from sk_user_data.
	 * Also psock worker threads are still in-flight. So smap_release_sock
	 * will only free the psock after cancel_sync on the worker threads
	 * and a grace period expires to ensure the psock is really safe to
	 * remove.
	 */
	rcu_read_lock();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct smap_psock *psock;
		struct sock *sock;

		sock = xchg(&stab->sock_map[i], NULL);
		if (!sock)
			continue;

		psock = smap_psock_sk(sock);
		/* This check handles a racing sock event that can get the
		 * sk_callback_lock before this case but after xchg happens
		 * causing the refcnt to hit zero and sock user data (psock)
		 * to be null and queued for garbage collection.
		 */
		if (likely(psock)) {
			smap_list_map_remove(psock, &stab->sock_map[i]);
			smap_release_sock(psock, sock);
		}
	}
	rcu_read_unlock();

	sock_map_remove_complete(stab);
}
static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (i >= stab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (i == stab->map.max_entries - 1)
		return -ENOENT;

	*next = i + 1;
	return 0;
}

struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	if (key >= map->max_entries)
		return NULL;

	return READ_ONCE(stab->sock_map[key]);
}
static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock *psock;
	int k = *(u32 *)key;
	struct sock *sock;

	if (k >= map->max_entries)
		return -EINVAL;

	sock = xchg(&stab->sock_map[k], NULL);
	if (!sock)
		return -EINVAL;

	psock = smap_psock_sk(sock);
	if (!psock)
		return 0;

	if (psock->bpf_parse)
		smap_stop_sock(psock, sock);
	smap_list_map_remove(psock, &stab->sock_map[k]);
	smap_release_sock(psock, sock);
	return 0;
}
/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside rcu critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent updates/deletes
 * complete. All operations operate on sock_map using cmpxchg and xchg
 * operations to ensure we do not get stale references. Any reads into the
 * map must be done with READ_ONCE() because of this.
 *
 * A psock is destroyed via call_rcu and after any worker threads are cancelled
 * and synced, so we are certain all references from the update/lookup/delete
 * operations as well as references in the data path are no longer in use.
 *
 * Psocks may exist in multiple maps, but only a single set of parse/verdict
 * programs may be inherited from the maps it belongs to. A reference count
 * is kept with the total number of references to the psock from all maps. The
 * psock will not be released until this reaches zero. The psock and sock
 * user data use the sk_callback_lock to protect critical data structures
 * from concurrent access. This prevents two updates from modifying the user
 * data in sock at the same time, and since the lock is required anyway for
 * modifying callbacks, we simply increase its scope slightly.
 *
 * Rules to follow:
 *  - psock must always be read inside RCU critical section
 *  - sk_user_data must only be modified inside sk_callback_lock and read
 *    inside RCU critical section.
 *  - psock->maps list must only be read & modified inside sk_callback_lock
 *  - sock_map must use READ_ONCE and (cmp)xchg operations
 *  - BPF verdict/parse programs must use READ_ONCE and xchg operations
 */
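
/* For example, the delete path follows this discipline (sketch, see
 * sock_map_delete_elem() below):
 *
 *	sock = xchg(&stab->sock_map[k], NULL);	// claim the slot atomically
 *	psock = smap_psock_sk(sock);		// read psock under RCU
 *	smap_release_sock(psock, sock);		// drop this map's reference
 */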
static int __sock_map_ctx_update_elem(struct bpf_map *map,
				      struct bpf_sock_progs *progs,
				      struct sock *sock,
				      struct sock **map_link,
				      void *key)
{
	struct bpf_prog *verdict, *parse, *tx_msg;
	struct smap_psock_map_entry *e = NULL;
	struct smap_psock *psock;
	bool new = false;
	int err = 0;

	/* 1. If sock map has BPF programs those will be inherited by the
	 * sock being added. If the sock is already attached to BPF programs
	 * this results in an error.
	 */
	verdict = READ_ONCE(progs->bpf_verdict);
	parse = READ_ONCE(progs->bpf_parse);
	tx_msg = READ_ONCE(progs->bpf_tx_msg);

	if (parse && verdict) {
		/* bpf prog refcnt may be zero if a concurrent attach operation
		 * removes the program after the above READ_ONCE() but before
		 * we increment the refcnt. If this is the case abort with an
		 * error.
		 */
		verdict = bpf_prog_inc_not_zero(verdict);
		if (IS_ERR(verdict))
			return PTR_ERR(verdict);

		parse = bpf_prog_inc_not_zero(parse);
		if (IS_ERR(parse)) {
			bpf_prog_put(verdict);
			return PTR_ERR(parse);
		}
	}

	if (tx_msg) {
		tx_msg = bpf_prog_inc_not_zero(tx_msg);
		if (IS_ERR(tx_msg)) {
			if (parse && verdict) {
				bpf_prog_put(parse);
				bpf_prog_put(verdict);
			}
			return PTR_ERR(tx_msg);
		}
	}

	psock = smap_psock_sk(sock);

	/* 2. Do not allow inheriting programs if psock exists and has
	 * already inherited programs. This would create confusion on
	 * which parser/verdict program is running. If no psock exists
	 * create one. Inside sk_callback_lock to ensure concurrent create
	 * doesn't update user data.
	 */
	if (psock) {
		if (READ_ONCE(psock->bpf_parse) && parse) {
			err = -EBUSY;
			goto out_progs;
		}
		if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) {
			err = -EBUSY;
			goto out_progs;
		}
		if (!refcount_inc_not_zero(&psock->refcnt)) {
			err = -EAGAIN;
			goto out_progs;
		}
	} else {
		psock = smap_init_psock(sock, map->numa_node);
		if (IS_ERR(psock)) {
			err = PTR_ERR(psock);
			goto out_progs;
		}

		set_bit(SMAP_TX_RUNNING, &psock->state);
		new = true;
	}

	if (map_link) {
		e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
		if (!e) {
			err = -ENOMEM;
			goto out_free;
		}
	}

	/* 3. At this point we have a reference to a valid psock that is
	 * running. Attach any BPF programs needed.
	 */
	if (tx_msg)
		bpf_tcp_msg_add(psock, sock, tx_msg);
	if (new) {
		err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
		if (err)
			goto out_free;
	}

	if (parse && verdict && !psock->strp_enabled) {
		err = smap_init_sock(psock, sock);
		if (err)
			goto out_free;
		smap_init_progs(psock, verdict, parse);
		write_lock_bh(&sock->sk_callback_lock);
		smap_start_sock(psock, sock);
		write_unlock_bh(&sock->sk_callback_lock);
	}

	/* 4. Place psock in sockmap for use and stop any programs on
	 * the old sock assuming it's not the same sock we are replacing
	 * it with. Because we can only have a single set of programs, if
	 * old_sock has a strp we can stop it.
	 */
	if (map_link) {
		e->entry = map_link;
		spin_lock_bh(&psock->maps_lock);
		list_add_tail(&e->list, &psock->maps);
		spin_unlock_bh(&psock->maps_lock);
	}
	return err;
out_free:
	smap_release_sock(psock, sock);
out_progs:
	if (parse && verdict) {
		bpf_prog_put(parse);
		bpf_prog_put(verdict);
	}
	if (tx_msg)
		bpf_prog_put(tx_msg);
	kfree(e);
	return err;
}
static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				    struct bpf_map *map,
				    void *key, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_sock_progs *progs = &stab->progs;
	struct sock *osock, *sock;
	u32 i = *(u32 *)key;
	int err;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	if (unlikely(i >= stab->map.max_entries))
		return -E2BIG;

	sock = READ_ONCE(stab->sock_map[i]);
	if (flags == BPF_EXIST && !sock)
		return -ENOENT;
	else if (flags == BPF_NOEXIST && sock)
		return -EEXIST;

	sock = skops->sk;
	err = __sock_map_ctx_update_elem(map, progs, sock, &stab->sock_map[i],
					 key);
	if (err)
		goto out;

	osock = xchg(&stab->sock_map[i], sock);
	if (osock) {
		struct smap_psock *opsock = smap_psock_sk(osock);

		smap_list_map_remove(opsock, &stab->sock_map[i]);
		smap_release_sock(opsock, osock);
	}
out:
	return err;
}
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
	struct bpf_sock_progs *progs;
	struct bpf_prog *orig;

	if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

		progs = &stab->progs;
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH) {
		struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

		progs = &htab->progs;
	} else {
		return -EINVAL;
	}

	switch (type) {
	case BPF_SK_MSG_VERDICT:
		orig = xchg(&progs->bpf_tx_msg, prog);
		break;
	case BPF_SK_SKB_STREAM_PARSER:
		orig = xchg(&progs->bpf_parse, prog);
		break;
	case BPF_SK_SKB_STREAM_VERDICT:
		orig = xchg(&progs->bpf_verdict, prog);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (orig)
		bpf_prog_put(orig);

	return 0;
}
int sockmap_get_from_fd(const union bpf_attr *attr, int type,
			struct bpf_prog *prog)
{
	int ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int err;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = sock_map_prog(map, prog, attr->attach_type);
	fdput(f);
	return err;
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;
}
static int sock_map_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	if (skops.sk->sk_type != SOCK_STREAM ||
	    skops.sk->sk_protocol != IPPROTO_TCP) {
		fput(socket->file);
		return -EOPNOTSUPP;
	}

	err = sock_map_ctx_update_elem(&skops, map, key, flags);
	fput(socket->file);
	return err;
}
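
/* Usage note (illustrative, user-space view): the map value is a socket
 * file descriptor, so an update looks like
 *
 *	bpf_map_update_elem(map_fd, &index, &tcp_sock_fd, BPF_ANY);
 *
 * and anything that is not a SOCK_STREAM/IPPROTO_TCP socket is rejected
 * with -EOPNOTSUPP above.
 */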
static void sock_map_release(struct bpf_map *map)
{
	struct bpf_sock_progs *progs;
	struct bpf_prog *orig;

	if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

		progs = &stab->progs;
	} else {
		struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

		progs = &htab->progs;
	}

	orig = xchg(&progs->bpf_parse, NULL);
	if (orig)
		bpf_prog_put(orig);
	orig = xchg(&progs->bpf_verdict, NULL);
	if (orig)
		bpf_prog_put(orig);

	orig = xchg(&progs->bpf_tx_msg, NULL);
	if (orig)
		bpf_prog_put(orig);
}
static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_htab *htab;
	int i, err;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->value_size != 4 ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		return ERR_PTR(-E2BIG);

	err = bpf_tcp_ulp_register();
	if (err && err != -EEXIST)
		return ERR_PTR(err);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	err = -EINVAL;
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(
				htab->n_buckets * sizeof(struct bucket),
				htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}
static void __bpf_htab_free(struct rcu_head *rcu)
{
	struct bpf_htab *htab;

	htab = container_of(rcu, struct bpf_htab, rcu);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}
static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state event updates and
	 * data ready callbacks that reference the psock from sk_user_data.
	 * Also psock worker threads are still in-flight. So smap_release_sock
	 * will only free the psock after cancel_sync on the worker threads
	 * and a grace period expires to ensure the psock is really safe to
	 * remove.
	 */
	rcu_read_lock();
	for (i = 0; i < htab->n_buckets; i++) {
		struct bucket *b = __select_bucket(htab, i);
		struct hlist_head *head;
		struct hlist_node *n;
		struct htab_elem *l;

		raw_spin_lock_bh(&b->lock);
		head = &b->head;
		hlist_for_each_entry_safe(l, n, head, hash_node) {
			struct sock *sock = l->sk;
			struct smap_psock *psock;

			hlist_del_rcu(&l->hash_node);
			psock = smap_psock_sk(sock);
			/* This check handles a racing sock event that can get
			 * the sk_callback_lock before this case but after xchg
			 * causing the refcnt to hit zero and sock user data
			 * (psock) to be null and queued for garbage collection.
			 */
			if (likely(psock)) {
				smap_list_hash_remove(psock, l);
				smap_release_sock(psock, sock);
			}
			free_htab_elem(htab, l);
		}
		raw_spin_unlock_bh(&b->lock);
	}
	rcu_read_unlock();
	call_rcu(&htab->rcu, __bpf_htab_free);
}
static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
					      void *key, u32 key_size,
					      u32 hash, struct sock *sk,
					      struct htab_elem *old_elem)
{
	struct htab_elem *l_new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old_elem) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
			     htab->map.numa_node);
	if (!l_new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}

	memcpy(l_new->key, key, key_size);
	l_new->sk = sk;
	l_new->hash = hash;
	return l_new;
}
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}
static int sock_hash_get_next_key(struct bpf_map *map,
				  void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l, *next_l;
	struct hlist_head *h;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;
	if (!key)
		goto find_first_elem;
	hash = htab_map_hash(key, key_size);
	h = select_bucket(htab, hash);

	l = lookup_elem_raw(h, hash, key, key_size);
	if (!l)
		goto find_first_elem;
	next_l = hlist_entry_safe(
		     rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
		     struct htab_elem, hash_node);
	if (next_l) {
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		h = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_entry_safe(
			     rcu_dereference_raw(hlist_first_rcu(h)),
			     struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}
static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				     struct bpf_map *map,
				     void *key, u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct bpf_sock_progs *progs = &htab->progs;
	struct htab_elem *l_new = NULL, *l_old;
	struct smap_psock_map_entry *e = NULL;
	struct hlist_head *head;
	struct smap_psock *psock;
	u32 key_size, hash;
	struct sock *sock;
	struct bucket *b;
	int err;

	sock = skops->sk;

	if (sock->sk_type != SOCK_STREAM ||
	    sock->sk_protocol != IPPROTO_TCP)
		return -EOPNOTSUPP;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;

	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
	if (!e)
		return -ENOMEM;

	WARN_ON_ONCE(!rcu_read_lock_held());
	key_size = map->key_size;
	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	err = __sock_map_ctx_update_elem(map, progs, sock, NULL, key);
	if (err)
		goto err;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_bh(&b->lock);
	l_old = lookup_elem_raw(head, hash, key, key_size);
	if (l_old && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto bucket_err;
	}
	if (!l_old && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto bucket_err;
	}

	l_new = alloc_sock_hash_elem(htab, key, key_size, hash, sock, l_old);
	if (IS_ERR(l_new)) {
		err = PTR_ERR(l_new);
		goto bucket_err;
	}

	psock = smap_psock_sk(sock);
	if (unlikely(!psock)) {
		err = -EINVAL;
		goto bucket_err;
	}

	rcu_assign_pointer(e->hash_link, l_new);
	rcu_assign_pointer(e->htab,
			   container_of(map, struct bpf_htab, map));
	spin_lock_bh(&psock->maps_lock);
	list_add_tail(&e->list, &psock->maps);
	spin_unlock_bh(&psock->maps_lock);

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		psock = smap_psock_sk(l_old->sk);

		hlist_del_rcu(&l_old->hash_node);
		smap_list_hash_remove(psock, l_old);
		smap_release_sock(psock, l_old->sk);
		free_htab_elem(htab, l_old);
	}
	raw_spin_unlock_bh(&b->lock);
	return 0;
bucket_err:
	raw_spin_unlock_bh(&b->lock);
err:
	kfree(e);
	psock = smap_psock_sk(sock);
	if (psock)
		smap_release_sock(psock, sock);
	return err;
}
static int sock_hash_update_elem(struct bpf_map *map,
				 void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	err = sock_hash_ctx_update_elem(&skops, map, key, flags);
	fput(socket->file);
	return err;
}
static int sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	u32 hash, key_size;
	int ret = -ENOENT;

	key_size = map->key_size;
	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_bh(&b->lock);
	l = lookup_elem_raw(head, hash, key, key_size);
	if (l) {
		struct sock *sock = l->sk;
		struct smap_psock *psock;

		hlist_del_rcu(&l->hash_node);
		psock = smap_psock_sk(sock);
		/* This check handles a racing sock event that can get the
		 * sk_callback_lock before this case but after xchg happens
		 * causing the refcnt to hit zero and sock user data (psock)
		 * to be null and queued for garbage collection.
		 */
		if (likely(psock)) {
			smap_list_hash_remove(psock, l);
			smap_release_sock(psock, sock);
		}
		free_htab_elem(htab, l);
		ret = 0;
	}
	raw_spin_unlock_bh(&b->lock);
	return ret;
}
struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l;
	u32 key_size, hash;
	struct bucket *b;
	struct sock *sk;

	key_size = map->key_size;
	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_bh(&b->lock);
	l = lookup_elem_raw(head, hash, key, key_size);
	sk = l ? l->sk : NULL;
	raw_spin_unlock_bh(&b->lock);
	return sk;
}
const struct bpf_map_ops sock_map_ops = {
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_map_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
	.map_release_uref = sock_map_release,
};

const struct bpf_map_ops sock_hash_ops = {
	.map_alloc = sock_hash_alloc,
	.map_free = sock_hash_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_hash_get_next_key,
	.map_update_elem = sock_hash_update_elem,
	.map_delete_elem = sock_hash_delete_elem,
	.map_release_uref = sock_map_release,
};
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};
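
/* From a BPF_PROG_TYPE_SOCK_OPS program this helper is typically used to
 * add the current connection to a map on established events, e.g.
 * (illustrative sketch, see samples/sockmap for a complete example):
 *
 *	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *		bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
 */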
BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func		= bpf_sock_hash_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};