1 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
13 /* A BPF sock_map is used to store sock objects. This is primarily used
14 * for doing socket redirect with BPF helper routines.
16 * A sock map may have BPF programs attached to it, currently a program
17 * used to parse packets and a program to provide a verdict and redirect
18 * decision on the packet are supported. Any programs attached to a sock
19 * map are inherited by sock objects when they are added to the map. If
20 * no BPF programs are attached, the sock object may only be used for sock
23 * A sock object may be in multiple maps, but can only inherit a single
24 * parse or verdict program. If adding a sock object to a map would result
25 * in having multiple parsing programs, the update will return an EBUSY error.
27 * For reference, this program is similar to the devmap used in the XDP context;
28 * reviewing these together may be useful. For an example, please review
29 * ./samples/bpf/sockmap/.
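/* For illustration, a minimal verdict program of the kind described above
 * might look like the sketch below. The map name "sock_map", the key value 0,
 * and the ELF section name are assumptions for the example; see
 * ./samples/bpf/sockmap/ for complete, buildable programs.
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type		= BPF_MAP_TYPE_SOCKMAP,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(int),
 *		.max_entries	= 2,
 *	};
 *
 *	SEC("sk_skb/stream_verdict")
 *	int prog_verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 */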
31 #include <linux/bpf.h>
33 #include <linux/filter.h>
34 #include <linux/errno.h>
35 #include <linux/file.h>
36 #include <linux/kernel.h>
37 #include <linux/net.h>
38 #include <linux/skbuff.h>
39 #include <linux/workqueue.h>
40 #include <linux/list.h>
42 #include <net/strparser.h>
44 #include <linux/ptr_ring.h>
45 #include <net/inet_common.h>
46 #include <linux/sched/signal.h>
48 #define SOCK_CREATE_FLAG_MASK \
49 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
51 struct bpf_sock_progs {
52 struct bpf_prog *bpf_tx_msg;
53 struct bpf_prog *bpf_parse;
54 struct bpf_prog *bpf_verdict;
59 struct sock **sock_map;
60 struct bpf_sock_progs progs;
65 struct hlist_head head;
71 struct bucket *buckets;
75 struct bpf_sock_progs progs;
81 struct hlist_node hash_node;
87 enum smap_psock_state {
91 struct smap_psock_map_entry {
92 struct list_head list;
95 struct htab_elem __rcu *hash_link;
102 /* datapath variables */
103 struct sk_buff_head rxqueue;
106 /* datapath error path cache across tx work invocations */
109 struct sk_buff *save_skb;
111 /* datapath variables for tx_msg ULP */
112 struct sock *sk_redir;
117 struct sk_msg_buff *cork;
118 struct list_head ingress;
120 struct strparser strp;
121 struct bpf_prog *bpf_tx_msg;
122 struct bpf_prog *bpf_parse;
123 struct bpf_prog *bpf_verdict;
124 struct list_head maps;
125 spinlock_t maps_lock;
127 /* Back reference used when sock callback triggers sockmap operations */
131 struct work_struct tx_work;
132 struct work_struct gc_work;
134 struct proto *sk_proto;
135 void (*save_close)(struct sock *sk, long timeout);
136 void (*save_data_ready)(struct sock *sk);
137 void (*save_write_space)(struct sock *sk);
140 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
141 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
142 int nonblock, int flags, int *addr_len);
143 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
144 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
145 int offset, size_t size, int flags);
146 static void bpf_tcp_close(struct sock *sk, long timeout);
148 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
150 return rcu_dereference_sk_user_data(sk);
153 static bool bpf_tcp_stream_read(const struct sock *sk)
155 struct smap_psock *psock;
159 psock = smap_psock_sk(sk);
160 if (unlikely(!psock))
162 empty = list_empty(&psock->ingress);
180 static struct proto *saved_tcpv6_prot __read_mostly;
181 static DEFINE_SPINLOCK(tcpv6_prot_lock);
182 static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
183 static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
186 prot[SOCKMAP_BASE] = *base;
187 prot[SOCKMAP_BASE].close = bpf_tcp_close;
188 prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg;
189 prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read;
191 prot[SOCKMAP_TX] = prot[SOCKMAP_BASE];
192 prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg;
193 prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage;
196 static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
198 int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
199 int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
201 sk->sk_prot = &bpf_tcp_prots[family][conf];
204 static int bpf_tcp_init(struct sock *sk)
206 struct smap_psock *psock;
209 psock = smap_psock_sk(sk);
210 if (unlikely(!psock)) {
215 if (unlikely(psock->sk_proto)) {
220 psock->save_close = sk->sk_prot->close;
221 psock->sk_proto = sk->sk_prot;
223 /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
224 if (sk->sk_family == AF_INET6 &&
225 unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
226 spin_lock_bh(&tcpv6_prot_lock);
227 if (likely(sk->sk_prot != saved_tcpv6_prot)) {
228 build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
229 smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
231 spin_unlock_bh(&tcpv6_prot_lock);
233 update_sk_prot(sk, psock);
238 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
239 static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge);
241 static void bpf_tcp_release(struct sock *sk)
243 struct smap_psock *psock;
246 psock = smap_psock_sk(sk);
247 if (unlikely(!psock))
251 free_start_sg(psock->sock, psock->cork, true);
256 if (psock->sk_proto) {
257 sk->sk_prot = psock->sk_proto;
258 psock->sk_proto = NULL;
264 static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
265 u32 hash, void *key, u32 key_size)
269 hlist_for_each_entry_rcu(l, head, hash_node) {
270 if (l->hash == hash && !memcmp(&l->key, key, key_size))
277 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
279 return &htab->buckets[hash & (htab->n_buckets - 1)];
282 static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
284 return &__select_bucket(htab, hash)->head;
287 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
289 atomic_dec(&htab->count);
293 static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
294 struct smap_psock *psock)
296 struct smap_psock_map_entry *e;
298 spin_lock_bh(&psock->maps_lock);
299 e = list_first_entry_or_null(&psock->maps,
300 struct smap_psock_map_entry,
304 spin_unlock_bh(&psock->maps_lock);
308 static void bpf_tcp_close(struct sock *sk, long timeout)
310 void (*close_fun)(struct sock *sk, long timeout);
311 struct smap_psock_map_entry *e;
312 struct sk_msg_buff *md, *mtmp;
313 struct smap_psock *psock;
318 psock = smap_psock_sk(sk);
319 if (unlikely(!psock)) {
322 return sk->sk_prot->close(sk, timeout);
325 /* The psock may be destroyed anytime after exiting the RCU critical
326 * section, so by the time we use close_fun the psock may no longer
327 * be valid. However, bpf_tcp_close is called with the sock lock
328 * held so the close hook and sk are still valid.
330 close_fun = psock->save_close;
333 free_start_sg(psock->sock, psock->cork, true);
338 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
340 free_start_sg(psock->sock, md, true);
344 e = psock_map_pop(sk, psock);
347 struct bpf_stab *stab = container_of(e->map, struct bpf_stab, map);
349 raw_spin_lock_bh(&stab->lock);
353 smap_release_sock(psock, sk);
355 raw_spin_unlock_bh(&stab->lock);
357 struct htab_elem *link = rcu_dereference(e->hash_link);
358 struct bpf_htab *htab = container_of(e->map, struct bpf_htab, map);
359 struct hlist_head *head;
363 b = __select_bucket(htab, link->hash);
365 raw_spin_lock_bh(&b->lock);
366 l = lookup_elem_raw(head,
367 link->hash, link->key,
369 /* If another thread deleted this object, skip deletion.
370 * The refcnt on psock may or may not be zero.
372 if (l && l == link) {
373 hlist_del_rcu(&link->hash_node);
374 smap_release_sock(psock, link->sk);
375 free_htab_elem(htab, link);
377 raw_spin_unlock_bh(&b->lock);
380 e = psock_map_pop(sk, psock);
384 close_fun(sk, timeout);
394 static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
397 .user_visible = false,
399 .init = bpf_tcp_init,
400 .release = bpf_tcp_release,
403 static int memcopy_from_iter(struct sock *sk,
404 struct sk_msg_buff *md,
405 struct iov_iter *from, int bytes)
407 struct scatterlist *sg = md->sg_data;
408 int i = md->sg_curr, rc = -ENOSPC;
414 if (md->sg_copybreak >= sg[i].length) {
415 md->sg_copybreak = 0;
417 if (++i == MAX_SKB_FRAGS)
424 copy = sg[i].length - md->sg_copybreak;
425 to = sg_virt(&sg[i]) + md->sg_copybreak;
426 md->sg_copybreak += copy;
428 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
429 rc = copy_from_iter_nocache(to, copy, from);
431 rc = copy_from_iter(to, copy, from);
442 md->sg_copybreak = 0;
443 if (++i == MAX_SKB_FRAGS)
445 } while (i != md->sg_end);
451 static int bpf_tcp_push(struct sock *sk, int apply_bytes,
452 struct sk_msg_buff *md,
453 int flags, bool uncharge)
455 bool apply = apply_bytes;
456 struct scatterlist *sg;
462 sg = md->sg_data + md->sg_start;
463 size = (apply && apply_bytes < sg->length) ?
464 apply_bytes : sg->length;
467 tcp_rate_check_app_limited(sk);
470 ret = do_tcp_sendpages(sk, p, offset, size, flags);
481 sk_mem_uncharge(sk, ret);
493 sk_mem_uncharge(sk, ret);
498 if (md->sg_start == MAX_SKB_FRAGS)
500 sg_init_table(sg, 1);
502 if (md->sg_start == md->sg_end)
506 if (apply && !apply_bytes)
512 static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md)
514 struct scatterlist *sg = md->sg_data + md->sg_start;
516 if (md->sg_copy[md->sg_start]) {
517 md->data = md->data_end = 0;
519 md->data = sg_virt(sg);
520 md->data_end = md->data + sg->length;
524 static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
526 struct scatterlist *sg = md->sg_data;
527 int i = md->sg_start;
530 int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length;
532 sk_mem_uncharge(sk, uncharge);
537 if (i == MAX_SKB_FRAGS)
539 } while (i != md->sg_end);
542 static void free_bytes_sg(struct sock *sk, int bytes,
543 struct sk_msg_buff *md, bool charge)
545 struct scatterlist *sg = md->sg_data;
546 int i = md->sg_start, free;
548 while (bytes && sg[i].length) {
551 sg[i].length -= bytes;
552 sg[i].offset += bytes;
554 sk_mem_uncharge(sk, bytes);
559 sk_mem_uncharge(sk, sg[i].length);
560 put_page(sg_page(&sg[i]));
561 bytes -= sg[i].length;
567 if (i == MAX_SKB_FRAGS)
573 static int free_sg(struct sock *sk, int start,
574 struct sk_msg_buff *md, bool charge)
576 struct scatterlist *sg = md->sg_data;
577 int i = start, free = 0;
579 while (sg[i].length) {
580 free += sg[i].length;
582 sk_mem_uncharge(sk, sg[i].length);
584 put_page(sg_page(&sg[i]));
590 if (i == MAX_SKB_FRAGS)
594 consume_skb(md->skb);
599 static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge)
601 int free = free_sg(sk, md->sg_start, md, charge);
603 md->sg_start = md->sg_end;
607 static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
609 return free_sg(sk, md->sg_curr, md, true);
612 static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
614 return ((_rc == SK_PASS) ?
615 (md->sk_redir ? __SK_REDIRECT : __SK_PASS) :
619 static unsigned int smap_do_tx_msg(struct sock *sk,
620 struct smap_psock *psock,
621 struct sk_msg_buff *md)
623 struct bpf_prog *prog;
624 unsigned int rc, _rc;
629 /* If the policy was removed mid-send then default to 'accept' */
630 prog = READ_ONCE(psock->bpf_tx_msg);
631 if (unlikely(!prog)) {
636 bpf_compute_data_pointers_sg(md);
638 rc = (*prog->bpf_func)(md, prog->insnsi);
639 psock->apply_bytes = md->apply_bytes;
641 /* Moving return codes from UAPI namespace into internal namespace */
642 _rc = bpf_map_msg_verdict(rc, md);
644 /* The psock has a refcount on the sock but not on the map, and because
645 * we need to drop the rcu read lock here it's possible the map could be
646 * removed between here and when we need it to execute the sock
647 * redirect. So do the map lookup now for future use.
649 if (_rc == __SK_REDIRECT) {
651 sock_put(psock->sk_redir);
652 psock->sk_redir = do_msg_redirect_map(md);
653 if (!psock->sk_redir) {
657 sock_hold(psock->sk_redir);
666 static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
667 struct smap_psock *psock,
668 struct sk_msg_buff *md, int flags)
670 bool apply = apply_bytes;
671 size_t size, copied = 0;
672 struct sk_msg_buff *r;
675 r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
680 r->sg_start = md->sg_start;
684 size = (apply && apply_bytes < md->sg_data[i].length) ?
685 apply_bytes : md->sg_data[i].length;
687 if (!sk_wmem_schedule(sk, size)) {
693 sk_mem_charge(sk, size);
694 r->sg_data[i] = md->sg_data[i];
695 r->sg_data[i].length = size;
696 md->sg_data[i].length -= size;
697 md->sg_data[i].offset += size;
700 if (md->sg_data[i].length) {
701 get_page(sg_page(&r->sg_data[i]));
702 r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
705 if (i == MAX_SKB_FRAGS)
715 } while (i != md->sg_end);
720 list_add_tail(&r->list, &psock->ingress);
721 sk->sk_data_ready(sk);
723 free_start_sg(sk, r, true);
731 static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
732 struct sk_msg_buff *md,
735 bool ingress = !!(md->flags & BPF_F_INGRESS);
736 struct smap_psock *psock;
740 psock = smap_psock_sk(sk);
741 if (unlikely(!psock))
744 if (!refcount_inc_not_zero(&psock->refcnt))
750 err = bpf_tcp_ingress(sk, send, psock, md, flags);
753 err = bpf_tcp_push(sk, send, md, flags, false);
756 smap_release_sock(psock, sk);
763 static inline void bpf_md_init(struct smap_psock *psock)
765 if (!psock->apply_bytes) {
766 psock->eval = __SK_NONE;
767 if (psock->sk_redir) {
768 sock_put(psock->sk_redir);
769 psock->sk_redir = NULL;
774 static void apply_bytes_dec(struct smap_psock *psock, int i)
776 if (psock->apply_bytes) {
777 if (psock->apply_bytes < i)
778 psock->apply_bytes = 0;
780 psock->apply_bytes -= i;
784 static int bpf_exec_tx_verdict(struct smap_psock *psock,
785 struct sk_msg_buff *m,
787 int *copied, int flags)
789 bool cork = false, enospc = (m->sg_start == m->sg_end);
795 if (psock->eval == __SK_NONE)
796 psock->eval = smap_do_tx_msg(sk, psock, m);
799 m->cork_bytes > psock->sg_size && !enospc) {
800 psock->cork_bytes = m->cork_bytes - psock->sg_size;
802 psock->cork = kcalloc(1,
803 sizeof(struct sk_msg_buff),
804 GFP_ATOMIC | __GFP_NOWARN);
811 memcpy(psock->cork, m, sizeof(*m));
815 send = psock->sg_size;
816 if (psock->apply_bytes && psock->apply_bytes < send)
817 send = psock->apply_bytes;
819 switch (psock->eval) {
821 err = bpf_tcp_push(sk, send, m, flags, true);
823 *copied -= free_start_sg(sk, m, true);
827 apply_bytes_dec(psock, send);
828 psock->sg_size -= send;
831 redir = psock->sk_redir;
832 apply_bytes_dec(psock, send);
839 return_mem_sg(sk, send, m);
842 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
845 if (unlikely(err < 0)) {
846 int free = free_start_sg(sk, m, false);
852 psock->sg_size -= send;
856 free_start_sg(sk, m, true);
865 free_bytes_sg(sk, send, m, true);
866 apply_bytes_dec(psock, send);
868 psock->sg_size -= send;
876 m->sg_data[m->sg_start].page_link &&
877 m->sg_data[m->sg_start].length)
885 static int bpf_wait_data(struct sock *sk,
886 struct smap_psock *psk, int flags,
887 long timeo, int *err)
891 DEFINE_WAIT_FUNC(wait, woken_wake_function);
893 add_wait_queue(sk_sleep(sk), &wait);
894 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
895 rc = sk_wait_event(sk, &timeo,
896 !list_empty(&psk->ingress) ||
897 !skb_queue_empty(&sk->sk_receive_queue),
899 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
900 remove_wait_queue(sk_sleep(sk), &wait);
905 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
906 int nonblock, int flags, int *addr_len)
908 struct iov_iter *iter = &msg->msg_iter;
909 struct smap_psock *psock;
912 if (unlikely(flags & MSG_ERRQUEUE))
913 return inet_recv_error(sk, msg, len, addr_len);
914 if (!skb_queue_empty(&sk->sk_receive_queue))
915 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
918 psock = smap_psock_sk(sk);
919 if (unlikely(!psock))
922 if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
928 while (copied != len) {
929 struct scatterlist *sg;
930 struct sk_msg_buff *md;
933 md = list_first_entry_or_null(&psock->ingress,
934 struct sk_msg_buff, list);
942 sg = &md->sg_data[i];
946 if (copied + copy > len)
949 n = copy_page_to_iter(page, sg->offset, copy, iter);
953 smap_release_sock(psock, sk);
960 sk_mem_uncharge(sk, copy);
964 if (i == MAX_SKB_FRAGS)
971 } while (i != md->sg_end);
974 if (!sg->length && md->sg_start == md->sg_end) {
977 consume_skb(md->skb);
987 timeo = sock_rcvtimeo(sk, nonblock);
988 data = bpf_wait_data(sk, psock, flags, timeo, &err);
991 if (!skb_queue_empty(&sk->sk_receive_queue)) {
993 smap_release_sock(psock, sk);
994 copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
1005 smap_release_sock(psock, sk);
1009 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
1013 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1015 int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
1016 struct sk_msg_buff md = {0};
1017 unsigned int sg_copy = 0;
1018 struct smap_psock *psock;
1019 int copied = 0, err = 0;
1020 struct scatterlist *sg;
1023 /* It's possible a sock event or user removed the psock _but_ the ops
1024 * have not been reprogrammed yet, so we get here. In this case fall back
1025 * to tcp_sendmsg. Note this only works because we _only_ ever allow
1026 * a single ULP; there is no hierarchy here.
1029 psock = smap_psock_sk(sk);
1030 if (unlikely(!psock)) {
1032 return tcp_sendmsg(sk, msg, size);
1035 /* Increment the psock refcnt to ensure it's not released while sending a
1036 * message. This is required because the sk lookup and bpf programs are used in
1037 * separate rcu critical sections. It's OK if we lose the map entry
1038 * but we can't lose the sock reference.
1040 if (!refcount_inc_not_zero(&psock->refcnt)) {
1042 return tcp_sendmsg(sk, msg, size);
1046 sg_init_marker(sg, MAX_SKB_FRAGS);
1050 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1052 while (msg_data_left(msg)) {
1053 struct sk_msg_buff *m = NULL;
1054 bool enospc = false;
1062 copy = msg_data_left(msg);
1063 if (!sk_stream_memory_free(sk))
1064 goto wait_for_sndbuf;
1066 m = psock->cork_bytes ? psock->cork : &md;
1067 m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end;
1068 err = sk_alloc_sg(sk, copy, m->sg_data,
1069 m->sg_start, &m->sg_end, &sg_copy,
1073 goto wait_for_memory;
1078 err = memcopy_from_iter(sk, m, &msg->msg_iter, copy);
1080 free_curr_sg(sk, m);
1084 psock->sg_size += copy;
1088 /* When bytes are being corked, skip running the BPF program and
1089 * applying the verdict unless there is no more buffer space. In
1090 * the ENOSPC case simply run the BPF program with the currently
1091 * accumulated data. We don't have much choice at this point;
1092 * we could try extending the page frags or chaining complex
1093 * frags, but even in these cases _eventually_ we will hit an
1094 * OOM scenario. More complex recovery schemes may be
1095 * implemented in the future, but BPF programs must handle
1096 * the case where apply_cork requests are not honored. The
1097 * canonical method to verify this is to check data length.
1099 if (psock->cork_bytes) {
1100 if (copy > psock->cork_bytes)
1101 psock->cork_bytes = 0;
1103 psock->cork_bytes -= copy;
1105 if (psock->cork_bytes && !enospc)
1108 /* All cork bytes accounted for, re-run the filter */
1109 psock->eval = __SK_NONE;
1110 psock->cork_bytes = 0;
1113 err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
1114 if (unlikely(err < 0))
1118 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1120 err = sk_stream_wait_memory(sk, &timeo);
1122 if (m && m != psock->cork)
1123 free_start_sg(sk, m, true);
1129 err = sk_stream_error(sk, msg->msg_flags, err);
1132 smap_release_sock(psock, sk);
1133 return copied ? copied : err;
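/* For illustration, an SK_MSG program that requests corking as described in
 * the comment above might look like the sketch below. WANT_BYTES and the
 * section name are assumptions; as noted above, a program must check the
 * data length itself because a cork request may not be honored.
 *
 *	SEC("sk_msg")
 *	int prog_msg(struct sk_msg_md *msg)
 *	{
 *		void *data = msg->data;
 *		void *data_end = msg->data_end;
 *
 *		if (data + WANT_BYTES > data_end)
 *			bpf_msg_cork_bytes(msg, WANT_BYTES);
 *		return SK_PASS;
 *	}
 */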
1136 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
1137 int offset, size_t size, int flags)
1139 struct sk_msg_buff md = {0}, *m = NULL;
1140 int err = 0, copied = 0;
1141 struct smap_psock *psock;
1142 struct scatterlist *sg;
1143 bool enospc = false;
1146 psock = smap_psock_sk(sk);
1147 if (unlikely(!psock))
1150 if (!refcount_inc_not_zero(&psock->refcnt))
1156 if (psock->cork_bytes) {
1158 sg = &m->sg_data[m->sg_end];
1162 sg_init_marker(sg, MAX_SKB_FRAGS);
1165 /* Catch case where ring is full and sendpage is stalled. */
1166 if (unlikely(m->sg_end == m->sg_start &&
1167 m->sg_data[m->sg_end].length))
1170 psock->sg_size += size;
1171 sg_set_page(sg, page, size, offset);
1173 m->sg_copy[m->sg_end] = true;
1174 sk_mem_charge(sk, size);
1178 if (m->sg_end == MAX_SKB_FRAGS)
1181 if (m->sg_end == m->sg_start)
1184 if (psock->cork_bytes) {
1185 if (size > psock->cork_bytes)
1186 psock->cork_bytes = 0;
1188 psock->cork_bytes -= size;
1190 if (psock->cork_bytes && !enospc)
1193 /* All cork bytes accounted for, re-run the filter */
1194 psock->eval = __SK_NONE;
1195 psock->cork_bytes = 0;
1198 err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
1201 smap_release_sock(psock, sk);
1202 return copied ? copied : err;
1205 return tcp_sendpage(sk, page, offset, size, flags);
1208 static void bpf_tcp_msg_add(struct smap_psock *psock,
1210 struct bpf_prog *tx_msg)
1212 struct bpf_prog *orig_tx_msg;
1214 orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg);
1216 bpf_prog_put(orig_tx_msg);
1219 static int bpf_tcp_ulp_register(void)
1221 build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
1222 /* Once BPF TX ULP is registered it is never unregistered. It
1223 * will be in the ULP list for the lifetime of the system. Duplicate
1224 * registrations are not a problem.
1226 return tcp_register_ulp(&bpf_tcp_ulp_ops);
1229 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
1231 struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
1234 if (unlikely(!prog))
1238 /* We need to ensure that BPF metadata for maps is also cleared
1239 * when we orphan the skb so that we cannot end up referencing
1240 * a stale map.
1242 TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
1243 skb->sk = psock->sock;
1244 bpf_compute_data_end_sk_skb(skb);
1246 rc = (*prog->bpf_func)(skb, prog->insnsi);
1250 /* Moving return codes from UAPI namespace into internal namespace */
1251 return rc == SK_PASS ?
1252 (TCP_SKB_CB(skb)->bpf.sk_redir ? __SK_REDIRECT : __SK_PASS) :
1256 static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
1258 struct sock *sk = psock->sock;
1259 int copied = 0, num_sg;
1260 struct sk_msg_buff *r;
1262 r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
1266 if (!sk_rmem_schedule(sk, skb, skb->len)) {
1271 sg_init_table(r->sg_data, MAX_SKB_FRAGS);
1272 num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
1273 if (unlikely(num_sg < 0)) {
1277 sk_mem_charge(sk, skb->len);
1280 r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
1282 list_add_tail(&r->list, &psock->ingress);
1283 sk->sk_data_ready(sk);
1287 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
1289 struct smap_psock *peer;
1294 rc = smap_verdict_func(psock, skb);
1297 sk = do_sk_redirect_map(skb);
1303 peer = smap_psock_sk(sk);
1304 in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
1306 if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
1307 !test_bit(SMAP_TX_RUNNING, &peer->state))) {
1312 if (!in && sock_writeable(sk)) {
1313 skb_set_owner_w(skb, sk);
1314 skb_queue_tail(&peer->rxqueue, skb);
1315 schedule_work(&peer->tx_work);
1318 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
1319 skb_queue_tail(&peer->rxqueue, skb);
1320 schedule_work(&peer->tx_work);
1323 /* Fall through and free skb otherwise */
1330 static void smap_report_sk_error(struct smap_psock *psock, int err)
1332 struct sock *sk = psock->sock;
1335 sk->sk_error_report(sk);
1338 static void smap_read_sock_strparser(struct strparser *strp,
1339 struct sk_buff *skb)
1341 struct smap_psock *psock;
1344 psock = container_of(strp, struct smap_psock, strp);
1345 smap_do_verdict(psock, skb);
1349 /* Called with lock held on socket */
1350 static void smap_data_ready(struct sock *sk)
1352 struct smap_psock *psock;
1355 psock = smap_psock_sk(sk);
1356 if (likely(psock)) {
1357 write_lock_bh(&sk->sk_callback_lock);
1358 strp_data_ready(&psock->strp);
1359 write_unlock_bh(&sk->sk_callback_lock);
1364 static void smap_tx_work(struct work_struct *w)
1366 struct smap_psock *psock;
1367 struct sk_buff *skb;
1370 psock = container_of(w, struct smap_psock, tx_work);
1372 /* lock sock to avoid losing sk_socket at some point during loop */
1373 lock_sock(psock->sock);
1374 if (psock->save_skb) {
1375 skb = psock->save_skb;
1376 rem = psock->save_rem;
1377 off = psock->save_off;
1378 psock->save_skb = NULL;
1382 while ((skb = skb_dequeue(&psock->rxqueue))) {
1388 flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
1390 if (likely(psock->sock->sk_socket)) {
1392 n = smap_do_ingress(psock, skb);
1394 n = skb_send_sock_locked(psock->sock,
1402 /* Retry when space is available */
1403 psock->save_skb = skb;
1404 psock->save_rem = rem;
1405 psock->save_off = off;
1408 /* Hard errors break pipe and stop xmit */
1409 smap_report_sk_error(psock, n ? -n : EPIPE);
1410 clear_bit(SMAP_TX_RUNNING, &psock->state);
1422 release_sock(psock->sock);
1425 static void smap_write_space(struct sock *sk)
1427 struct smap_psock *psock;
1428 void (*write_space)(struct sock *sk);
1431 psock = smap_psock_sk(sk);
1432 if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
1433 schedule_work(&psock->tx_work);
1434 write_space = psock->save_write_space;
1439 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
1441 if (!psock->strp_enabled)
1443 sk->sk_data_ready = psock->save_data_ready;
1444 sk->sk_write_space = psock->save_write_space;
1445 psock->save_data_ready = NULL;
1446 psock->save_write_space = NULL;
1447 strp_stop(&psock->strp);
1448 psock->strp_enabled = false;
1451 static void smap_destroy_psock(struct rcu_head *rcu)
1453 struct smap_psock *psock = container_of(rcu,
1454 struct smap_psock, rcu);
1456 /* Now that a grace period has passed, there is no longer
1457 * any reference to this sock in the sockmap, so we can
1458 * destroy the psock, strparser, and bpf programs. But,
1459 * because we use workqueue sync operations, we cannot
1460 * do it in rcu context.
1462 schedule_work(&psock->gc_work);
1465 static bool psock_is_smap_sk(struct sock *sk)
1467 return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops;
1470 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
1472 if (refcount_dec_and_test(&psock->refcnt)) {
1473 if (psock_is_smap_sk(sock))
1474 tcp_cleanup_ulp(sock);
1475 write_lock_bh(&sock->sk_callback_lock);
1476 smap_stop_sock(psock, sock);
1477 write_unlock_bh(&sock->sk_callback_lock);
1478 clear_bit(SMAP_TX_RUNNING, &psock->state);
1479 rcu_assign_sk_user_data(sock, NULL);
1480 call_rcu_sched(&psock->rcu, smap_destroy_psock);
1484 static int smap_parse_func_strparser(struct strparser *strp,
1485 struct sk_buff *skb)
1487 struct smap_psock *psock;
1488 struct bpf_prog *prog;
1492 psock = container_of(strp, struct smap_psock, strp);
1493 prog = READ_ONCE(psock->bpf_parse);
1495 if (unlikely(!prog)) {
1500 /* Attach the socket for the bpf program to use if needed. We can do this
1501 * because strparser clones the skb before handing it to an upper
1502 * layer, meaning skb_orphan has been called. We NULL sk on the
1503 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
1504 * later and because we are not charging the memory of this skb to
1507 skb->sk = psock->sock;
1508 bpf_compute_data_end_sk_skb(skb);
1509 rc = (*prog->bpf_func)(skb, prog->insnsi);
1515 static int smap_read_sock_done(struct strparser *strp, int err)
1520 static int smap_init_sock(struct smap_psock *psock,
1523 static const struct strp_callbacks cb = {
1524 .rcv_msg = smap_read_sock_strparser,
1525 .parse_msg = smap_parse_func_strparser,
1526 .read_sock_done = smap_read_sock_done,
1529 return strp_init(&psock->strp, sk, &cb);
1532 static void smap_init_progs(struct smap_psock *psock,
1533 struct bpf_prog *verdict,
1534 struct bpf_prog *parse)
1536 struct bpf_prog *orig_parse, *orig_verdict;
1538 orig_parse = xchg(&psock->bpf_parse, parse);
1539 orig_verdict = xchg(&psock->bpf_verdict, verdict);
1542 bpf_prog_put(orig_verdict);
1544 bpf_prog_put(orig_parse);
1547 static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
1549 if (sk->sk_data_ready == smap_data_ready)
1551 psock->save_data_ready = sk->sk_data_ready;
1552 psock->save_write_space = sk->sk_write_space;
1553 sk->sk_data_ready = smap_data_ready;
1554 sk->sk_write_space = smap_write_space;
1555 psock->strp_enabled = true;
1558 static void sock_map_remove_complete(struct bpf_stab *stab)
1560 bpf_map_area_free(stab->sock_map);
1564 static void smap_gc_work(struct work_struct *w)
1566 struct smap_psock_map_entry *e, *tmp;
1567 struct sk_msg_buff *md, *mtmp;
1568 struct smap_psock *psock;
1570 psock = container_of(w, struct smap_psock, gc_work);
1572 /* no callback lock needed because we already detached sockmap ops */
1573 if (psock->strp_enabled)
1574 strp_done(&psock->strp);
1576 cancel_work_sync(&psock->tx_work);
1577 __skb_queue_purge(&psock->rxqueue);
1579 /* At this point all strparser and xmit work must be complete */
1580 if (psock->bpf_parse)
1581 bpf_prog_put(psock->bpf_parse);
1582 if (psock->bpf_verdict)
1583 bpf_prog_put(psock->bpf_verdict);
1584 if (psock->bpf_tx_msg)
1585 bpf_prog_put(psock->bpf_tx_msg);
1588 free_start_sg(psock->sock, psock->cork, true);
1592 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
1593 list_del(&md->list);
1594 free_start_sg(psock->sock, md, true);
1598 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1603 if (psock->sk_redir)
1604 sock_put(psock->sk_redir);
1606 sock_put(psock->sock);
1610 static struct smap_psock *smap_init_psock(struct sock *sock, int node)
1612 struct smap_psock *psock;
1614 psock = kzalloc_node(sizeof(struct smap_psock),
1615 GFP_ATOMIC | __GFP_NOWARN,
1618 return ERR_PTR(-ENOMEM);
1620 psock->eval = __SK_NONE;
1622 skb_queue_head_init(&psock->rxqueue);
1623 INIT_WORK(&psock->tx_work, smap_tx_work);
1624 INIT_WORK(&psock->gc_work, smap_gc_work);
1625 INIT_LIST_HEAD(&psock->maps);
1626 INIT_LIST_HEAD(&psock->ingress);
1627 refcount_set(&psock->refcnt, 1);
1628 spin_lock_init(&psock->maps_lock);
1630 rcu_assign_sk_user_data(sock, psock);
1635 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
1637 struct bpf_stab *stab;
1641 if (!capable(CAP_NET_ADMIN))
1642 return ERR_PTR(-EPERM);
1644 /* check sanity of attributes */
1645 if (attr->max_entries == 0 || attr->key_size != 4 ||
1646 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1647 return ERR_PTR(-EINVAL);
1649 err = bpf_tcp_ulp_register();
1650 if (err && err != -EEXIST)
1651 return ERR_PTR(err);
1653 stab = kzalloc(sizeof(*stab), GFP_USER);
1655 return ERR_PTR(-ENOMEM);
1657 bpf_map_init_from_attr(&stab->map, attr);
1658 raw_spin_lock_init(&stab->lock);
1660 /* make sure page count doesn't overflow */
1661 cost = (u64) stab->map.max_entries * sizeof(struct sock *);
1663 if (cost >= U32_MAX - PAGE_SIZE)
1666 stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
1668 /* if map size is larger than memlock limit, reject it early */
1669 err = bpf_map_precharge_memlock(stab->map.pages);
1674 stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
1675 sizeof(struct sock *),
1676 stab->map.numa_node);
1677 if (!stab->sock_map)
1683 return ERR_PTR(err);
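/* For illustration, the attribute checks above correspond to a user-space
 * map creation along these lines (a sketch; the libbpf bpf_create_map()
 * wrapper and the max_entries value are assumptions). key_size and
 * value_size must both be 4, max_entries must be non-zero, and map_flags
 * must stay within SOCK_CREATE_FLAG_MASK:
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
 *				    sizeof(int), 1024, 0);
 */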
1686 static void smap_list_map_remove(struct smap_psock *psock,
1687 struct sock **entry)
1689 struct smap_psock_map_entry *e, *tmp;
1691 spin_lock_bh(&psock->maps_lock);
1692 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1693 if (e->entry == entry) {
1698 spin_unlock_bh(&psock->maps_lock);
1701 static void smap_list_hash_remove(struct smap_psock *psock,
1702 struct htab_elem *hash_link)
1704 struct smap_psock_map_entry *e, *tmp;
1706 spin_lock_bh(&psock->maps_lock);
1707 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1708 struct htab_elem *c = rcu_dereference(e->hash_link);
1710 if (c == hash_link) {
1715 spin_unlock_bh(&psock->maps_lock);
1718 static void sock_map_free(struct bpf_map *map)
1720 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1725 /* At this point no update, lookup or delete operations can happen.
1726 * However, be aware we can still get socket state event updates
1727 * and data ready callbacks that reference the psock from sk_user_data.
1728 * Also, psock worker threads are still in-flight. So smap_release_sock
1729 * will only free the psock after cancel_sync on the worker threads
1730 * and a grace period expires to ensure the psock is really safe to remove.
1733 raw_spin_lock_bh(&stab->lock);
1734 for (i = 0; i < stab->map.max_entries; i++) {
1735 struct smap_psock *psock;
1738 sock = stab->sock_map[i];
1741 stab->sock_map[i] = NULL;
1742 psock = smap_psock_sk(sock);
1743 /* This check handles a racing sock event that can get the
1744 * sk_callback_lock before this case but after xchg happens
1745 * causing the refcnt to hit zero and sock user data (psock)
1746 * to be null and queued for garbage collection.
1748 if (likely(psock)) {
1749 smap_list_map_remove(psock, &stab->sock_map[i]);
1750 smap_release_sock(psock, sock);
1753 raw_spin_unlock_bh(&stab->lock);
1756 sock_map_remove_complete(stab);
1759 static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
1761 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1762 u32 i = key ? *(u32 *)key : U32_MAX;
1763 u32 *next = (u32 *)next_key;
1765 if (i >= stab->map.max_entries) {
1770 if (i == stab->map.max_entries - 1)
1777 struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
1779 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1781 if (key >= map->max_entries)
1784 return READ_ONCE(stab->sock_map[key]);
1787 static int sock_map_delete_elem(struct bpf_map *map, void *key)
1789 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1790 struct smap_psock *psock;
1791 int k = *(u32 *)key;
1794 if (k >= map->max_entries)
1797 raw_spin_lock_bh(&stab->lock);
1798 sock = stab->sock_map[k];
1799 stab->sock_map[k] = NULL;
1800 raw_spin_unlock_bh(&stab->lock);
1804 psock = smap_psock_sk(sock);
1807 if (psock->bpf_parse) {
1808 write_lock_bh(&sock->sk_callback_lock);
1809 smap_stop_sock(psock, sock);
1810 write_unlock_bh(&sock->sk_callback_lock);
1812 smap_list_map_remove(psock, &stab->sock_map[k]);
1813 smap_release_sock(psock, sock);
1817 /* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
1818 * done inside rcu critical sections. This ensures on updates that the psock
1819 * will not be released via smap_release_sock() until concurrent updates/deletes
1820 * complete. All operations operate on sock_map using cmpxchg and xchg
1821 * operations to ensure we do not get stale references. Any reads into the
1822 * map must be done with READ_ONCE() because of this.
1824 * A psock is destroyed via call_rcu and after any worker threads are cancelled
1825 * and synced, so we are certain all references from the update/lookup/delete
1826 * operations as well as references in the data path are no longer in use.
1828 * Psocks may exist in multiple maps, but only a single set of parse/verdict
1829 * programs may be inherited from the maps they belong to. A reference count
1830 * is kept with the total number of references to the psock from all maps. The
1831 * psock will not be released until this reaches zero. The psock and sock
1832 * user data use the sk_callback_lock to protect critical data structures
1833 * from concurrent access. This prevents two updates from modifying the
1834 * user data in sock at the same time; the lock is required anyway for
1835 * modifying callbacks, we simply increase its scope slightly.
1838 * - psock must always be read inside RCU critical section
1839 * - sk_user_data must only be modified inside sk_callback_lock and read
1840 * inside RCU critical section.
1841 * - psock->maps list must only be read & modified inside sk_callback_lock
1842 * - sock_map must use READ_ONCE and (cmp)xchg operations
1843 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
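/* For illustration, the read-side rules above amount to the following
 * pattern (a sketch mirroring __sock_map_lookup_elem() below; stab and i
 * stand for any sock map and index):
 *
 *	rcu_read_lock();
 *	sock = READ_ONCE(stab->sock_map[i]);
 *	if (sock)
 *		psock = rcu_dereference_sk_user_data(sock);
 *	rcu_read_unlock();
 */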
1846 static int __sock_map_ctx_update_elem(struct bpf_map *map,
1847 struct bpf_sock_progs *progs,
1851 struct bpf_prog *verdict, *parse, *tx_msg;
1852 struct smap_psock *psock;
1856 /* 1. If the sock map has BPF programs, those will be inherited by the
1857 * sock being added. If the sock is already attached to BPF programs,
1858 * this results in an error.
1860 verdict = READ_ONCE(progs->bpf_verdict);
1861 parse = READ_ONCE(progs->bpf_parse);
1862 tx_msg = READ_ONCE(progs->bpf_tx_msg);
1864 if (parse && verdict) {
1865 /* bpf prog refcnt may be zero if a concurrent attach operation
1866 * removes the program after the above READ_ONCE() but before
1867 * we increment the refcnt. If this is the case, abort with an
1870 verdict = bpf_prog_inc_not_zero(verdict);
1871 if (IS_ERR(verdict))
1872 return PTR_ERR(verdict);
1874 parse = bpf_prog_inc_not_zero(parse);
1875 if (IS_ERR(parse)) {
1876 bpf_prog_put(verdict);
1877 return PTR_ERR(parse);
1882 tx_msg = bpf_prog_inc_not_zero(tx_msg);
1883 if (IS_ERR(tx_msg)) {
1884 if (parse && verdict) {
1885 bpf_prog_put(parse);
1886 bpf_prog_put(verdict);
1888 return PTR_ERR(tx_msg);
1892 psock = smap_psock_sk(sock);
1894 /* 2. Do not allow inheriting programs if a psock exists and has
1895 * already inherited programs. This would create confusion on
1896 * which parser/verdict program is running. If no psock exists,
1897 * create one. Inside sk_callback_lock to ensure a concurrent create
1898 * doesn't update user data.
1901 if (!psock_is_smap_sk(sock)) {
1905 if (READ_ONCE(psock->bpf_parse) && parse) {
1909 if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) {
1913 if (!refcount_inc_not_zero(&psock->refcnt)) {
1918 psock = smap_init_psock(sock, map->numa_node);
1919 if (IS_ERR(psock)) {
1920 err = PTR_ERR(psock);
1924 set_bit(SMAP_TX_RUNNING, &psock->state);
1928 /* 3. At this point we have a reference to a valid psock that is
1929 * running. Attach any BPF programs needed.
1932 bpf_tcp_msg_add(psock, sock, tx_msg);
1934 err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
1939 if (parse && verdict && !psock->strp_enabled) {
1940 err = smap_init_sock(psock, sock);
1943 smap_init_progs(psock, verdict, parse);
1944 write_lock_bh(&sock->sk_callback_lock);
1945 smap_start_sock(psock, sock);
1946 write_unlock_bh(&sock->sk_callback_lock);
1951 smap_release_sock(psock, sock);
1953 if (parse && verdict) {
1954 bpf_prog_put(parse);
1955 bpf_prog_put(verdict);
1958 bpf_prog_put(tx_msg);
1962 static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
1963 struct bpf_map *map,
1964 void *key, u64 flags)
1966 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1967 struct bpf_sock_progs *progs = &stab->progs;
1968 struct sock *osock, *sock = skops->sk;
1969 struct smap_psock_map_entry *e;
1970 struct smap_psock *psock;
1971 u32 i = *(u32 *)key;
1974 if (unlikely(flags > BPF_EXIST))
1976 if (unlikely(i >= stab->map.max_entries))
1979 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
1983 err = __sock_map_ctx_update_elem(map, progs, sock, key);
1987 /* psock guaranteed to be present. */
1988 psock = smap_psock_sk(sock);
1989 raw_spin_lock_bh(&stab->lock);
1990 osock = stab->sock_map[i];
1991 if (osock && flags == BPF_NOEXIST) {
1995 if (!osock && flags == BPF_EXIST) {
2000 e->entry = &stab->sock_map[i];
2002 spin_lock_bh(&psock->maps_lock);
2003 list_add_tail(&e->list, &psock->maps);
2004 spin_unlock_bh(&psock->maps_lock);
2006 stab->sock_map[i] = sock;
2008 psock = smap_psock_sk(osock);
2009 smap_list_map_remove(psock, &stab->sock_map[i]);
2010 smap_release_sock(psock, osock);
2012 raw_spin_unlock_bh(&stab->lock);
2015 smap_release_sock(psock, sock);
2016 raw_spin_unlock_bh(&stab->lock);
2022 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
2024 struct bpf_sock_progs *progs;
2025 struct bpf_prog *orig;
2027 if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
2028 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
2030 progs = &stab->progs;
2031 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH) {
2032 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2034 progs = &htab->progs;
2040 case BPF_SK_MSG_VERDICT:
2041 orig = xchg(&progs->bpf_tx_msg, prog);
2043 case BPF_SK_SKB_STREAM_PARSER:
2044 orig = xchg(&progs->bpf_parse, prog);
2046 case BPF_SK_SKB_STREAM_VERDICT:
2047 orig = xchg(&progs->bpf_verdict, prog);
2059 int sockmap_get_from_fd(const union bpf_attr *attr, int type,
2060 struct bpf_prog *prog)
2062 int ufd = attr->target_fd;
2063 struct bpf_map *map;
2068 map = __bpf_map_get(f);
2070 return PTR_ERR(map);
2072 err = sock_map_prog(map, prog, attr->attach_type);
2077 static void *sock_map_lookup(struct bpf_map *map, void *key)
2082 static int sock_map_update_elem(struct bpf_map *map,
2083 void *key, void *value, u64 flags)
2085 struct bpf_sock_ops_kern skops;
2086 u32 fd = *(u32 *)value;
2087 struct socket *socket;
2090 socket = sockfd_lookup(fd, &err);
2094 skops.sk = socket->sk;
2100 if (skops.sk->sk_type != SOCK_STREAM ||
2101 skops.sk->sk_protocol != IPPROTO_TCP) {
2106 lock_sock(skops.sk);
2109 err = sock_map_ctx_update_elem(&skops, map, key, flags);
2112 release_sock(skops.sk);
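/* For illustration, the expected user-space usage of the update path above
 * is roughly the sketch below; the file descriptors and the listening socket
 * are assumptions, and the value written into the map is the socket fd.
 *
 *	int key = 0;
 *	int sock_fd = accept(listen_fd, NULL, NULL);
 *
 *	bpf_prog_attach(parser_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *	bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 */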
2117 static void sock_map_release(struct bpf_map *map)
2119 struct bpf_sock_progs *progs;
2120 struct bpf_prog *orig;
2122 if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
2123 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
2125 progs = &stab->progs;
2127 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2129 progs = &htab->progs;
2132 orig = xchg(&progs->bpf_parse, NULL);
2135 orig = xchg(&progs->bpf_verdict, NULL);
2139 orig = xchg(&progs->bpf_tx_msg, NULL);
2144 static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
2146 struct bpf_htab *htab;
2150 if (!capable(CAP_NET_ADMIN))
2151 return ERR_PTR(-EPERM);
2153 /* check sanity of attributes */
2154 if (attr->max_entries == 0 ||
2155 attr->key_size == 0 ||
2156 attr->value_size != 4 ||
2157 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
2158 return ERR_PTR(-EINVAL);
2160 if (attr->key_size > MAX_BPF_STACK)
2161 /* eBPF programs initialize keys on stack, so they cannot be
2162 * larger than max stack size
2164 return ERR_PTR(-E2BIG);
2166 err = bpf_tcp_ulp_register();
2167 if (err && err != -EEXIST)
2168 return ERR_PTR(err);
2170 htab = kzalloc(sizeof(*htab), GFP_USER);
2172 return ERR_PTR(-ENOMEM);
2174 bpf_map_init_from_attr(&htab->map, attr);
2176 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
2177 htab->elem_size = sizeof(struct htab_elem) +
2178 round_up(htab->map.key_size, 8);
2180 if (htab->n_buckets == 0 ||
2181 htab->n_buckets > U32_MAX / sizeof(struct bucket))
2184 cost = (u64) htab->n_buckets * sizeof(struct bucket) +
2185 (u64) htab->elem_size * htab->map.max_entries;
2187 if (cost >= U32_MAX - PAGE_SIZE)
2190 htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
2191 err = bpf_map_precharge_memlock(htab->map.pages);
2196 htab->buckets = bpf_map_area_alloc(
2197 htab->n_buckets * sizeof(struct bucket),
2198 htab->map.numa_node);
2202 for (i = 0; i < htab->n_buckets; i++) {
2203 INIT_HLIST_HEAD(&htab->buckets[i].head);
2204 raw_spin_lock_init(&htab->buckets[i].lock);
2210 return ERR_PTR(err);
2213 static void __bpf_htab_free(struct rcu_head *rcu)
2215 struct bpf_htab *htab;
2217 htab = container_of(rcu, struct bpf_htab, rcu);
2218 bpf_map_area_free(htab->buckets);
2222 static void sock_hash_free(struct bpf_map *map)
2224 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2229 /* At this point no update, lookup or delete operations can happen.
2230 * However, be aware we can still get socket state event updates
2231 * and data ready callbacks that reference the psock from sk_user_data.
2232 * Also, psock worker threads are still in-flight. So smap_release_sock
2233 * will only free the psock after cancel_sync on the worker threads
2234 * and a grace period expires to ensure the psock is really safe to remove.
2237 for (i = 0; i < htab->n_buckets; i++) {
2238 struct bucket *b = __select_bucket(htab, i);
2239 struct hlist_head *head;
2240 struct hlist_node *n;
2241 struct htab_elem *l;
2243 raw_spin_lock_bh(&b->lock);
2245 hlist_for_each_entry_safe(l, n, head, hash_node) {
2246 struct sock *sock = l->sk;
2247 struct smap_psock *psock;
2249 hlist_del_rcu(&l->hash_node);
2250 psock = smap_psock_sk(sock);
2251 /* This check handles a racing sock event that can get
2252 * the sk_callback_lock before this case but after xchg
2253 * causing the refcnt to hit zero and sock user data
2254 * (psock) to be null and queued for garbage collection.
2256 if (likely(psock)) {
2257 smap_list_hash_remove(psock, l);
2258 smap_release_sock(psock, sock);
2260 free_htab_elem(htab, l);
2262 raw_spin_unlock_bh(&b->lock);
2265 call_rcu(&htab->rcu, __bpf_htab_free);
2268 static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
2269 void *key, u32 key_size, u32 hash,
2271 struct htab_elem *old_elem)
2273 struct htab_elem *l_new;
2275 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
2277 atomic_dec(&htab->count);
2278 return ERR_PTR(-E2BIG);
2281 l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
2282 htab->map.numa_node);
2284 atomic_dec(&htab->count);
2285 return ERR_PTR(-ENOMEM);
2288 memcpy(l_new->key, key, key_size);
2294 static inline u32 htab_map_hash(const void *key, u32 key_len)
2296 return jhash(key, key_len, 0);
2299 static int sock_hash_get_next_key(struct bpf_map *map,
2300 void *key, void *next_key)
2302 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2303 struct htab_elem *l, *next_l;
2304 struct hlist_head *h;
2308 WARN_ON_ONCE(!rcu_read_lock_held());
2310 key_size = map->key_size;
2312 goto find_first_elem;
2313 hash = htab_map_hash(key, key_size);
2314 h = select_bucket(htab, hash);
2316 l = lookup_elem_raw(h, hash, key, key_size);
2318 goto find_first_elem;
2319 next_l = hlist_entry_safe(
2320 rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
2321 struct htab_elem, hash_node);
2323 memcpy(next_key, next_l->key, key_size);
2327 /* no more elements in this hash list, go to the next bucket */
2328 i = hash & (htab->n_buckets - 1);
2332 /* iterate over buckets */
2333 for (; i < htab->n_buckets; i++) {
2334 h = select_bucket(htab, i);
2336 /* pick first element in the bucket */
2337 next_l = hlist_entry_safe(
2338 rcu_dereference_raw(hlist_first_rcu(h)),
2339 struct htab_elem, hash_node);
2341 /* if it's not empty, just return it */
2342 memcpy(next_key, next_l->key, key_size);
2347 /* iterated over all buckets and all elements */
2351 static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
2352 struct bpf_map *map,
2353 void *key, u64 map_flags)
2355 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2356 struct bpf_sock_progs *progs = &htab->progs;
2357 struct htab_elem *l_new = NULL, *l_old;
2358 struct smap_psock_map_entry *e = NULL;
2359 struct hlist_head *head;
2360 struct smap_psock *psock;
2368 if (sock->sk_type != SOCK_STREAM ||
2369 sock->sk_protocol != IPPROTO_TCP)
2372 if (unlikely(map_flags > BPF_EXIST))
2375 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
2379 WARN_ON_ONCE(!rcu_read_lock_held());
2380 key_size = map->key_size;
2381 hash = htab_map_hash(key, key_size);
2382 b = __select_bucket(htab, hash);
2385 err = __sock_map_ctx_update_elem(map, progs, sock, key);
2389 /* psock is valid here because otherwise the above *ctx_update_elem would
2390 * have thrown an error. It is safe to skip the error check.
2392 psock = smap_psock_sk(sock);
2393 raw_spin_lock_bh(&b->lock);
2394 l_old = lookup_elem_raw(head, hash, key, key_size);
2395 if (l_old && map_flags == BPF_NOEXIST) {
2399 if (!l_old && map_flags == BPF_EXIST) {
2404 l_new = alloc_sock_hash_elem(htab, key, key_size, hash, sock, l_old);
2405 if (IS_ERR(l_new)) {
2406 err = PTR_ERR(l_new);
2410 rcu_assign_pointer(e->hash_link, l_new);
2412 spin_lock_bh(&psock->maps_lock);
2413 list_add_tail(&e->list, &psock->maps);
2414 spin_unlock_bh(&psock->maps_lock);
2416 /* add new element to the head of the list, so that
2417 * concurrent search will find it before old elem
2419 hlist_add_head_rcu(&l_new->hash_node, head);
2421 psock = smap_psock_sk(l_old->sk);
2423 hlist_del_rcu(&l_old->hash_node);
2424 smap_list_hash_remove(psock, l_old);
2425 smap_release_sock(psock, l_old->sk);
2426 free_htab_elem(htab, l_old);
2428 raw_spin_unlock_bh(&b->lock);
2431 smap_release_sock(psock, sock);
2432 raw_spin_unlock_bh(&b->lock);
2438 static int sock_hash_update_elem(struct bpf_map *map,
2439 void *key, void *value, u64 flags)
2441 struct bpf_sock_ops_kern skops;
2442 u32 fd = *(u32 *)value;
2443 struct socket *socket;
2446 socket = sockfd_lookup(fd, &err);
2450 skops.sk = socket->sk;
2456 lock_sock(skops.sk);
2459 err = sock_hash_ctx_update_elem(&skops, map, key, flags);
2462 release_sock(skops.sk);
2467 static int sock_hash_delete_elem(struct bpf_map *map, void *key)
2469 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2470 struct hlist_head *head;
2472 struct htab_elem *l;
2476 key_size = map->key_size;
2477 hash = htab_map_hash(key, key_size);
2478 b = __select_bucket(htab, hash);
2481 raw_spin_lock_bh(&b->lock);
2482 l = lookup_elem_raw(head, hash, key, key_size);
2484 struct sock *sock = l->sk;
2485 struct smap_psock *psock;
2487 hlist_del_rcu(&l->hash_node);
2488 psock = smap_psock_sk(sock);
2489 /* This check handles a racing sock event that can get the
2490 * sk_callback_lock before this case but after xchg happens
2491 * causing the refcnt to hit zero and sock user data (psock)
2492 * to be null and queued for garbage collection.
2494 if (likely(psock)) {
2495 smap_list_hash_remove(psock, l);
2496 smap_release_sock(psock, sock);
2498 free_htab_elem(htab, l);
2501 raw_spin_unlock_bh(&b->lock);
2505 struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
2507 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2508 struct hlist_head *head;
2509 struct htab_elem *l;
2514 key_size = map->key_size;
2515 hash = htab_map_hash(key, key_size);
2516 b = __select_bucket(htab, hash);
2519 l = lookup_elem_raw(head, hash, key, key_size);
2520 sk = l ? l->sk : NULL;
2524 const struct bpf_map_ops sock_map_ops = {
2525 .map_alloc = sock_map_alloc,
2526 .map_free = sock_map_free,
2527 .map_lookup_elem = sock_map_lookup,
2528 .map_get_next_key = sock_map_get_next_key,
2529 .map_update_elem = sock_map_update_elem,
2530 .map_delete_elem = sock_map_delete_elem,
2531 .map_release_uref = sock_map_release,
2532 .map_check_btf = map_check_no_btf,
2535 const struct bpf_map_ops sock_hash_ops = {
2536 .map_alloc = sock_hash_alloc,
2537 .map_free = sock_hash_free,
2538 .map_lookup_elem = sock_map_lookup,
2539 .map_get_next_key = sock_hash_get_next_key,
2540 .map_update_elem = sock_hash_update_elem,
2541 .map_delete_elem = sock_hash_delete_elem,
2542 .map_release_uref = sock_map_release,
2543 .map_check_btf = map_check_no_btf,
2546 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
2547 struct bpf_map *, map, void *, key, u64, flags)
2549 WARN_ON_ONCE(!rcu_read_lock_held());
2550 return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
2553 const struct bpf_func_proto bpf_sock_map_update_proto = {
2554 .func = bpf_sock_map_update,
2557 .ret_type = RET_INTEGER,
2558 .arg1_type = ARG_PTR_TO_CTX,
2559 .arg2_type = ARG_CONST_MAP_PTR,
2560 .arg3_type = ARG_PTR_TO_MAP_KEY,
2561 .arg4_type = ARG_ANYTHING,
2564 BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
2565 struct bpf_map *, map, void *, key, u64, flags)
2567 WARN_ON_ONCE(!rcu_read_lock_held());
2568 return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
2571 const struct bpf_func_proto bpf_sock_hash_update_proto = {
2572 .func = bpf_sock_hash_update,
2575 .ret_type = RET_INTEGER,
2576 .arg1_type = ARG_PTR_TO_CTX,
2577 .arg2_type = ARG_CONST_MAP_PTR,
2578 .arg3_type = ARG_PTR_TO_MAP_KEY,
2579 .arg4_type = ARG_ANYTHING,