sock_map: Introduce BPF_SK_SKB_VERDICT
net/core/skmsg.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

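/* Return true if it is safe to coalesce new bytes into the last sg element,
 * i.e. the ring already holds the caller's elem_first_coalesce element, so
 * we never extend an element that predates the current operation.
 */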
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
        if (msg->sg.end > msg->sg.start &&
            elem_first_coalesce < msg->sg.end)
                return true;

        if (msg->sg.end < msg->sg.start &&
            (elem_first_coalesce > msg->sg.start ||
             elem_first_coalesce < msg->sg.end))
                return true;

        return false;
}

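/* Grow @msg so it holds @len bytes in total, allocating the difference from
 * the socket's page frag and charging it to @sk. New bytes are coalesced
 * into the last element when allowed, otherwise appended as fresh
 * scatterlist entries. Returns 0 on success, -ENOMEM if memory cannot be
 * scheduled, or -ENOSPC if the sg ring is full.
 */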
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
                 int elem_first_coalesce)
{
        struct page_frag *pfrag = sk_page_frag(sk);
        int ret = 0;

        len -= msg->sg.size;
        while (len > 0) {
                struct scatterlist *sge;
                u32 orig_offset;
                int use, i;

                if (!sk_page_frag_refill(sk, pfrag))
                        return -ENOMEM;

                orig_offset = pfrag->offset;
                use = min_t(int, len, pfrag->size - orig_offset);
                if (!sk_wmem_schedule(sk, use))
                        return -ENOMEM;

                i = msg->sg.end;
                sk_msg_iter_var_prev(i);
                sge = &msg->sg.data[i];

                if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
                    sg_page(sge) == pfrag->page &&
                    sge->offset + sge->length == orig_offset) {
                        sge->length += use;
                } else {
                        if (sk_msg_full(msg)) {
                                ret = -ENOSPC;
                                break;
                        }

                        sge = &msg->sg.data[msg->sg.end];
                        sg_unmark_end(sge);
                        sg_set_page(sge, pfrag->page, use, orig_offset);
                        get_page(pfrag->page);
                        sk_msg_iter_next(msg, end);
                }

                sk_mem_charge(sk, use);
                msg->sg.size += use;
                pfrag->offset += use;
                len -= use;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

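/* Clone @len bytes at offset @off of @src into @dst. The underlying pages
 * are shared (referenced via sk_msg_page_add()) rather than copied, and the
 * cloned bytes are charged to @sk. Returns 0 on success or -ENOSPC when
 * @src runs out of data or @dst runs out of slots.
 */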
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
                 u32 off, u32 len)
{
        int i = src->sg.start;
        struct scatterlist *sge = sk_msg_elem(src, i);
        struct scatterlist *sgd = NULL;
        u32 sge_len, sge_off;

        while (off) {
                if (sge->length > off)
                        break;
                off -= sge->length;
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && off)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        while (len) {
                sge_len = sge->length - off;
                if (sge_len > len)
                        sge_len = len;

                if (dst->sg.end)
                        sgd = sk_msg_elem(dst, dst->sg.end - 1);

                if (sgd &&
                    (sg_page(sge) == sg_page(sgd)) &&
                    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
                        sgd->length += sge_len;
                        dst->sg.size += sge_len;
                } else if (!sk_msg_full(dst)) {
                        sge_off = sge->offset + off;
                        sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
                } else {
                        return -ENOSPC;
                }

                off = 0;
                len -= sge_len;
                sk_mem_charge(sk, sge_len);
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && len)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

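/* Uncharge @bytes from @sk and consume them from the front of @msg, zeroing
 * any scatterlist elements that are fully returned and advancing sg.start.
 * The pages themselves are not released here.
 */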
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = sk_msg_elem(msg, i);

                if (bytes < sge->length) {
                        sge->length -= bytes;
                        sge->offset += bytes;
                        sk_mem_uncharge(sk, bytes);
                        break;
                }

                sk_mem_uncharge(sk, sge->length);
                bytes -= sge->length;
                sge->length = 0;
                sge->offset = 0;
                sk_msg_iter_var_next(i);
        } while (bytes && i != msg->sg.end);
        msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

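/* Uncharge up to @bytes of memory from @sk, walking every element from
 * sg.start to sg.end without modifying the message itself.
 */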
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = &msg->sg.data[i];
                int uncharge = (bytes < sge->length) ? bytes : sge->length;

                sk_mem_uncharge(sk, uncharge);
                bytes -= uncharge;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
                            bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        u32 len = sge->length;

        /* When the skb owns the memory we free it from consume_skb path. */
        if (!msg->skb) {
                if (charge)
                        sk_mem_uncharge(sk, len);
                put_page(sg_page(sge));
        }
        memset(sge, 0, sizeof(*sge));
        return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
                         bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        int freed = 0;

        while (msg->sg.size) {
                msg->sg.size -= sge->length;
                freed += sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, msg->sg.size);
                sge = sk_msg_elem(msg, i);
        }
        consume_skb(msg->skb);
        sk_msg_init(msg);
        return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

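/* Free the first @bytes of @msg: fully consumed elements are released and a
 * partially consumed element is adjusted in place. Memory is uncharged from
 * @sk only when @charge is true.
 */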
static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes, bool charge)
{
        struct scatterlist *sge;
        u32 i = msg->sg.start;

        while (bytes) {
                sge = sk_msg_elem(msg, i);
                if (!sge->length)
                        break;
                if (bytes < sge->length) {
                        if (charge)
                                sk_mem_uncharge(sk, bytes);
                        sge->length -= bytes;
                        sge->offset += bytes;
                        msg->sg.size -= bytes;
                        break;
                }

                msg->sg.size -= sge->length;
                bytes -= sge->length;
                sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, bytes);
        }
        msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, false);
}

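/* Trim @msg down to @len bytes, freeing whole elements from the tail and
 * shrinking the final partial element. sg.curr and sg.copybreak are pulled
 * back if they now point past the end of the message.
 */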
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
        int trim = msg->sg.size - len;
        u32 i = msg->sg.end;

        if (trim <= 0) {
                WARN_ON(trim < 0);
                return;
        }

        sk_msg_iter_var_prev(i);
        msg->sg.size = len;
        while (msg->sg.data[i].length &&
               trim >= msg->sg.data[i].length) {
                trim -= msg->sg.data[i].length;
                sk_msg_free_elem(sk, msg, i, true);
                sk_msg_iter_var_prev(i);
                if (!trim)
                        goto out;
        }

        msg->sg.data[i].length -= trim;
        sk_mem_uncharge(sk, trim);
        /* Adjust copybreak if it falls into the trimmed part of last buf */
        if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
                msg->sg.copybreak = msg->sg.data[i].length;
out:
        sk_msg_iter_var_next(i);
        msg->sg.end = i;

        /* If we trim data a full sg elem before the curr pointer, update
         * copybreak and curr so that any future copy operations start at
         * the new copy location.
         * However, trimmed data that has not yet been used in a copy op
         * does not require an update.
         */
        if (!msg->sg.size) {
                msg->sg.curr = msg->sg.start;
                msg->sg.copybreak = 0;
        } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
                   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
                sk_msg_iter_var_prev(i);
                msg->sg.curr = i;
                msg->sg.copybreak = msg->sg.data[i].length;
        }
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

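/* Map up to @bytes of user memory from @from into @msg without copying, by
 * pinning the pages with iov_iter_get_pages() and charging them to @sk. On
 * error the iterator is reverted; the caller may still need to trim @msg
 * (see the comment at the out label).
 */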
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes)
{
        int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
        const int to_max_pages = MAX_MSG_FRAGS;
        struct page *pages[MAX_MSG_FRAGS];
        ssize_t orig, copied, use, offset;

        orig = msg->sg.size;
        while (bytes > 0) {
                i = 0;
                maxpages = to_max_pages - num_elems;
                if (maxpages == 0) {
                        ret = -EFAULT;
                        goto out;
                }

                copied = iov_iter_get_pages(from, pages, bytes, maxpages,
                                            &offset);
                if (copied <= 0) {
                        ret = -EFAULT;
                        goto out;
                }

                iov_iter_advance(from, copied);
                bytes -= copied;
                msg->sg.size += copied;

                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);
                        sg_set_page(&msg->sg.data[msg->sg.end],
                                    pages[i], use, offset);
                        sg_unmark_end(&msg->sg.data[msg->sg.end]);
                        sk_mem_charge(sk, use);

                        offset = 0;
                        copied -= use;
                        sk_msg_iter_next(msg, end);
                        num_elems++;
                        i++;
                }
                /* When zerocopy is mixed with sk_msg_*copy* operations we
                 * may have a copybreak set. In this case clear it and prefer
                 * the zerocopy remainder when possible.
                 */
                msg->sg.copybreak = 0;
                msg->sg.curr = msg->sg.end;
        }
out:
        /* Revert iov_iter updates, msg will need to use 'trim' later if it
         * also needs to be cleared.
         */
        if (ret)
                iov_iter_revert(from, msg->sg.size - orig);
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

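/* Copy @bytes from @from into the already-allocated buffers of @msg,
 * starting at sg.curr/sg.copybreak and advancing both as data is written.
 * Returns a negative errno if the copy faults or @msg runs out of room.
 */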
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                             struct sk_msg *msg, u32 bytes)
{
        int ret = -ENOSPC, i = msg->sg.curr;
        struct scatterlist *sge;
        u32 copy, buf_size;
        void *to;

        do {
                sge = sk_msg_elem(msg, i);
                /* This is possible if a trim operation shrunk the buffer */
                if (msg->sg.copybreak >= sge->length) {
                        msg->sg.copybreak = 0;
                        sk_msg_iter_var_next(i);
                        if (i == msg->sg.end)
                                break;
                        sge = sk_msg_elem(msg, i);
                }

                buf_size = sge->length - msg->sg.copybreak;
                copy = (buf_size > bytes) ? bytes : buf_size;
                to = sg_virt(sge) + msg->sg.copybreak;
                msg->sg.copybreak += copy;
                if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
                        ret = copy_from_iter_nocache(to, copy, from);
                else
                        ret = copy_from_iter(to, copy, from);
                if (ret != copy) {
                        ret = -EFAULT;
                        goto out;
                }
                bytes -= copy;
                if (!bytes)
                        break;
                msg->sg.copybreak = 0;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
out:
        msg->sg.curr = i;
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
                                                  struct sk_buff *skb)
{
        struct sk_msg *msg;

        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                return NULL;

        if (!sk_rmem_schedule(sk, skb, skb->truesize))
                return NULL;

        msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
        if (unlikely(!msg))
                return NULL;

        sk_msg_init(msg);
        return msg;
}

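/* Map @skb into @msg, queue it on the ingress msg list of @psock and wake
 * the receiving socket. Returns the number of bytes queued, -EAGAIN if the
 * skb could not be linearized, or the skb_to_sgvec() error.
 */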
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
                                        struct sk_psock *psock,
                                        struct sock *sk,
                                        struct sk_msg *msg)
{
        int num_sge, copied;

        /* skb_linearize() may fail with ENOMEM, but let's simply try again
         * later if this happens. Under memory pressure we don't want to
         * drop the skb. We need to linearize the skb so that the mapping
         * in skb_to_sgvec() cannot fail.
         */
        if (skb_linearize(skb))
                return -EAGAIN;
        num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
        if (unlikely(num_sge < 0)) {
                kfree(msg);
                return num_sge;
        }

        copied = skb->len;
        msg->sg.start = 0;
        msg->sg.size = copied;
        msg->sg.end = num_sge;
        msg->skb = skb;

        sk_psock_queue_msg(psock, msg);
        sk_psock_data_ready(sk, psock);
        return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
        struct sock *sk = psock->sk;
        struct sk_msg *msg;

        /* If we are receiving on the same sock skb->sk is already assigned,
         * so skip memory accounting and the owner transition since they are
         * already set up correctly.
         */
        if (unlikely(skb->sk == sk))
                return sk_psock_skb_ingress_self(psock, skb);
        msg = sk_psock_create_ingress_msg(sk, skb);
        if (!msg)
                return -EAGAIN;

        /* This will transition ownership of the data from the socket where
         * the BPF program was run initiating the redirect to the socket
         * we will eventually receive this data on. The data will be released
         * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
         * copied into user buffers.
         */
        skb_set_owner_r(skb, sk);
        return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or call
 * skb_set_owner_r() because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
        struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
        struct sock *sk = psock->sk;

        if (unlikely(!msg))
                return -EAGAIN;
        sk_msg_init(msg);
        return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
                               u32 off, u32 len, bool ingress)
{
        if (!ingress) {
                if (!sock_writeable(psock->sk))
                        return -EAGAIN;
                return skb_send_sock(psock->sk, skb, off, len);
        }
        return sk_psock_skb_ingress(psock, skb);
}

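/* Workqueue handler that drains psock->ingress_skb: depending on the
 * redirect direction each skb is either transmitted via skb_send_sock() or
 * queued on the local ingress msg list. Partial progress is stashed in
 * work_state so a later run can resume after -EAGAIN.
 */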
static void sk_psock_backlog(struct work_struct *work)
{
        struct sk_psock *psock = container_of(work, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
        struct sk_buff *skb;
        bool ingress;
        u32 len, off;
        int ret;

        mutex_lock(&psock->work_mutex);
        if (state->skb) {
                skb = state->skb;
                len = state->len;
                off = state->off;
                state->skb = NULL;
                goto start;
        }

        while ((skb = skb_dequeue(&psock->ingress_skb))) {
                len = skb->len;
                off = 0;
start:
                ingress = skb_bpf_ingress(skb);
                skb_bpf_redirect_clear(skb);
                do {
                        ret = -EIO;
                        if (!sock_flag(psock->sk, SOCK_DEAD))
                                ret = sk_psock_handle_skb(psock, skb, off,
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
                                        state->skb = skb;
                                        state->len = len;
                                        state->off = off;
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
                                sk_psock_report_error(psock, ret ? -ret : EPIPE);
                                sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
                                kfree_skb(skb);
                                goto end;
                        }
                        off += ret;
                        len -= ret;
                } while (len);

                if (!ingress)
                        kfree_skb(skb);
        }
end:
        mutex_unlock(&psock->work_mutex);
}

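/* Allocate a psock for @sk and attach it through sk_user_data, saving the
 * socket's original proto callbacks so they can be restored later. Returns
 * ERR_PTR(-EINVAL) if a ULP is attached, ERR_PTR(-EBUSY) if sk_user_data is
 * already in use, or ERR_PTR(-ENOMEM) on allocation failure.
 */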
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
        struct sk_psock *psock;
        struct proto *prot;

        write_lock_bh(&sk->sk_callback_lock);

        if (inet_csk_has_ulp(sk)) {
                psock = ERR_PTR(-EINVAL);
                goto out;
        }

        if (sk->sk_user_data) {
                psock = ERR_PTR(-EBUSY);
                goto out;
        }

        psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
        if (!psock) {
                psock = ERR_PTR(-ENOMEM);
                goto out;
        }

        prot = READ_ONCE(sk->sk_prot);
        psock->sk = sk;
        psock->eval = __SK_NONE;
        psock->sk_proto = prot;
        psock->saved_unhash = prot->unhash;
        psock->saved_close = prot->close;
        psock->saved_write_space = sk->sk_write_space;

        INIT_LIST_HEAD(&psock->link);
        spin_lock_init(&psock->link_lock);

        INIT_WORK(&psock->work, sk_psock_backlog);
        mutex_init(&psock->work_mutex);
        INIT_LIST_HEAD(&psock->ingress_msg);
        spin_lock_init(&psock->ingress_lock);
        skb_queue_head_init(&psock->ingress_skb);

        sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
        refcount_set(&psock->refcnt, 1);

        rcu_assign_sk_user_data_nocopy(sk, psock);
        sock_hold(sk);

out:
        write_unlock_bh(&sk->sk_callback_lock);
        return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
        struct sk_psock_link *link;

        spin_lock_bh(&psock->link_lock);
        link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
                                        list);
        if (link)
                list_del(&link->list);
        spin_unlock_bh(&psock->link_lock);
        return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
        struct sk_msg *msg, *tmp;

        list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
                list_del(&msg->list);
                sk_msg_free(psock->sk, msg);
                kfree(msg);
        }
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
                skb_bpf_redirect_clear(skb);
                kfree_skb(skb);
        }
        __sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
        struct sk_psock_link *link, *tmp;

        list_for_each_entry_safe(link, tmp, &psock->link, list) {
                list_del(&link->list);
                sk_psock_free_link(link);
        }
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
        spin_lock_bh(&psock->ingress_lock);
        sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
        sk_psock_cork_free(psock);
        __sk_psock_zap_ingress(psock);
        spin_unlock_bh(&psock->ingress_lock);

        if (wait)
                cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
        struct sk_psock *psock = container_of(to_rcu_work(work),
                                              struct sk_psock, rwork);
        /* No sk_callback_lock since already detached. */

        sk_psock_done_strp(psock);

        cancel_work_sync(&psock->work);
        mutex_destroy(&psock->work_mutex);

        psock_progs_drop(&psock->progs);

        sk_psock_link_destroy(psock);
        sk_psock_cork_free(psock);

        if (psock->sk_redir)
                sock_put(psock->sk_redir);
        sock_put(psock->sk);
        kfree(psock);
}

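/* Detach @psock from @sk: restore the original proto callbacks, clear
 * sk_user_data, stop the parser or verdict hooks and schedule the final
 * destruction through an RCU work item.
 */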
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
        sk_psock_stop(psock, false);

        write_lock_bh(&sk->sk_callback_lock);
        sk_psock_restore_proto(sk, psock);
        rcu_assign_sk_user_data(sk, NULL);
        if (psock->progs.stream_parser)
                sk_psock_stop_strp(sk, psock);
        else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
                sk_psock_stop_verdict(sk, psock);
        write_unlock_bh(&sk->sk_callback_lock);

        INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
        queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

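/* Translate a BPF program verdict (SK_PASS/SK_DROP) plus the redirect flag
 * into the internal __SK_* action codes; anything unexpected maps to
 * __SK_DROP.
 */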
static int sk_psock_map_verd(int verdict, bool redir)
{
        switch (verdict) {
        case SK_PASS:
                return redir ? __SK_REDIRECT : __SK_PASS;
        case SK_DROP:
        default:
                break;
        }

        return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
                         struct sk_msg *msg)
{
        struct bpf_prog *prog;
        int ret;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.msg_parser);
        if (unlikely(!prog)) {
                ret = __SK_PASS;
                goto out;
        }

        sk_msg_compute_data_pointers(msg);
        msg->sk = sk;
        ret = bpf_prog_run_pin_on_cpu(prog, msg);
        ret = sk_psock_map_verd(ret, msg->sk_redir);
        psock->apply_bytes = msg->apply_bytes;
        if (ret == __SK_REDIRECT) {
                if (psock->sk_redir)
                        sock_put(psock->sk_redir);
                psock->sk_redir = msg->sk_redir;
                if (!psock->sk_redir) {
                        ret = __SK_DROP;
                        goto out;
                }
                sock_hold(psock->sk_redir);
        }
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

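/* Deliver a redirected skb to the socket selected by the BPF program. The
 * skb is dropped if no target was set, the target is dead, or its psock can
 * no longer accept traffic; otherwise it is queued on the target's
 * ingress_skb list and the backlog work is scheduled.
 */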
static void sk_psock_skb_redirect(struct sk_buff *skb)
{
        struct sk_psock *psock_other;
        struct sock *sk_other;

        sk_other = skb_bpf_redirect_fetch(skb);
        /* This error indicates a buggy BPF program: it returned a redirect
         * verdict but then didn't set a redirect socket.
         */
        if (unlikely(!sk_other)) {
                kfree_skb(skb);
                return;
        }
        psock_other = sk_psock(sk_other);
        /* This error indicates the socket is being torn down or had another
         * error that caused the pipe to break. We can't send a packet on
         * a socket that is in this state so we drop the skb.
         */
        if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
                kfree_skb(skb);
                return;
        }
        spin_lock_bh(&psock_other->ingress_lock);
        if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
                spin_unlock_bh(&psock_other->ingress_lock);
                kfree_skb(skb);
                return;
        }

        skb_queue_tail(&psock_other->ingress_skb, skb);
        schedule_work(&psock_other->work);
        spin_unlock_bh(&psock_other->ingress_lock);
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
{
        switch (verdict) {
        case __SK_REDIRECT:
                skb_set_owner_r(skb, sk);
                sk_psock_skb_redirect(skb);
                break;
        case __SK_PASS:
        case __SK_DROP:
        default:
                break;
        }
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
        struct bpf_prog *prog;
        int ret = __SK_PASS;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (likely(prog)) {
                /* We skip the full skb_set_owner_r() here because for a
                 * SK_PASS or SK_DROP verdict we can skip skb memory
                 * accounting and use the TLS context.
                 */
                skb->sk = psock->sk;
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
        sk_psock_tls_verdict_apply(skb, psock->sk, ret);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static void sk_psock_verdict_apply(struct sk_psock *psock,
                                   struct sk_buff *skb, int verdict)
{
        struct sock *sk_other;
        int err = -EIO;

        switch (verdict) {
        case __SK_PASS:
                sk_other = psock->sk;
                if (sock_flag(sk_other, SOCK_DEAD) ||
                    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                        goto out_free;
                }

                skb_bpf_set_ingress(skb);

                /* If the ingress queue is empty we can submit directly into
                 * the msg queue. If it is not empty we have to queue work,
                 * otherwise we may get out-of-order data. Any errors from
                 * sk_psock_skb_ingress are handled by retrying later from
                 * the workqueue.
                 */
                if (skb_queue_empty(&psock->ingress_skb)) {
                        err = sk_psock_skb_ingress_self(psock, skb);
                }
                if (err < 0) {
                        spin_lock_bh(&psock->ingress_lock);
                        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                                skb_queue_tail(&psock->ingress_skb, skb);
                                schedule_work(&psock->work);
                        }
                        spin_unlock_bh(&psock->ingress_lock);
                }
                break;
        case __SK_REDIRECT:
                sk_psock_skb_redirect(skb);
                break;
        case __SK_DROP:
        default:
out_free:
                kfree_skb(skb);
        }
}

static void sk_psock_write_space(struct sock *sk)
{
        struct sk_psock *psock;
        void (*write_space)(struct sock *sk) = NULL;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        schedule_work(&psock->work);
                write_space = psock->saved_write_space;
        }
        rcu_read_unlock();
        if (write_space)
                write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        struct sock *sk;

        rcu_read_lock();
        sk = strp->sk;
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                kfree_skb(skb);
                goto out;
        }
        skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (likely(prog)) {
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
        }
        sk_psock_verdict_apply(psock, skb, ret);
out:
        rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
        return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
        struct bpf_prog *prog;
        int ret = skb->len;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.stream_parser);
        if (likely(prog)) {
                skb->sk = psock->sk;
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                skb->sk = NULL;
        }
        rcu_read_unlock();
        return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
        struct sk_psock *psock;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (tls_sw_has_ctx_rx(sk)) {
                        psock->saved_data_ready(sk);
                } else {
                        write_lock_bh(&sk->sk_callback_lock);
                        strp_data_ready(&psock->strp);
                        write_unlock_bh(&sk->sk_callback_lock);
                }
        }
        rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
        static const struct strp_callbacks cb = {
                .rcv_msg        = sk_psock_strp_read,
                .read_sock_done = sk_psock_strp_read_done,
                .parse_msg      = sk_psock_strp_parse,
        };

        return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
        if (psock->saved_data_ready)
                return;

        psock->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_strp_data_ready;
        sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
        if (!psock->saved_data_ready)
                return;

        sk->sk_data_ready = psock->saved_data_ready;
        psock->saved_data_ready = NULL;
        strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
        /* Parser has been stopped */
        if (psock->progs.stream_parser)
                strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

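/* read_sock() callback used when no stream parser is attached: clone the
 * skb (so sk_eat_skb() in tcp_read_sock() does not drop our data), run the
 * stream verdict program, or the skb verdict program introduced with
 * BPF_SK_SKB_VERDICT as a fallback, and apply the result.
 */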
static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
                                 unsigned int offset, size_t orig_len)
{
        struct sock *sk = (struct sock *)desc->arg.data;
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        int len = skb->len;

        /* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb) {
                desc->error = -ENOMEM;
                return 0;
        }

        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                len = 0;
                kfree_skb(skb);
                goto out;
        }
        skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (!prog)
                prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
        }
        sk_psock_verdict_apply(psock, skb, ret);
out:
        rcu_read_unlock();
        return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
        struct socket *sock = sk->sk_socket;
        read_descriptor_t desc;

        if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
                return;

        desc.arg.data = sk;
        desc.error = 0;
        desc.count = 1;

        sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
        if (psock->saved_data_ready)
                return;

        psock->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_verdict_data_ready;
        sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
        if (!psock->saved_data_ready)
                return;

        sk->sk_data_ready = psock->saved_data_ready;
        psock->saved_data_ready = NULL;
}