net/core/skmsg.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3
4 #include <linux/skmsg.h>
5 #include <linux/skbuff.h>
6 #include <linux/scatterlist.h>
7
8 #include <net/sock.h>
9 #include <net/tcp.h>
10 #include <net/tls.h>
11
12 static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
13 {
14         if (msg->sg.end > msg->sg.start &&
15             elem_first_coalesce < msg->sg.end)
16                 return true;
17
18         if (msg->sg.end < msg->sg.start &&
19             (elem_first_coalesce > msg->sg.start ||
20              elem_first_coalesce < msg->sg.end))
21                 return true;
22
23         return false;
24 }
25
26 int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
27                  int elem_first_coalesce)
28 {
29         struct page_frag *pfrag = sk_page_frag(sk);
30         int ret = 0;
31
32         len -= msg->sg.size;
33         while (len > 0) {
34                 struct scatterlist *sge;
35                 u32 orig_offset;
36                 int use, i;
37
38                 if (!sk_page_frag_refill(sk, pfrag))
39                         return -ENOMEM;
40
41                 orig_offset = pfrag->offset;
42                 use = min_t(int, len, pfrag->size - orig_offset);
43                 if (!sk_wmem_schedule(sk, use))
44                         return -ENOMEM;
45
46                 i = msg->sg.end;
47                 sk_msg_iter_var_prev(i);
48                 sge = &msg->sg.data[i];
49
50                 if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
51                     sg_page(sge) == pfrag->page &&
52                     sge->offset + sge->length == orig_offset) {
53                         sge->length += use;
54                 } else {
55                         if (sk_msg_full(msg)) {
56                                 ret = -ENOSPC;
57                                 break;
58                         }
59
60                         sge = &msg->sg.data[msg->sg.end];
61                         sg_unmark_end(sge);
62                         sg_set_page(sge, pfrag->page, use, orig_offset);
63                         get_page(pfrag->page);
64                         sk_msg_iter_next(msg, end);
65                 }
66
67                 sk_mem_charge(sk, use);
68                 msg->sg.size += use;
69                 pfrag->offset += use;
70                 len -= use;
71         }
72
73         return ret;
74 }
75 EXPORT_SYMBOL_GPL(sk_msg_alloc);
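/* Editorial illustration, not part of the original file: a minimal, hypothetical
 * sketch of how a caller might grow a fresh sk_msg and copy user data into it,
 * trimming back on failure. The helper name example_msg_fill() and the choice of
 * elem_first_coalesce are assumptions for illustration only.
 */
#if 0
static int example_msg_fill(struct sock *sk, struct sk_msg *msg,
                            struct iov_iter *from, u32 bytes)
{
        u32 osize = msg->sg.size;
        int err;

        /* The third argument is the desired total size; sk_msg_alloc() only
         * allocates the difference from msg->sg.size.
         */
        err = sk_msg_alloc(sk, msg, osize + bytes, msg->sg.start);
        if (err)
                return err;

        err = sk_msg_memcopy_from_iter(sk, from, msg, bytes);
        if (err < 0)
                sk_msg_trim(sk, msg, osize);    /* undo the partial allocation */
        return err < 0 ? err : 0;
}
#endif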
76
77 int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
78                  u32 off, u32 len)
79 {
80         int i = src->sg.start;
81         struct scatterlist *sge = sk_msg_elem(src, i);
82         struct scatterlist *sgd = NULL;
83         u32 sge_len, sge_off;
84
85         while (off) {
86                 if (sge->length > off)
87                         break;
88                 off -= sge->length;
89                 sk_msg_iter_var_next(i);
90                 if (i == src->sg.end && off)
91                         return -ENOSPC;
92                 sge = sk_msg_elem(src, i);
93         }
94
95         while (len) {
96                 sge_len = sge->length - off;
97                 if (sge_len > len)
98                         sge_len = len;
99
100                 if (dst->sg.end)
101                         sgd = sk_msg_elem(dst, dst->sg.end - 1);
102
103                 if (sgd &&
104                     (sg_page(sge) == sg_page(sgd)) &&
105                     (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
106                         sgd->length += sge_len;
107                         dst->sg.size += sge_len;
108                 } else if (!sk_msg_full(dst)) {
109                         sge_off = sge->offset + off;
110                         sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
111                 } else {
112                         return -ENOSPC;
113                 }
114
115                 off = 0;
116                 len -= sge_len;
117                 sk_mem_charge(sk, sge_len);
118                 sk_msg_iter_var_next(i);
119                 if (i == src->sg.end && len)
120                         return -ENOSPC;
121                 sge = sk_msg_elem(src, i);
122         }
123
124         return 0;
125 }
126 EXPORT_SYMBOL_GPL(sk_msg_clone);
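/* Editorial illustration, not part of the original file: a hypothetical caller
 * cloning @len bytes starting at @off of one sk_msg into a freshly initialised
 * one. The destination ends up referencing the same pages and is memory-charged
 * against @sk inside sk_msg_clone(); -ENOSPC means the range did not fit.
 */
#if 0
static int example_clone_region(struct sock *sk, struct sk_msg *dst,
                                struct sk_msg *src, u32 off, u32 len)
{
        sk_msg_init(dst);
        return sk_msg_clone(sk, dst, src, off, len);
}
#endif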
127
128 void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
129 {
130         int i = msg->sg.start;
131
132         do {
133                 struct scatterlist *sge = sk_msg_elem(msg, i);
134
135                 if (bytes < sge->length) {
136                         sge->length -= bytes;
137                         sge->offset += bytes;
138                         sk_mem_uncharge(sk, bytes);
139                         break;
140                 }
141
142                 sk_mem_uncharge(sk, sge->length);
143                 bytes -= sge->length;
144                 sge->length = 0;
145                 sge->offset = 0;
146                 sk_msg_iter_var_next(i);
147         } while (bytes && i != msg->sg.end);
148         msg->sg.start = i;
149 }
150 EXPORT_SYMBOL_GPL(sk_msg_return_zero);
151
152 void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
153 {
154         int i = msg->sg.start;
155
156         do {
157                 struct scatterlist *sge = &msg->sg.data[i];
158                 int uncharge = (bytes < sge->length) ? bytes : sge->length;
159
160                 sk_mem_uncharge(sk, uncharge);
161                 bytes -= uncharge;
162                 sk_msg_iter_var_next(i);
163         } while (i != msg->sg.end);
164 }
165 EXPORT_SYMBOL_GPL(sk_msg_return);
166
167 static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
168                             bool charge)
169 {
170         struct scatterlist *sge = sk_msg_elem(msg, i);
171         u32 len = sge->length;
172
173         /* When the skb owns the memory we free it from the consume_skb() path. */
174         if (!msg->skb) {
175                 if (charge)
176                         sk_mem_uncharge(sk, len);
177                 put_page(sg_page(sge));
178         }
179         memset(sge, 0, sizeof(*sge));
180         return len;
181 }
182
183 static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
184                          bool charge)
185 {
186         struct scatterlist *sge = sk_msg_elem(msg, i);
187         int freed = 0;
188
189         while (msg->sg.size) {
190                 msg->sg.size -= sge->length;
191                 freed += sk_msg_free_elem(sk, msg, i, charge);
192                 sk_msg_iter_var_next(i);
193                 sk_msg_check_to_free(msg, i, msg->sg.size);
194                 sge = sk_msg_elem(msg, i);
195         }
196         consume_skb(msg->skb);
197         sk_msg_init(msg);
198         return freed;
199 }
200
201 int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
202 {
203         return __sk_msg_free(sk, msg, msg->sg.start, false);
204 }
205 EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
206
207 int sk_msg_free(struct sock *sk, struct sk_msg *msg)
208 {
209         return __sk_msg_free(sk, msg, msg->sg.start, true);
210 }
211 EXPORT_SYMBOL_GPL(sk_msg_free);
212
213 static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
214                                   u32 bytes, bool charge)
215 {
216         struct scatterlist *sge;
217         u32 i = msg->sg.start;
218
219         while (bytes) {
220                 sge = sk_msg_elem(msg, i);
221                 if (!sge->length)
222                         break;
223                 if (bytes < sge->length) {
224                         if (charge)
225                                 sk_mem_uncharge(sk, bytes);
226                         sge->length -= bytes;
227                         sge->offset += bytes;
228                         msg->sg.size -= bytes;
229                         break;
230                 }
231
232                 msg->sg.size -= sge->length;
233                 bytes -= sge->length;
234                 sk_msg_free_elem(sk, msg, i, charge);
235                 sk_msg_iter_var_next(i);
236                 sk_msg_check_to_free(msg, i, bytes);
237         }
238         msg->sg.start = i;
239 }
240
241 void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
242 {
243         __sk_msg_free_partial(sk, msg, bytes, true);
244 }
245 EXPORT_SYMBOL_GPL(sk_msg_free_partial);
246
247 void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
248                                   u32 bytes)
249 {
250         __sk_msg_free_partial(sk, msg, bytes, false);
251 }
252
253 void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
254 {
255         int trim = msg->sg.size - len;
256         u32 i = msg->sg.end;
257
258         if (trim <= 0) {
259                 WARN_ON(trim < 0);
260                 return;
261         }
262
263         sk_msg_iter_var_prev(i);
264         msg->sg.size = len;
265         while (msg->sg.data[i].length &&
266                trim >= msg->sg.data[i].length) {
267                 trim -= msg->sg.data[i].length;
268                 sk_msg_free_elem(sk, msg, i, true);
269                 sk_msg_iter_var_prev(i);
270                 if (!trim)
271                         goto out;
272         }
273
274         msg->sg.data[i].length -= trim;
275         sk_mem_uncharge(sk, trim);
276         /* Adjust copybreak if it falls into the trimmed part of last buf */
277         if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
278                 msg->sg.copybreak = msg->sg.data[i].length;
279 out:
280         sk_msg_iter_var_next(i);
281         msg->sg.end = i;
282
283         /* If we trim data a full sg elem before the curr pointer, update
284          * copybreak and curr so that any future copy operations
285          * start at the new copy location.
286          * However, trimmed data that has not yet been used in a copy op
287          * does not require an update.
288          */
289         if (!msg->sg.size) {
290                 msg->sg.curr = msg->sg.start;
291                 msg->sg.copybreak = 0;
292         } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
293                    sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
294                 sk_msg_iter_var_prev(i);
295                 msg->sg.curr = i;
296                 msg->sg.copybreak = msg->sg.data[i].length;
297         }
298 }
299 EXPORT_SYMBOL_GPL(sk_msg_trim);
300
301 int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
302                               struct sk_msg *msg, u32 bytes)
303 {
304         int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
305         const int to_max_pages = MAX_MSG_FRAGS;
306         struct page *pages[MAX_MSG_FRAGS];
307         ssize_t orig, copied, use, offset;
308
309         orig = msg->sg.size;
310         while (bytes > 0) {
311                 i = 0;
312                 maxpages = to_max_pages - num_elems;
313                 if (maxpages == 0) {
314                         ret = -EFAULT;
315                         goto out;
316                 }
317
318                 copied = iov_iter_get_pages(from, pages, bytes, maxpages,
319                                             &offset);
320                 if (copied <= 0) {
321                         ret = -EFAULT;
322                         goto out;
323                 }
324
325                 iov_iter_advance(from, copied);
326                 bytes -= copied;
327                 msg->sg.size += copied;
328
329                 while (copied) {
330                         use = min_t(int, copied, PAGE_SIZE - offset);
331                         sg_set_page(&msg->sg.data[msg->sg.end],
332                                     pages[i], use, offset);
333                         sg_unmark_end(&msg->sg.data[msg->sg.end]);
334                         sk_mem_charge(sk, use);
335
336                         offset = 0;
337                         copied -= use;
338                         sk_msg_iter_next(msg, end);
339                         num_elems++;
340                         i++;
341                 }
342                 /* When zerocopy is mixed with sk_msg_*copy* operations we
343                  * may have a copybreak set; in this case clear it and prefer
344                  * the zerocopy remainder when possible.
345                  */
346                 msg->sg.copybreak = 0;
347                 msg->sg.curr = msg->sg.end;
348         }
349 out:
350         /* Revert the iov_iter updates; the msg will need to use 'trim' later
351          * if it also needs to be cleared.
352          */
353         if (ret)
354                 iov_iter_revert(from, msg->sg.size - orig);
355         return ret;
356 }
357 EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
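/* Editorial illustration, not part of the original file: on failure the iov_iter
 * has already been reverted above, so a hypothetical caller only needs to trim
 * the msg back to its previous size before bailing out or falling back to a
 * copying path.
 */
#if 0
static int example_zerocopy_fill(struct sock *sk, struct iov_iter *from,
                                 struct sk_msg *msg, u32 bytes)
{
        u32 osize = msg->sg.size;
        int err;

        err = sk_msg_zerocopy_from_iter(sk, from, msg, bytes);
        if (err)
                sk_msg_trim(sk, msg, osize);    /* drop partially linked pages */
        return err;
}
#endif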
358
359 int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
360                              struct sk_msg *msg, u32 bytes)
361 {
362         int ret = -ENOSPC, i = msg->sg.curr;
363         struct scatterlist *sge;
364         u32 copy, buf_size;
365         void *to;
366
367         do {
368                 sge = sk_msg_elem(msg, i);
369                 /* This is possible if a trim operation shrunk the buffer */
370                 if (msg->sg.copybreak >= sge->length) {
371                         msg->sg.copybreak = 0;
372                         sk_msg_iter_var_next(i);
373                         if (i == msg->sg.end)
374                                 break;
375                         sge = sk_msg_elem(msg, i);
376                 }
377
378                 buf_size = sge->length - msg->sg.copybreak;
379                 copy = (buf_size > bytes) ? bytes : buf_size;
380                 to = sg_virt(sge) + msg->sg.copybreak;
381                 msg->sg.copybreak += copy;
382                 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
383                         ret = copy_from_iter_nocache(to, copy, from);
384                 else
385                         ret = copy_from_iter(to, copy, from);
386                 if (ret != copy) {
387                         ret = -EFAULT;
388                         goto out;
389                 }
390                 bytes -= copy;
391                 if (!bytes)
392                         break;
393                 msg->sg.copybreak = 0;
394                 sk_msg_iter_var_next(i);
395         } while (i != msg->sg.end);
396 out:
397         msg->sg.curr = i;
398         return ret;
399 }
400 EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
401
402 static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
403                                                   struct sk_buff *skb)
404 {
405         struct sk_msg *msg;
406
407         if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
408                 return NULL;
409
410         if (!sk_rmem_schedule(sk, skb, skb->truesize))
411                 return NULL;
412
413         msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
414         if (unlikely(!msg))
415                 return NULL;
416
417         sk_msg_init(msg);
418         return msg;
419 }
420
421 static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
422                                         struct sk_psock *psock,
423                                         struct sock *sk,
424                                         struct sk_msg *msg)
425 {
426         int num_sge, copied;
427
428         /* skb_linearize() may fail with ENOMEM, but let's simply try again
429          * later if this happens. Under memory pressure we don't want to
430          * drop the skb. We need to linearize the skb so that the mapping
431          * in skb_to_sgvec() cannot error.
432          */
433         if (skb_linearize(skb))
434                 return -EAGAIN;
435         num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
436         if (unlikely(num_sge < 0)) {
437                 kfree(msg);
438                 return num_sge;
439         }
440
441         copied = skb->len;
442         msg->sg.start = 0;
443         msg->sg.size = copied;
444         msg->sg.end = num_sge;
445         msg->skb = skb;
446
447         sk_psock_queue_msg(psock, msg);
448         sk_psock_data_ready(sk, psock);
449         return copied;
450 }
451
452 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);
453
454 static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
455 {
456         struct sock *sk = psock->sk;
457         struct sk_msg *msg;
458
459         /* If we are receiving on the same sock, skb->sk is already assigned;
460          * skip memory accounting and the owner transition since both are
461          * already set up correctly.
462          */
463         if (unlikely(skb->sk == sk))
464                 return sk_psock_skb_ingress_self(psock, skb);
465         msg = sk_psock_create_ingress_msg(sk, skb);
466         if (!msg)
467                 return -EAGAIN;
468
469         /* This will transition ownership of the data from the socket where
470          * the BPF program was run initiating the redirect to the socket
471          * we will eventually receive this data on. The data will be released
472          * from consume_skb() in __tcp_bpf_recvmsg() after it has been copied
473          * into user buffers.
474          */
475         skb_set_owner_r(skb, sk);
476         return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
477 }
478
479 /* Puts an skb on the ingress queue of the socket already assigned to the
480  * skb. In this case we do not need to check memory limits or skb_set_owner_r
481  * because the skb is already accounted for here.
482  */
483 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
484 {
485         struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
486         struct sock *sk = psock->sk;
487
488         if (unlikely(!msg))
489                 return -EAGAIN;
490         sk_msg_init(msg);
491         return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
492 }
493
494 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
495                                u32 off, u32 len, bool ingress)
496 {
497         if (!ingress) {
498                 if (!sock_writeable(psock->sk))
499                         return -EAGAIN;
500                 return skb_send_sock_locked(psock->sk, skb, off, len);
501         }
502         return sk_psock_skb_ingress(psock, skb);
503 }
504
505 static void sk_psock_backlog(struct work_struct *work)
506 {
507         struct sk_psock *psock = container_of(work, struct sk_psock, work);
508         struct sk_psock_work_state *state = &psock->work_state;
509         struct sk_buff *skb;
510         bool ingress;
511         u32 len, off;
512         int ret;
513
514         /* Lock sock to avoid losing sk_socket during loop. */
515         lock_sock(psock->sk);
516         if (state->skb) {
517                 skb = state->skb;
518                 len = state->len;
519                 off = state->off;
520                 state->skb = NULL;
521                 goto start;
522         }
523
524         while ((skb = skb_dequeue(&psock->ingress_skb))) {
525                 len = skb->len;
526                 off = 0;
527 start:
528                 ingress = tcp_skb_bpf_ingress(skb);
529                 do {
530                         ret = -EIO;
531                         if (likely(psock->sk->sk_socket))
532                                 ret = sk_psock_handle_skb(psock, skb, off,
533                                                           len, ingress);
534                         if (ret <= 0) {
535                                 if (ret == -EAGAIN) {
536                                         state->skb = skb;
537                                         state->len = len;
538                                         state->off = off;
539                                         goto end;
540                                 }
541                                 /* Hard errors break pipe and stop xmit. */
542                                 sk_psock_report_error(psock, ret ? -ret : EPIPE);
543                                 sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
544                                 kfree_skb(skb);
545                                 goto end;
546                         }
547                         off += ret;
548                         len -= ret;
549                 } while (len);
550
551                 if (!ingress)
552                         kfree_skb(skb);
553         }
554 end:
555         release_sock(psock->sk);
556 }
557
558 struct sk_psock *sk_psock_init(struct sock *sk, int node)
559 {
560         struct sk_psock *psock;
561         struct proto *prot;
562
563         write_lock_bh(&sk->sk_callback_lock);
564
565         if (inet_csk_has_ulp(sk)) {
566                 psock = ERR_PTR(-EINVAL);
567                 goto out;
568         }
569
570         if (sk->sk_user_data) {
571                 psock = ERR_PTR(-EBUSY);
572                 goto out;
573         }
574
575         psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
576         if (!psock) {
577                 psock = ERR_PTR(-ENOMEM);
578                 goto out;
579         }
580
581         prot = READ_ONCE(sk->sk_prot);
582         psock->sk = sk;
583         psock->eval = __SK_NONE;
584         psock->sk_proto = prot;
585         psock->saved_unhash = prot->unhash;
586         psock->saved_close = prot->close;
587         psock->saved_write_space = sk->sk_write_space;
588
589         INIT_LIST_HEAD(&psock->link);
590         spin_lock_init(&psock->link_lock);
591
592         INIT_WORK(&psock->work, sk_psock_backlog);
593         INIT_LIST_HEAD(&psock->ingress_msg);
594         skb_queue_head_init(&psock->ingress_skb);
595
596         sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
597         refcount_set(&psock->refcnt, 1);
598
599         rcu_assign_sk_user_data_nocopy(sk, psock);
600         sock_hold(sk);
601
602 out:
603         write_unlock_bh(&sk->sk_callback_lock);
604         return psock;
605 }
606 EXPORT_SYMBOL_GPL(sk_psock_init);
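/* Editorial illustration, not part of the original file: sk_psock_init() returns
 * an ERR_PTR() on failure (-EINVAL with a ULP attached, -EBUSY if sk_user_data
 * is taken, -ENOMEM), so a hypothetical caller checks it with IS_ERR(). Real
 * callers normally release the psock through its refcount rather than calling
 * sk_psock_drop() directly.
 */
#if 0
static int example_attach_psock(struct sock *sk)
{
        struct sk_psock *psock;

        psock = sk_psock_init(sk, NUMA_NO_NODE);
        if (IS_ERR(psock))
                return PTR_ERR(psock);

        /* ... attach BPF programs, install data_ready hooks, etc. ... */

        sk_psock_drop(sk, psock);       /* final free happens via RCU */
        return 0;
}
#endif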
607
608 struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
609 {
610         struct sk_psock_link *link;
611
612         spin_lock_bh(&psock->link_lock);
613         link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
614                                         list);
615         if (link)
616                 list_del(&link->list);
617         spin_unlock_bh(&psock->link_lock);
618         return link;
619 }
620
621 void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
622 {
623         struct sk_msg *msg, *tmp;
624
625         list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
626                 list_del(&msg->list);
627                 sk_msg_free(psock->sk, msg);
628                 kfree(msg);
629         }
630 }
631
632 static void sk_psock_zap_ingress(struct sk_psock *psock)
633 {
634         __skb_queue_purge(&psock->ingress_skb);
635         __sk_psock_purge_ingress_msg(psock);
636 }
637
638 static void sk_psock_link_destroy(struct sk_psock *psock)
639 {
640         struct sk_psock_link *link, *tmp;
641
642         list_for_each_entry_safe(link, tmp, &psock->link, list) {
643                 list_del(&link->list);
644                 sk_psock_free_link(link);
645         }
646 }
647
648 static void sk_psock_destroy_deferred(struct work_struct *gc)
649 {
650         struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
651
652         /* No sk_callback_lock since already detached. */
653
654         /* Parser has been stopped */
655         if (psock->progs.skb_parser)
656                 strp_done(&psock->parser.strp);
657
658         cancel_work_sync(&psock->work);
659
660         psock_progs_drop(&psock->progs);
661
662         sk_psock_link_destroy(psock);
663         sk_psock_cork_free(psock);
664         sk_psock_zap_ingress(psock);
665
666         if (psock->sk_redir)
667                 sock_put(psock->sk_redir);
668         sock_put(psock->sk);
669         kfree(psock);
670 }
671
672 static void sk_psock_destroy(struct rcu_head *rcu)
673 {
674         struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);
675
676         INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
677         schedule_work(&psock->gc);
678 }
679
680 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
681 {
682         sk_psock_cork_free(psock);
683         sk_psock_zap_ingress(psock);
684
685         write_lock_bh(&sk->sk_callback_lock);
686         sk_psock_restore_proto(sk, psock);
687         rcu_assign_sk_user_data(sk, NULL);
688         if (psock->progs.skb_parser)
689                 sk_psock_stop_strp(sk, psock);
690         else if (psock->progs.skb_verdict)
691                 sk_psock_stop_verdict(sk, psock);
692         write_unlock_bh(&sk->sk_callback_lock);
693         sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
694
695         call_rcu(&psock->rcu, sk_psock_destroy);
696 }
697 EXPORT_SYMBOL_GPL(sk_psock_drop);
698
699 static int sk_psock_map_verd(int verdict, bool redir)
700 {
701         switch (verdict) {
702         case SK_PASS:
703                 return redir ? __SK_REDIRECT : __SK_PASS;
704         case SK_DROP:
705         default:
706                 break;
707         }
708
709         return __SK_DROP;
710 }
711
712 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
713                          struct sk_msg *msg)
714 {
715         struct bpf_prog *prog;
716         int ret;
717
718         rcu_read_lock();
719         prog = READ_ONCE(psock->progs.msg_parser);
720         if (unlikely(!prog)) {
721                 ret = __SK_PASS;
722                 goto out;
723         }
724
725         sk_msg_compute_data_pointers(msg);
726         msg->sk = sk;
727         ret = bpf_prog_run_pin_on_cpu(prog, msg);
728         ret = sk_psock_map_verd(ret, msg->sk_redir);
729         psock->apply_bytes = msg->apply_bytes;
730         if (ret == __SK_REDIRECT) {
731                 if (psock->sk_redir)
732                         sock_put(psock->sk_redir);
733                 psock->sk_redir = msg->sk_redir;
734                 if (!psock->sk_redir) {
735                         ret = __SK_DROP;
736                         goto out;
737                 }
738                 sock_hold(psock->sk_redir);
739         }
740 out:
741         rcu_read_unlock();
742         return ret;
743 }
744 EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
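/* Editorial illustration, not part of the original file: a rough, hypothetical
 * sketch of how a sendmsg path might act on the mapped verdict. The real TCP
 * logic in tcp_bpf is more involved (apply/cork bytes, partial sends, etc.).
 */
#if 0
static int example_apply_verdict(struct sock *sk, struct sk_psock *psock,
                                 struct sk_msg *msg)
{
        switch (sk_psock_msg_verdict(sk, psock, msg)) {
        case __SK_PASS:
                return 0;                       /* send on this socket */
        case __SK_REDIRECT:
                /* psock->sk_redir now holds a referenced target socket */
                return 1;
        case __SK_DROP:
        default:
                sk_msg_free(sk, msg);           /* drop and release memory */
                return -EACCES;
        }
}
#endif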
745
746 static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
747                             struct sk_buff *skb)
748 {
749         bpf_compute_data_end_sk_skb(skb);
750         return bpf_prog_run_pin_on_cpu(prog, skb);
751 }
752
753 static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
754 {
755         struct sk_psock_parser *parser;
756
757         parser = container_of(strp, struct sk_psock_parser, strp);
758         return container_of(parser, struct sk_psock, parser);
759 }
760
761 static void sk_psock_skb_redirect(struct sk_buff *skb)
762 {
763         struct sk_psock *psock_other;
764         struct sock *sk_other;
765
766         sk_other = tcp_skb_bpf_redirect_fetch(skb);
767         /* This error indicates a buggy BPF program: it returned a redirect
768          * return code but then didn't set a redirect interface.
769          */
770         if (unlikely(!sk_other)) {
771                 kfree_skb(skb);
772                 return;
773         }
774         psock_other = sk_psock(sk_other);
775         /* This error indicates the socket is being torn down or had another
776          * error that caused the pipe to break. We can't send a packet on
777          * a socket that is in this state so we drop the skb.
778          */
779         if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
780             !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
781                 kfree_skb(skb);
782                 return;
783         }
784
785         skb_queue_tail(&psock_other->ingress_skb, skb);
786         schedule_work(&psock_other->work);
787 }
788
789 static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
790 {
791         switch (verdict) {
792         case __SK_REDIRECT:
793                 skb_set_owner_r(skb, sk);
794                 sk_psock_skb_redirect(skb);
795                 break;
796         case __SK_PASS:
797         case __SK_DROP:
798         default:
799                 break;
800         }
801 }
802
803 int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
804 {
805         struct bpf_prog *prog;
806         int ret = __SK_PASS;
807
808         rcu_read_lock();
809         prog = READ_ONCE(psock->progs.skb_verdict);
810         if (likely(prog)) {
811                 /* We skip the full skb_set_owner_r() here because if we do
812                  * a SK_PASS or SK_DROP we can skip skb memory accounting and
813                  * use the TLS context.
814                  */
815                 skb->sk = psock->sk;
816                 tcp_skb_bpf_redirect_clear(skb);
817                 ret = sk_psock_bpf_run(psock, prog, skb);
818                 ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
819                 skb->sk = NULL;
820         }
821         sk_psock_tls_verdict_apply(skb, psock->sk, ret);
822         rcu_read_unlock();
823         return ret;
824 }
825 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
826
827 static void sk_psock_verdict_apply(struct sk_psock *psock,
828                                    struct sk_buff *skb, int verdict)
829 {
830         struct tcp_skb_cb *tcp;
831         struct sock *sk_other;
832         int err = -EIO;
833
834         switch (verdict) {
835         case __SK_PASS:
836                 sk_other = psock->sk;
837                 if (sock_flag(sk_other, SOCK_DEAD) ||
838                     !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
839                         goto out_free;
840                 }
841
842                 tcp = TCP_SKB_CB(skb);
843                 tcp->bpf.flags |= BPF_F_INGRESS;
844
845                 /* If the queue is empty then we can submit directly
846                  * into the msg queue. If it's not empty we have to
847                  * queue work, otherwise we may get out-of-order data.
848                  * If sk_psock_skb_ingress() errors, it will be handled
849                  * by retrying later from the workqueue.
850                  */
851                 if (skb_queue_empty(&psock->ingress_skb)) {
852                         err = sk_psock_skb_ingress_self(psock, skb);
853                 }
854                 if (err < 0) {
855                         skb_queue_tail(&psock->ingress_skb, skb);
856                         schedule_work(&psock->work);
857                 }
858                 break;
859         case __SK_REDIRECT:
860                 sk_psock_skb_redirect(skb);
861                 break;
862         case __SK_DROP:
863         default:
864 out_free:
865                 kfree_skb(skb);
866         }
867 }
868
869 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
870 {
871         struct sk_psock *psock;
872         struct bpf_prog *prog;
873         int ret = __SK_DROP;
874         struct sock *sk;
875
876         rcu_read_lock();
877         sk = strp->sk;
878         psock = sk_psock(sk);
879         if (unlikely(!psock)) {
880                 kfree_skb(skb);
881                 goto out;
882         }
883         skb_set_owner_r(skb, sk);
884         prog = READ_ONCE(psock->progs.skb_verdict);
885         if (likely(prog)) {
886                 tcp_skb_bpf_redirect_clear(skb);
887                 ret = sk_psock_bpf_run(psock, prog, skb);
888                 ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
889         }
890         sk_psock_verdict_apply(psock, skb, ret);
891 out:
892         rcu_read_unlock();
893 }
894
895 static int sk_psock_strp_read_done(struct strparser *strp, int err)
896 {
897         return err;
898 }
899
900 static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
901 {
902         struct sk_psock *psock = sk_psock_from_strp(strp);
903         struct bpf_prog *prog;
904         int ret = skb->len;
905
906         rcu_read_lock();
907         prog = READ_ONCE(psock->progs.skb_parser);
908         if (likely(prog)) {
909                 skb->sk = psock->sk;
910                 ret = sk_psock_bpf_run(psock, prog, skb);
911                 skb->sk = NULL;
912         }
913         rcu_read_unlock();
914         return ret;
915 }
916
917 /* Called with socket lock held. */
918 static void sk_psock_strp_data_ready(struct sock *sk)
919 {
920         struct sk_psock *psock;
921
922         rcu_read_lock();
923         psock = sk_psock(sk);
924         if (likely(psock)) {
925                 if (tls_sw_has_ctx_rx(sk)) {
926                         psock->parser.saved_data_ready(sk);
927                 } else {
928                         write_lock_bh(&sk->sk_callback_lock);
929                         strp_data_ready(&psock->parser.strp);
930                         write_unlock_bh(&sk->sk_callback_lock);
931                 }
932         }
933         rcu_read_unlock();
934 }
935
936 static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
937                                  unsigned int offset, size_t orig_len)
938 {
939         struct sock *sk = (struct sock *)desc->arg.data;
940         struct sk_psock *psock;
941         struct bpf_prog *prog;
942         int ret = __SK_DROP;
943         int len = skb->len;
944
945         /* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
946         skb = skb_clone(skb, GFP_ATOMIC);
947         if (!skb) {
948                 desc->error = -ENOMEM;
949                 return 0;
950         }
951
952         rcu_read_lock();
953         psock = sk_psock(sk);
954         if (unlikely(!psock)) {
955                 len = 0;
956                 kfree_skb(skb);
957                 goto out;
958         }
959         skb_set_owner_r(skb, sk);
960         prog = READ_ONCE(psock->progs.skb_verdict);
961         if (likely(prog)) {
962                 tcp_skb_bpf_redirect_clear(skb);
963                 ret = sk_psock_bpf_run(psock, prog, skb);
964                 ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
965         }
966         sk_psock_verdict_apply(psock, skb, ret);
967 out:
968         rcu_read_unlock();
969         return len;
970 }
971
972 static void sk_psock_verdict_data_ready(struct sock *sk)
973 {
974         struct socket *sock = sk->sk_socket;
975         read_descriptor_t desc;
976
977         if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
978                 return;
979
980         desc.arg.data = sk;
981         desc.error = 0;
982         desc.count = 1;
983
984         sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
985 }
986
987 static void sk_psock_write_space(struct sock *sk)
988 {
989         struct sk_psock *psock;
990         void (*write_space)(struct sock *sk) = NULL;
991
992         rcu_read_lock();
993         psock = sk_psock(sk);
994         if (likely(psock)) {
995                 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
996                         schedule_work(&psock->work);
997                 write_space = psock->saved_write_space;
998         }
999         rcu_read_unlock();
1000         if (write_space)
1001                 write_space(sk);
1002 }
1003
1004 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1005 {
1006         static const struct strp_callbacks cb = {
1007                 .rcv_msg        = sk_psock_strp_read,
1008                 .read_sock_done = sk_psock_strp_read_done,
1009                 .parse_msg      = sk_psock_strp_parse,
1010         };
1011
1012         psock->parser.enabled = false;
1013         return strp_init(&psock->parser.strp, sk, &cb);
1014 }
1015
1016 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1017 {
1018         struct sk_psock_parser *parser = &psock->parser;
1019
1020         if (parser->enabled)
1021                 return;
1022
1023         parser->saved_data_ready = sk->sk_data_ready;
1024         sk->sk_data_ready = sk_psock_verdict_data_ready;
1025         sk->sk_write_space = sk_psock_write_space;
1026         parser->enabled = true;
1027 }
1028
1029 void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1030 {
1031         struct sk_psock_parser *parser = &psock->parser;
1032
1033         if (parser->enabled)
1034                 return;
1035
1036         parser->saved_data_ready = sk->sk_data_ready;
1037         sk->sk_data_ready = sk_psock_strp_data_ready;
1038         sk->sk_write_space = sk_psock_write_space;
1039         parser->enabled = true;
1040 }
1041
1042 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1043 {
1044         struct sk_psock_parser *parser = &psock->parser;
1045
1046         if (!parser->enabled)
1047                 return;
1048
1049         sk->sk_data_ready = parser->saved_data_ready;
1050         parser->saved_data_ready = NULL;
1051         strp_stop(&parser->strp);
1052         parser->enabled = false;
1053 }
1054
1055 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1056 {
1057         struct sk_psock_parser *parser = &psock->parser;
1058
1059         if (!parser->enabled)
1060                 return;
1061
1062         sk->sk_data_ready = parser->saved_data_ready;
1063         parser->saved_data_ready = NULL;
1064         parser->enabled = false;
1065 }