// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}
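
/* msg->sg is used as a circular ring: start/end wrap modulo the ring size,
 * so "end < start" simply means the live region wraps around the end of the
 * array. A worked example of the two cases above (indices illustrative, a
 * ring of MAX_MSG_FRAGS + 1 slots assumed):
 *
 *	start = 1, end = 4: the ring does not wrap, so the check only needs
 *	elem_first_coalesce < end.
 *	start = 5, end = 2: the live region wraps past the end of the array,
 *	so the element qualifies if it is above start or below end.
 */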

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);
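
/* A minimal usage sketch for sk_msg_alloc(), assuming the socket lock is
 * held ("my_sk" and the 4096-byte target are illustrative, not from this
 * file). elem_first_coalesce names the first sg element that newly
 * allocated space may be merged into:
 *
 *	struct sk_msg msg;
 *
 *	sk_msg_init(&msg);
 *	if (sk_msg_alloc(my_sk, &msg, 4096, msg.sg.start)) {
 *		// Earlier iterations may already be charged; release them.
 *		sk_msg_free(my_sk, &msg);
 *	}
 */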

int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);
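
/* Cloning shares pages rather than copying them: dst's elements point at
 * src's pages and the bytes are charged to sk a second time via
 * sk_mem_charge(). A hedged sketch ("sk", "src", "dst" assumed set up by
 * the caller, offsets illustrative):
 *
 *	// Make dst reference 100 bytes of src starting at byte 32.
 *	err = sk_msg_clone(sk, dst, src, 32, 100);
 *	if (err == -ENOSPC)
 *		// dst is full or the range runs past src's data.
 */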

void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of the last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);
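
/* Trim sketch: a sender that grew msg to 4096 bytes but only managed to
 * fill 1024 can shrink it back (values illustrative, socket lock assumed
 * held):
 *
 *	sk_msg_trim(sk, msg, 1024);
 *
 * Whole trailing elements are freed and uncharged; a partially trimmed
 * element just has its length reduced, and curr/copybreak are pulled back
 * so later copies resume at the new end.
 */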

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set. In this case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; the msg will need to be trimmed later
	 * if it also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
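
/* The two *_from_iter() helpers are typically paired: try zerocopy first,
 * then fall back to copying into allocated pages, as tls_sw_sendmsg-style
 * callers do. A hedged sketch ("osize" is the pre-call msg->sg.size, error
 * handling elided):
 *
 *	err = sk_msg_zerocopy_from_iter(sk, &m->msg_iter, msg, copy);
 *	if (err) {
 *		// The iterator was already reverted on failure; drop the
 *		// size growth too, then copy into allocated elements.
 *		sk_msg_trim(sk, msg, osize);
 *		err = sk_msg_alloc(sk, msg, osize + copy, msg->sg.start);
 *		if (!err)
 *			err = sk_msg_memcopy_from_iter(sk, &m->msg_iter,
 *						       msg, copy);
 *	}
 */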

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
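
/* Protocol recvmsg hooks layer this helper under the socket lock; a hedged
 * sketch of the tcp_bpf_recvmsg-style call pattern ("psock" obtained via
 * sk_psock_get(), fallback paths elided):
 *
 *	lock_sock(sk);
 *	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
 *	release_sock(sk);
 *	if (!copied)
 *		// Nothing queued yet: wait for data or fall back to the
 *		// saved proto's recvmsg.
 */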

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize() may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec() cannot error.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0))
		return num_sge;

	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * skip memory accounting and owner transition since they are already
	 * set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r()
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}
576 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
577                                u32 off, u32 len, bool ingress)
578 {
579         if (!ingress) {
580                 if (!sock_writeable(psock->sk))
581                         return -EAGAIN;
582                 return skb_send_sock(psock->sk, skb, off, len);
583         }
584         return sk_psock_skb_ingress(psock, skb);
585 }
586
587 static void sk_psock_skb_state(struct sk_psock *psock,
588                                struct sk_psock_work_state *state,
589                                struct sk_buff *skb,
590                                int len, int off)
591 {
592         spin_lock_bh(&psock->ingress_lock);
593         if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
594                 state->skb = skb;
595                 state->len = len;
596                 state->off = off;
597         } else {
598                 sock_drop(psock->sk, skb);
599         }
600         spin_unlock_bh(&psock->ingress_lock);
601 }
602
603 static void sk_psock_backlog(struct work_struct *work)
604 {
605         struct sk_psock *psock = container_of(work, struct sk_psock, work);
606         struct sk_psock_work_state *state = &psock->work_state;
607         struct sk_buff *skb = NULL;
608         bool ingress;
609         u32 len, off;
610         int ret;
611
612         mutex_lock(&psock->work_mutex);
613         if (unlikely(state->skb)) {
614                 spin_lock_bh(&psock->ingress_lock);
615                 skb = state->skb;
616                 len = state->len;
617                 off = state->off;
618                 state->skb = NULL;
619                 spin_unlock_bh(&psock->ingress_lock);
620         }
621         if (skb)
622                 goto start;
623
624         while ((skb = skb_dequeue(&psock->ingress_skb))) {
625                 len = skb->len;
626                 off = 0;
627 start:
628                 ingress = skb_bpf_ingress(skb);
629                 skb_bpf_redirect_clear(skb);
630                 do {
631                         ret = -EIO;
632                         if (!sock_flag(psock->sk, SOCK_DEAD))
633                                 ret = sk_psock_handle_skb(psock, skb, off,
634                                                           len, ingress);
635                         if (ret <= 0) {
636                                 if (ret == -EAGAIN) {
637                                         sk_psock_skb_state(psock, state, skb,
638                                                            len, off);
639                                         goto end;
640                                 }
641                                 /* Hard errors break pipe and stop xmit. */
642                                 sk_psock_report_error(psock, ret ? -ret : EPIPE);
643                                 sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
644                                 sock_drop(psock->sk, skb);
645                                 goto end;
646                         }
647                         off += ret;
648                         len -= ret;
649                 } while (len);
650
651                 if (!ingress)
652                         kfree_skb(skb);
653         }
654 end:
655         mutex_unlock(&psock->work_mutex);
656 }

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);
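
/* Lifecycle sketch (hypothetical caller; sock_map-style users pass their
 * own NUMA node rather than NUMA_NO_NODE):
 *
 *	struct sk_psock *psock = sk_psock_init(sk, NUMA_NO_NODE);
 *
 *	if (IS_ERR(psock))
 *		return PTR_ERR(psock);
 *	// ... attach programs, start strparser or verdict hooks ...
 *	sk_psock_put(sk, psock);	// pairs with the refcount set above
 *
 * Note the ERR_PTR() convention: -EBUSY when sk_user_data is already
 * claimed, -ENOMEM when allocation fails.
 */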

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	kfree_skb(psock->work_state.skb);
	/* We NULL the skb here to ensure that calls to sk_psock_backlog()
	 * do not pick up the freed skb.
	 */
	psock->work_state.skb = NULL;
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}
755 void sk_psock_stop(struct sk_psock *psock, bool wait)
756 {
757         spin_lock_bh(&psock->ingress_lock);
758         sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
759         sk_psock_cork_free(psock);
760         __sk_psock_zap_ingress(psock);
761         spin_unlock_bh(&psock->ingress_lock);
762
763         if (wait)
764                 cancel_work_sync(&psock->work);
765 }
766
767 static void sk_psock_done_strp(struct sk_psock *psock);
768
769 static void sk_psock_destroy(struct work_struct *work)
770 {
771         struct sk_psock *psock = container_of(to_rcu_work(work),
772                                               struct sk_psock, rwork);
773         /* No sk_callback_lock since already detached. */
774
775         sk_psock_done_strp(psock);
776
777         cancel_work_sync(&psock->work);
778         mutex_destroy(&psock->work_mutex);
779
780         psock_progs_drop(&psock->progs);
781
782         sk_psock_link_destroy(psock);
783         sk_psock_cork_free(psock);
784
785         if (psock->sk_redir)
786                 sock_put(psock->sk_redir);
787         sock_put(psock->sk);
788         kfree(psock);
789 }
790
791 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
792 {
793         write_lock_bh(&sk->sk_callback_lock);
794         sk_psock_restore_proto(sk, psock);
795         rcu_assign_sk_user_data(sk, NULL);
796         if (psock->progs.stream_parser)
797                 sk_psock_stop_strp(sk, psock);
798         else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
799                 sk_psock_stop_verdict(sk, psock);
800         write_unlock_bh(&sk->sk_callback_lock);
801
802         sk_psock_stop(psock, false);
803
804         INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
805         queue_rcu_work(system_wq, &psock->rwork);
806 }
807 EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}
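
/* Verdict mapping at a glance (SK_* are the BPF-visible return codes,
 * __SK_* the internal actions):
 *
 *	SK_PASS, redirect socket set	-> __SK_REDIRECT
 *	SK_PASS, no redirect		-> __SK_PASS
 *	SK_DROP or anything else	-> __SK_DROP
 */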

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
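
/* Sendmsg-side sketch: a protocol hook runs the msg parser and acts on the
 * mapped verdict (tcp_bpf_send_verdict-style, names illustrative):
 *
 *	switch (sk_psock_msg_verdict(sk, psock, msg)) {
 *	case __SK_PASS:		// transmit on this socket
 *		break;
 *	case __SK_REDIRECT:	// send via psock->sk_redir instead
 *		break;
 *	case __SK_DROP:		// free the msg and report an error
 *	default:
 *		break;
 *	}
 */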

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * verdict but then didn't set a redirect socket.
	 */
	if (unlikely(!sk_other)) {
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get out-of-order data.
		 * Errors from sk_psock_skb_ingress_self() are handled
		 * by retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			err = sk_psock_skb_ingress_self(psock, skb);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}
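
/* Attach-order sketch for the strparser path (hedged; mirrors how sock_map
 * wires this up): initialize the parser once, then swap in the data_ready
 * callback under the callback lock.
 *
 *	err = sk_psock_init_strp(sk, psock);
 *	if (err)
 *		goto out_unwind;
 *	write_lock_bh(&sk->sk_callback_lock);
 *	sk_psock_start_strp(sk, psock);
 *	write_unlock_bh(&sk->sk_callback_lock);
 */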

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	/* clone here so sk_eat_skb() in tcp_read_sock() does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
		len = 0;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}
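
/* The verdict-only path needs no strparser state, so attaching is just the
 * data_ready swap under the callback lock (hedged sketch, mirroring the
 * sock_map attach path):
 *
 *	write_lock_bh(&sk->sk_callback_lock);
 *	sk_psock_start_verdict(sk, psock);
 *	write_unlock_bh(&sk->sk_callback_lock);
 */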

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}