net/tls/tls_sw.c
/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

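/* Walk an skb and count the scatterlist entries needed to map @len bytes
 * starting at @offset, covering the linear head, page frags and
 * (recursively) any frag list. Recursion depth is capped at 24 levels.
 */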
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
                     unsigned int recursion_level)
{
        int start = skb_headlen(skb);
        int i, chunk = start - offset;
        struct sk_buff *frag_iter;
        int elt = 0;

        if (unlikely(recursion_level >= 24))
                return -EMSGSIZE;

        if (chunk > 0) {
                if (chunk > len)
                        chunk = len;
                elt++;
                len -= chunk;
                if (len == 0)
                        return elt;
                offset += chunk;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
                chunk = end - offset;
                if (chunk > 0) {
                        if (chunk > len)
                                chunk = len;
                        elt++;
                        len -= chunk;
                        if (len == 0)
                                return elt;
                        offset += chunk;
                }
                start = end;
        }

        if (unlikely(skb_has_frag_list(skb))) {
                skb_walk_frags(skb, frag_iter) {
                        int end, ret;

                        WARN_ON(start > offset + len);

                        end = start + frag_iter->len;
                        chunk = end - offset;
                        if (chunk > 0) {
                                if (chunk > len)
                                        chunk = len;
                                ret = __skb_nsg(frag_iter, offset - start, chunk,
                                                recursion_level + 1);
                                if (unlikely(ret < 0))
                                        return ret;
                                elt += ret;
                                len -= chunk;
                                if (len == 0)
                                        return elt;
                                offset += chunk;
                        }
                        start = end;
                }
        }
        BUG_ON(len);
        return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
        return __skb_nsg(skb, offset, len, 0);
}

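/* TLS 1.3 hides the real content type behind trailing zero padding: the
 * plaintext is followed by one content-type byte and then zeros. Scan
 * backwards from just before the record's authentication tag (the
 * hardcoded 17 assumes a 16-byte tag) to find that byte, and return the
 * number of padding bytes to strip, or a negative error.
 */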
static int padding_length(struct tls_sw_context_rx *ctx,
                          struct tls_prot_info *prot, struct sk_buff *skb)
{
        struct strp_msg *rxm = strp_msg(skb);
        int sub = 0;

        /* Determine zero-padding length */
        if (prot->version == TLS_1_3_VERSION) {
                char content_type = 0;
                int err;
                int back = 17;

                while (content_type == 0) {
                        if (back > rxm->full_len - prot->prepend_size)
                                return -EBADMSG;
                        err = skb_copy_bits(skb,
                                            rxm->offset + rxm->full_len - back,
                                            &content_type, 1);
                        if (err)
                                return err;
                        if (content_type)
                                break;
                        sub++;
                        back++;
                }
                ctx->control = content_type;
        }
        return sub;
}

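/* Completion callback for asynchronous decryption. Propagates errors to
 * the socket, strips the TLS header, padding and tag from the message
 * bounds, releases the out-of-place destination pages, and wakes any
 * waiter once the last pending decryption completes.
 */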
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
        struct aead_request *aead_req = (struct aead_request *)req;
        struct scatterlist *sgout = aead_req->dst;
        struct scatterlist *sgin = aead_req->src;
        struct tls_sw_context_rx *ctx;
        struct tls_context *tls_ctx;
        struct tls_prot_info *prot;
        struct scatterlist *sg;
        struct sk_buff *skb;
        unsigned int pages;
        int pending;

        skb = (struct sk_buff *)req->data;
        tls_ctx = tls_get_ctx(skb->sk);
        ctx = tls_sw_ctx_rx(tls_ctx);
        prot = &tls_ctx->prot_info;

        /* Propagate the error, if any */
        if (err) {
                if (err == -EBADMSG)
                        TLS_INC_STATS(sock_net(skb->sk),
                                      LINUX_MIB_TLSDECRYPTERROR);
                ctx->async_wait.err = err;
                tls_err_abort(skb->sk, err);
        } else {
                struct strp_msg *rxm = strp_msg(skb);
                int pad;

                pad = padding_length(ctx, prot, skb);
                if (pad < 0) {
                        ctx->async_wait.err = pad;
                        tls_err_abort(skb->sk, pad);
                } else {
                        rxm->full_len -= pad;
                        rxm->offset += prot->prepend_size;
                        rxm->full_len -= prot->overhead_size;
                }
        }

        /* After using skb->sk to propagate sk through crypto async callback
         * we need to NULL it again.
         */
        skb->sk = NULL;

        /* Free the destination pages if skb was not decrypted in place */
        if (sgout != sgin) {
                /* Skip the first S/G entry as it points to AAD */
                for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
                        if (!sg)
                                break;
                        put_page(sg_page(sg));
                }
        }

        kfree(aead_req);

        spin_lock_bh(&ctx->decrypt_compl_lock);
        pending = atomic_dec_return(&ctx->decrypt_pending);

        if (!pending && ctx->async_notify)
                complete(&ctx->async_wait.completion);
        spin_unlock_bh(&ctx->decrypt_compl_lock);
}

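/* Submit one AEAD decrypt request for a record. In async mode the request
 * completes in tls_decrypt_done() and -EINPROGRESS is returned to the
 * caller; in sync mode the function waits for completion.
 */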
static int tls_do_decryption(struct sock *sk,
                             struct sk_buff *skb,
                             struct scatterlist *sgin,
                             struct scatterlist *sgout,
                             char *iv_recv,
                             size_t data_len,
                             struct aead_request *aead_req,
                             bool async)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        int ret;

        aead_request_set_tfm(aead_req, ctx->aead_recv);
        aead_request_set_ad(aead_req, prot->aad_size);
        aead_request_set_crypt(aead_req, sgin, sgout,
                               data_len + prot->tag_size,
                               (u8 *)iv_recv);

        if (async) {
                /* Using skb->sk to push sk through to crypto async callback
                 * handler. This allows propagating errors up to the socket
                 * if needed. It _must_ be cleared in the async handler
                 * before consume_skb is called. We _know_ skb->sk is NULL
                 * because it is a clone from strparser.
                 */
                skb->sk = sk;
                aead_request_set_callback(aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          tls_decrypt_done, skb);
                atomic_inc(&ctx->decrypt_pending);
        } else {
                aead_request_set_callback(aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &ctx->async_wait);
        }

        ret = crypto_aead_decrypt(aead_req);
        if (ret == -EINPROGRESS) {
                if (async)
                        return ret;

                ret = crypto_wait_req(ret, &ctx->async_wait);
        }

        if (async)
                atomic_dec(&ctx->decrypt_pending);

        return ret;
}

static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;

        sk_msg_trim(sk, &rec->msg_plaintext, target_size);
        if (target_size > 0)
                target_size += prot->overhead_size;
        sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
        struct sk_msg *msg_en = &rec->msg_encrypted;

        return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
        struct sk_msg *msg_pl = &rec->msg_plaintext;
        struct sk_msg *msg_en = &rec->msg_encrypted;
        int skip, len;

        /* We add page references worth len bytes from encrypted sg
         * at the end of plaintext sg. The caller guarantees that msg_en
         * has enough room for them.
         */
        len = required - msg_pl->sg.size;

        /* Skip initial bytes in msg_en's data to be able to use
         * the same offset for both plain and encrypted data.
         */
        skip = prot->prepend_size + msg_pl->sg.size;

        return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

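/* Allocate a TLS record together with its AEAD request (placed in the
 * same allocation, sized via crypto_aead_reqsize()) and initialize the
 * two-entry AAD + data scatterlists for input and output.
 */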
static struct tls_rec *tls_get_rec(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct sk_msg *msg_pl, *msg_en;
        struct tls_rec *rec;
        int mem_size;

        mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

        rec = kzalloc(mem_size, sk->sk_allocation);
        if (!rec)
                return NULL;

        msg_pl = &rec->msg_plaintext;
        msg_en = &rec->msg_encrypted;

        sk_msg_init(msg_pl);
        sk_msg_init(msg_en);

        sg_init_table(rec->sg_aead_in, 2);
        sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
        sg_unmark_end(&rec->sg_aead_in[1]);

        sg_init_table(rec->sg_aead_out, 2);
        sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
        sg_unmark_end(&rec->sg_aead_out[1]);

        return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
        sk_msg_free(sk, &rec->msg_encrypted);
        sk_msg_free(sk, &rec->msg_plaintext);
        kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;

        if (rec) {
                tls_free_rec(sk, rec);
                ctx->open_rec = NULL;
        }
}

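/* Push fully encrypted records onto the wire, in order: first finish a
 * partially sent record, then transmit each record at the head of
 * tx_list whose encryption has completed (tx_ready). Stops at the first
 * record still being encrypted so records never reorder.
 */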
int tls_tx_records(struct sock *sk, int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec, *tmp;
        struct sk_msg *msg_en;
        int tx_flags, rc = 0;

        if (tls_is_partially_sent_record(tls_ctx)) {
                rec = list_first_entry(&ctx->tx_list,
                                       struct tls_rec, list);

                if (flags == -1)
                        tx_flags = rec->tx_flags;
                else
                        tx_flags = flags;

                rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
                if (rc)
                        goto tx_err;

                /* Full record has been transmitted.
                 * Remove the head of tx_list
                 */
                list_del(&rec->list);
                sk_msg_free(sk, &rec->msg_plaintext);
                kfree(rec);
        }

        /* Tx all ready records */
        list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
                if (READ_ONCE(rec->tx_ready)) {
                        if (flags == -1)
                                tx_flags = rec->tx_flags;
                        else
                                tx_flags = flags;

                        msg_en = &rec->msg_encrypted;
                        rc = tls_push_sg(sk, tls_ctx,
                                         &msg_en->sg.data[msg_en->sg.curr],
                                         0, tx_flags);
                        if (rc)
                                goto tx_err;

                        list_del(&rec->list);
                        sk_msg_free(sk, &rec->msg_plaintext);
                        kfree(rec);
                } else {
                        break;
                }
        }

tx_err:
        if (rc < 0 && rc != -EAGAIN)
                tls_err_abort(sk, EBADMSG);

        return rc;
}

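/* Completion callback for asynchronous encryption. Restores the record
 * header bytes in the current output fragment, records any error on the
 * socket, marks the record ready for transmission and, if it sits at the
 * head of tx_list, schedules the transmit work.
 */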
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
        struct aead_request *aead_req = (struct aead_request *)req;
        struct sock *sk = req->data;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct scatterlist *sge;
        struct sk_msg *msg_en;
        struct tls_rec *rec;
        bool ready = false;
        int pending;

        rec = container_of(aead_req, struct tls_rec, aead_req);
        msg_en = &rec->msg_encrypted;

        sge = sk_msg_elem(msg_en, msg_en->sg.curr);
        sge->offset -= prot->prepend_size;
        sge->length += prot->prepend_size;

        /* Check if an error was previously set on the socket */
        if (err || sk->sk_err) {
                rec = NULL;

                /* If an error is already set on the socket, return the same code */
                if (sk->sk_err) {
                        ctx->async_wait.err = sk->sk_err;
                } else {
                        ctx->async_wait.err = err;
                        tls_err_abort(sk, err);
                }
        }

        if (rec) {
                struct tls_rec *first_rec;

                /* Mark the record as ready for transmission */
                smp_store_mb(rec->tx_ready, true);

                /* If the received record is at the head of tx_list, schedule tx */
                first_rec = list_first_entry(&ctx->tx_list,
                                             struct tls_rec, list);
                if (rec == first_rec)
                        ready = true;
        }

        spin_lock_bh(&ctx->encrypt_compl_lock);
        pending = atomic_dec_return(&ctx->encrypt_pending);

        if (!pending && ctx->async_notify)
                complete(&ctx->async_wait.completion);
        spin_unlock_bh(&ctx->encrypt_compl_lock);

        if (!ready)
                return;

        /* Schedule the transmission */
        if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
                schedule_delayed_work(&ctx->tx_work.work, 1);
}

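/* Build the per-record IV (constant CCM byte, salt+IV, XORed with the
 * record sequence number), queue the record on tx_list and submit the
 * AEAD encrypt request. Returns 0 on synchronous success, -EINPROGRESS
 * if the request completes asynchronously, or a negative error.
 */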
static int tls_do_encryption(struct sock *sk,
                             struct tls_context *tls_ctx,
                             struct tls_sw_context_tx *ctx,
                             struct aead_request *aead_req,
                             size_t data_len, u32 start)
{
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_rec *rec = ctx->open_rec;
        struct sk_msg *msg_en = &rec->msg_encrypted;
        struct scatterlist *sge = sk_msg_elem(msg_en, start);
        int rc, iv_offset = 0;

        /* For CCM based ciphers, first byte of IV is a constant */
        if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
                rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
                iv_offset = 1;
        }

        memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
               prot->iv_size + prot->salt_size);

        xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);

        sge->offset += prot->prepend_size;
        sge->length -= prot->prepend_size;

        msg_en->sg.curr = start;

        aead_request_set_tfm(aead_req, ctx->aead_send);
        aead_request_set_ad(aead_req, prot->aad_size);
        aead_request_set_crypt(aead_req, rec->sg_aead_in,
                               rec->sg_aead_out,
                               data_len, rec->iv_data);

        aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  tls_encrypt_done, sk);

        /* Add the record to tx_list */
        list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
        atomic_inc(&ctx->encrypt_pending);

        rc = crypto_aead_encrypt(aead_req);
        if (!rc || rc != -EINPROGRESS) {
                atomic_dec(&ctx->encrypt_pending);
                sge->offset -= prot->prepend_size;
                sge->length += prot->prepend_size;
        }

        if (!rc) {
                WRITE_ONCE(rec->tx_ready, true);
        } else if (rc != -EINPROGRESS) {
                list_del(&rec->list);
                return rc;
        }

        /* Unhook the record from the context unless encryption failed */
        ctx->open_rec = NULL;
        tls_advance_record_sn(sk, prot, &tls_ctx->tx);
        return rc;
}

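/* Split an open record so that only the plaintext covered by apply_bytes
 * stays in @from; the remaining scatterlist entries move into a freshly
 * allocated record returned via @to (taking an extra page reference when
 * an entry straddles the split). @orig_end allows undoing the split.
 */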
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
                                 struct tls_rec **to, struct sk_msg *msg_opl,
                                 struct sk_msg *msg_oen, u32 split_point,
                                 u32 tx_overhead_size, u32 *orig_end)
{
        u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
        struct scatterlist *sge, *osge, *nsge;
        u32 orig_size = msg_opl->sg.size;
        struct scatterlist tmp = { };
        struct sk_msg *msg_npl;
        struct tls_rec *new;
        int ret;

        new = tls_get_rec(sk);
        if (!new)
                return -ENOMEM;
        ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
                           tx_overhead_size, 0);
        if (ret < 0) {
                tls_free_rec(sk, new);
                return ret;
        }

        *orig_end = msg_opl->sg.end;
        i = msg_opl->sg.start;
        sge = sk_msg_elem(msg_opl, i);
        while (apply && sge->length) {
                if (sge->length > apply) {
                        u32 len = sge->length - apply;

                        get_page(sg_page(sge));
                        sg_set_page(&tmp, sg_page(sge), len,
                                    sge->offset + apply);
                        sge->length = apply;
                        bytes += apply;
                        apply = 0;
                } else {
                        apply -= sge->length;
                        bytes += sge->length;
                }

                sk_msg_iter_var_next(i);
                if (i == msg_opl->sg.end)
                        break;
                sge = sk_msg_elem(msg_opl, i);
        }

        msg_opl->sg.end = i;
        msg_opl->sg.curr = i;
        msg_opl->sg.copybreak = 0;
        msg_opl->apply_bytes = 0;
        msg_opl->sg.size = bytes;

        msg_npl = &new->msg_plaintext;
        msg_npl->apply_bytes = apply;
        msg_npl->sg.size = orig_size - bytes;

        j = msg_npl->sg.start;
        nsge = sk_msg_elem(msg_npl, j);
        if (tmp.length) {
                memcpy(nsge, &tmp, sizeof(*nsge));
                sk_msg_iter_var_next(j);
                nsge = sk_msg_elem(msg_npl, j);
        }

        osge = sk_msg_elem(msg_opl, i);
        while (osge->length) {
                memcpy(nsge, osge, sizeof(*nsge));
                sg_unmark_end(nsge);
                sk_msg_iter_var_next(i);
                sk_msg_iter_var_next(j);
                if (i == *orig_end)
                        break;
                osge = sk_msg_elem(msg_opl, i);
                nsge = sk_msg_elem(msg_npl, j);
        }

        msg_npl->sg.end = j;
        msg_npl->sg.curr = j;
        msg_npl->sg.copybreak = 0;

        *to = new;
        return 0;
}

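/* Undo tls_split_open_record(): reattach the plaintext of @from to @to,
 * coalescing the boundary entries when they are contiguous in the same
 * page, restore the original sg bounds, and take over @from's encrypted
 * buffer before freeing it.
 */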
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
                                  struct tls_rec *from, u32 orig_end)
{
        struct sk_msg *msg_npl = &from->msg_plaintext;
        struct sk_msg *msg_opl = &to->msg_plaintext;
        struct scatterlist *osge, *nsge;
        u32 i, j;

        i = msg_opl->sg.end;
        sk_msg_iter_var_prev(i);
        j = msg_npl->sg.start;

        osge = sk_msg_elem(msg_opl, i);
        nsge = sk_msg_elem(msg_npl, j);

        if (sg_page(osge) == sg_page(nsge) &&
            osge->offset + osge->length == nsge->offset) {
                osge->length += nsge->length;
                put_page(sg_page(nsge));
        }

        msg_opl->sg.end = orig_end;
        msg_opl->sg.curr = orig_end;
        msg_opl->sg.copybreak = 0;
        msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
        msg_opl->sg.size += msg_npl->sg.size;

        sk_msg_free(sk, &to->msg_encrypted);
        sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

        kfree(from);
}

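/* Close the open record and hand it to the crypto layer: split it at
 * apply_bytes if needed, append the TLS 1.3 content type, chain the AAD
 * and data scatterlists, write the record header and start encryption.
 * On success, any completed records are sent via tls_tx_records().
 */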
static int tls_push_record(struct sock *sk, int flags,
                           unsigned char record_type)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
        u32 i, split_point, orig_end;
        struct sk_msg *msg_pl, *msg_en;
        struct aead_request *req;
        bool split;
        int rc;

        if (!rec)
                return 0;

        msg_pl = &rec->msg_plaintext;
        msg_en = &rec->msg_encrypted;

        split_point = msg_pl->apply_bytes;
        split = split_point && split_point < msg_pl->sg.size;
        if (unlikely((!split &&
                      msg_pl->sg.size +
                      prot->overhead_size > msg_en->sg.size) ||
                     (split &&
                      split_point +
                      prot->overhead_size > msg_en->sg.size))) {
                split = true;
                split_point = msg_en->sg.size;
        }
        if (split) {
                rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
                                           split_point, prot->overhead_size,
                                           &orig_end);
                if (rc < 0)
                        return rc;
                /* This can happen if above tls_split_open_record allocates
                 * a single large encryption buffer instead of two smaller
                 * ones. In this case adjust pointers and continue without
                 * split.
                 */
                if (!msg_pl->sg.size) {
                        tls_merge_open_record(sk, rec, tmp, orig_end);
                        msg_pl = &rec->msg_plaintext;
                        msg_en = &rec->msg_encrypted;
                        split = false;
                }
                sk_msg_trim(sk, msg_en, msg_pl->sg.size +
                            prot->overhead_size);
        }

        rec->tx_flags = flags;
        req = &rec->aead_req;

        i = msg_pl->sg.end;
        sk_msg_iter_var_prev(i);

        rec->content_type = record_type;
        if (prot->version == TLS_1_3_VERSION) {
                /* Add content type to end of message.  No padding added */
                sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
                sg_mark_end(&rec->sg_content_type);
                sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
                         &rec->sg_content_type);
        } else {
                sg_mark_end(sk_msg_elem(msg_pl, i));
        }

        if (msg_pl->sg.end < msg_pl->sg.start) {
                sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
                         MAX_SKB_FRAGS - msg_pl->sg.start + 1,
                         msg_pl->sg.data);
        }

        i = msg_pl->sg.start;
        sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

        i = msg_en->sg.end;
        sk_msg_iter_var_prev(i);
        sg_mark_end(sk_msg_elem(msg_en, i));

        i = msg_en->sg.start;
        sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

        tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
                     tls_ctx->tx.rec_seq, prot->rec_seq_size,
                     record_type, prot->version);

        tls_fill_prepend(tls_ctx,
                         page_address(sg_page(&msg_en->sg.data[i])) +
                         msg_en->sg.data[i].offset,
                         msg_pl->sg.size + prot->tail_size,
                         record_type, prot->version);

        tls_ctx->pending_open_record_frags = false;

        rc = tls_do_encryption(sk, tls_ctx, ctx, req,
                               msg_pl->sg.size + prot->tail_size, i);
        if (rc < 0) {
                if (rc != -EINPROGRESS) {
                        tls_err_abort(sk, EBADMSG);
                        if (split) {
                                tls_ctx->pending_open_record_frags = true;
                                tls_merge_open_record(sk, rec, tmp, orig_end);
                        }
                }
                ctx->async_capable = 1;
                return rc;
        } else if (split) {
                msg_pl = &tmp->msg_plaintext;
                msg_en = &tmp->msg_encrypted;
                sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
                tls_ctx->pending_open_record_frags = true;
                ctx->open_rec = tmp;
        }

        return tls_tx_records(sk, flags);
}

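/* Run the BPF sockmap verdict over the plaintext of the open record and
 * act on it: __SK_PASS encrypts and pushes the record, __SK_REDIRECT
 * forwards the plaintext to another socket via tcp_bpf_sendmsg_redir(),
 * and __SK_DROP frees the data and returns -EACCES. Without a psock or
 * with MSG_SENDPAGE_NOPOLICY set, the record is pushed directly.
 */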
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
                               bool full_record, u8 record_type,
                               ssize_t *copied, int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct sk_msg msg_redir = { };
        struct sk_psock *psock;
        struct sock *sk_redir;
        struct tls_rec *rec;
        bool enospc, policy;
        int err = 0, send;
        u32 delta = 0;

        policy = !(flags & MSG_SENDPAGE_NOPOLICY);
        psock = sk_psock_get(sk);
        if (!psock || !policy) {
                err = tls_push_record(sk, flags, record_type);
                if (err && sk->sk_err == EBADMSG) {
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
                        err = -sk->sk_err;
                }
                if (psock)
                        sk_psock_put(sk, psock);
                return err;
        }
more_data:
        enospc = sk_msg_full(msg);
        if (psock->eval == __SK_NONE) {
                delta = msg->sg.size;
                psock->eval = sk_psock_msg_verdict(sk, psock, msg);
                delta -= msg->sg.size;
        }
        if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
            !enospc && !full_record) {
                err = -ENOSPC;
                goto out_err;
        }
        msg->cork_bytes = 0;
        send = msg->sg.size;
        if (msg->apply_bytes && msg->apply_bytes < send)
                send = msg->apply_bytes;

        switch (psock->eval) {
        case __SK_PASS:
                err = tls_push_record(sk, flags, record_type);
                if (err && sk->sk_err == EBADMSG) {
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
                        err = -sk->sk_err;
                        goto out_err;
                }
                break;
        case __SK_REDIRECT:
                sk_redir = psock->sk_redir;
                memcpy(&msg_redir, msg, sizeof(*msg));
                if (msg->apply_bytes < send)
                        msg->apply_bytes = 0;
                else
                        msg->apply_bytes -= send;
                sk_msg_return_zero(sk, msg, send);
                msg->sg.size -= send;
                release_sock(sk);
                err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
                lock_sock(sk);
                if (err < 0) {
                        *copied -= sk_msg_free_nocharge(sk, &msg_redir);
                        msg->sg.size = 0;
                }
                if (msg->sg.size == 0)
                        tls_free_open_rec(sk);
                break;
        case __SK_DROP:
        default:
                sk_msg_free_partial(sk, msg, send);
                if (msg->apply_bytes < send)
                        msg->apply_bytes = 0;
                else
                        msg->apply_bytes -= send;
                if (msg->sg.size == 0)
                        tls_free_open_rec(sk);
                *copied -= (send + delta);
                err = -EACCES;
        }

        if (likely(!err)) {
                bool reset_eval = !ctx->open_rec;

                rec = ctx->open_rec;
                if (rec) {
                        msg = &rec->msg_plaintext;
                        if (!msg->apply_bytes)
                                reset_eval = true;
                }
                if (reset_eval) {
                        psock->eval = __SK_NONE;
                        if (psock->sk_redir) {
                                sock_put(psock->sk_redir);
                                psock->sk_redir = NULL;
                        }
                }
                if (rec)
                        goto more_data;
        }
 out_err:
        sk_psock_put(sk, psock);
        return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
        struct sk_msg *msg_pl;
        size_t copied;

        if (!rec)
                return 0;

        msg_pl = &rec->msg_plaintext;
        copied = msg_pl->sg.size;
        if (!copied)
                return 0;

        return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
                                   &copied, flags);
}

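/* sendmsg() for a TLS_SW transmit socket. Data is staged into open
 * records of at most TLS_MAX_PAYLOAD_SIZE bytes and pushed through the
 * BPF verdict path whenever a record fills up or MSG_MORE is not set;
 * zero-copy from the user iov is attempted when the cipher operates
 * synchronously. Pending async encryptions of zero-copy records are
 * waited on before returning.
 */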
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        bool async_capable = ctx->async_capable;
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
        bool eor = !(msg->msg_flags & MSG_MORE);
        size_t try_to_copy;
        ssize_t copied = 0;
        struct sk_msg *msg_pl, *msg_en;
        struct tls_rec *rec;
        int required_size;
        int num_async = 0;
        bool full_record;
        int record_room;
        int num_zc = 0;
        int orig_size;
        int ret = 0;
        int pending;

        if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                               MSG_CMSG_COMPAT))
                return -EOPNOTSUPP;

        mutex_lock(&tls_ctx->tx_lock);
        lock_sock(sk);

        if (unlikely(msg->msg_controllen)) {
                ret = tls_proccess_cmsg(sk, msg, &record_type);
                if (ret) {
                        if (ret == -EINPROGRESS)
                                num_async++;
                        else if (ret != -EAGAIN)
                                goto send_end;
                }
        }

        while (msg_data_left(msg)) {
                if (sk->sk_err) {
                        ret = -sk->sk_err;
                        goto send_end;
                }

                if (ctx->open_rec)
                        rec = ctx->open_rec;
                else
                        rec = ctx->open_rec = tls_get_rec(sk);
                if (!rec) {
                        ret = -ENOMEM;
                        goto send_end;
                }

                msg_pl = &rec->msg_plaintext;
                msg_en = &rec->msg_encrypted;

                orig_size = msg_pl->sg.size;
                full_record = false;
                try_to_copy = msg_data_left(msg);
                record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
                if (try_to_copy >= record_room) {
                        try_to_copy = record_room;
                        full_record = true;
                }

                required_size = msg_pl->sg.size + try_to_copy +
                                prot->overhead_size;

                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;

alloc_encrypted:
                ret = tls_alloc_encrypted_msg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;

                        /* Adjust try_to_copy according to the amount that was
                         * actually allocated. The difference is due
                         * to the max sg elements limit
                         */
                        try_to_copy -= required_size - msg_en->sg.size;
                        full_record = true;
                }

                if (!is_kvec && (full_record || eor) && !async_capable) {
                        u32 first = msg_pl->sg.end;

                        ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
                                                        msg_pl, try_to_copy);
                        if (ret)
                                goto fallback_to_reg_send;

                        num_zc++;
                        copied += try_to_copy;

                        sk_msg_sg_copy_set(msg_pl, first);
                        ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
                                                  record_type, &copied,
                                                  msg->msg_flags);
                        if (ret) {
                                if (ret == -EINPROGRESS)
                                        num_async++;
                                else if (ret == -ENOMEM)
                                        goto wait_for_memory;
                                else if (ctx->open_rec && ret == -ENOSPC)
                                        goto rollback_iter;
                                else if (ret != -EAGAIN)
                                        goto send_end;
                        }
                        continue;
rollback_iter:
                        copied -= try_to_copy;
                        sk_msg_sg_copy_clear(msg_pl, first);
                        iov_iter_revert(&msg->msg_iter,
                                        msg_pl->sg.size - orig_size);
fallback_to_reg_send:
                        sk_msg_trim(sk, msg_pl, orig_size);
                }

                required_size = msg_pl->sg.size + try_to_copy;

                ret = tls_clone_plaintext_msg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto send_end;

                        /* Adjust try_to_copy according to the amount that was
                         * actually allocated. The difference is due
                         * to the max sg elements limit
                         */
                        try_to_copy -= required_size - msg_pl->sg.size;
                        full_record = true;
                        sk_msg_trim(sk, msg_en,
                                    msg_pl->sg.size + prot->overhead_size);
                }

                if (try_to_copy) {
                        ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
                                                       msg_pl, try_to_copy);
                        if (ret < 0)
                                goto trim_sgl;
                }

                /* Open records defined only if successfully copied, otherwise
                 * we would trim the sg but not reset the open record frags.
                 */
                tls_ctx->pending_open_record_frags = true;
                copied += try_to_copy;
                if (full_record || eor) {
                        ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
                                                  record_type, &copied,
                                                  msg->msg_flags);
                        if (ret) {
                                if (ret == -EINPROGRESS)
                                        num_async++;
                                else if (ret == -ENOMEM)
                                        goto wait_for_memory;
                                else if (ret != -EAGAIN) {
                                        if (ret == -ENOSPC)
                                                ret = 0;
                                        goto send_end;
                                }
                        }
                }

                continue;

wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret) {
trim_sgl:
                        if (ctx->open_rec)
                                tls_trim_both_msgs(sk, orig_size);
                        goto send_end;
                }

                if (ctx->open_rec && msg_en->sg.size < required_size)
                        goto alloc_encrypted;
        }

        if (!num_async) {
                goto send_end;
        } else if (num_zc) {
                /* Wait for pending encryptions to get completed */
                spin_lock_bh(&ctx->encrypt_compl_lock);
                ctx->async_notify = true;

                pending = atomic_read(&ctx->encrypt_pending);
                spin_unlock_bh(&ctx->encrypt_compl_lock);
                if (pending)
                        crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
                else
                        reinit_completion(&ctx->async_wait.completion);

                /* There can be no concurrent accesses, since we have no
                 * pending encrypt operations
                 */
                WRITE_ONCE(ctx->async_notify, false);

                if (ctx->async_wait.err) {
                        ret = ctx->async_wait.err;
                        copied = 0;
                }
        }

        /* Transmit if any encryptions have completed */
        if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
                cancel_delayed_work(&ctx->tx_work.work);
                tls_tx_records(sk, msg->msg_flags);
        }

send_end:
        ret = sk_stream_error(sk, msg->msg_flags, ret);

        release_sock(sk);
        mutex_unlock(&tls_ctx->tx_lock);
        return copied > 0 ? copied : ret;
}

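/* Common sendpage implementation, called with the socket lock held:
 * append page fragments to the open record without copying data, and
 * push a record whenever it fills, the message ends, or the sg table
 * runs out of slots.
 */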
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
                              int offset, size_t size, int flags)
{
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        struct sk_msg *msg_pl;
        struct tls_rec *rec;
        int num_async = 0;
        ssize_t copied = 0;
        bool full_record;
        int record_room;
        int ret = 0;
        bool eor;

        eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        /* Call the sk_stream functions to manage the sndbuf mem. */
        while (size > 0) {
                size_t copy, required_size;

                if (sk->sk_err) {
                        ret = -sk->sk_err;
                        goto sendpage_end;
                }

                if (ctx->open_rec)
                        rec = ctx->open_rec;
                else
                        rec = ctx->open_rec = tls_get_rec(sk);
                if (!rec) {
                        ret = -ENOMEM;
                        goto sendpage_end;
                }

                msg_pl = &rec->msg_plaintext;

                full_record = false;
                record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
                copy = size;
                if (copy >= record_room) {
                        copy = record_room;
                        full_record = true;
                }

                required_size = msg_pl->sg.size + copy + prot->overhead_size;

                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;
alloc_payload:
                ret = tls_alloc_encrypted_msg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;

                        /* Adjust copy according to the amount that was
                         * actually allocated. The difference is due
                         * to the max sg elements limit
                         */
                        copy -= required_size - msg_pl->sg.size;
                        full_record = true;
                }

                sk_msg_page_add(msg_pl, page, copy, offset);
                sk_mem_charge(sk, copy);

                offset += copy;
                size -= copy;
                copied += copy;

                tls_ctx->pending_open_record_frags = true;
                if (full_record || eor || sk_msg_full(msg_pl)) {
                        ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
                                                  record_type, &copied, flags);
                        if (ret) {
                                if (ret == -EINPROGRESS)
                                        num_async++;
                                else if (ret == -ENOMEM)
                                        goto wait_for_memory;
                                else if (ret != -EAGAIN) {
                                        if (ret == -ENOSPC)
                                                ret = 0;
                                        goto sendpage_end;
                                }
                        }
                }
                continue;
wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret) {
                        if (ctx->open_rec)
                                tls_trim_both_msgs(sk, msg_pl->sg.size);
                        goto sendpage_end;
                }

                if (ctx->open_rec)
                        goto alloc_payload;
        }

        if (num_async) {
                /* Transmit if any encryptions have completed */
                if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
                        cancel_delayed_work(&ctx->tx_work.work);
                        tls_tx_records(sk, flags);
                }
        }
sendpage_end:
        ret = sk_stream_error(sk, flags, ret);
        return copied > 0 ? copied : ret;
}

int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
                           int offset, size_t size, int flags)
{
        if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
                      MSG_NO_SHARED_FRAGS))
                return -EOPNOTSUPP;

        return tls_sw_do_sendpage(sk, page, offset, size, flags);
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
                    int offset, size_t size, int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        int ret;

        if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
                return -EOPNOTSUPP;

        mutex_lock(&tls_ctx->tx_lock);
        lock_sock(sk);
        ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
        release_sock(sk);
        mutex_unlock(&tls_ctx->tx_lock);
        return ret;
}

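/* Wait for a parsed record (or psock data) to arrive, honoring the
 * receive timeout and MSG_DONTWAIT. Returns the skb at the head of the
 * receive queue, or NULL with *err set.
 */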
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
                                     int flags, long timeo, int *err)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct sk_buff *skb;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
                if (sk->sk_err) {
                        *err = sock_error(sk);
                        return NULL;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return NULL;

                if (sock_flag(sk, SOCK_DONE))
                        return NULL;

                if ((flags & MSG_DONTWAIT) || !timeo) {
                        *err = -EAGAIN;
                        return NULL;
                }

                add_wait_queue(sk_sleep(sk), &wait);
                sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                sk_wait_event(sk, &timeo,
                              ctx->recv_pkt != skb ||
                              !sk_psock_queue_empty(psock),
                              &wait);
                sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                remove_wait_queue(sk_sleep(sk), &wait);

                /* Handle signals */
                if (signal_pending(current)) {
                        *err = sock_intr_errno(timeo);
                        return NULL;
                }
        }

        return skb;
}

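/* Pin up to @to_max_pages user pages from @from and map @length bytes of
 * them into the @to scatterlist for zero-copy decryption. Updates
 * *pages_used and *size_used; on failure the iterator is reverted.
 */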
static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
                               int length, int *pages_used,
                               unsigned int *size_used,
                               struct scatterlist *to,
                               int to_max_pages)
{
        int rc = 0, i = 0, num_elem = *pages_used, maxpages;
        struct page *pages[MAX_SKB_FRAGS];
        unsigned int size = *size_used;
        ssize_t copied, use;
        size_t offset;

        while (length > 0) {
                i = 0;
                maxpages = to_max_pages - num_elem;
                if (maxpages == 0) {
                        rc = -EFAULT;
                        goto out;
                }
                copied = iov_iter_get_pages(from, pages,
                                            length,
                                            maxpages, &offset);
                if (copied <= 0) {
                        rc = -EFAULT;
                        goto out;
                }

                iov_iter_advance(from, copied);

                length -= copied;
                size += copied;
                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);

                        sg_set_page(&to[num_elem],
                                    pages[i], use, offset);
                        sg_unmark_end(&to[num_elem]);
                        /* We do not uncharge memory from this API */

                        offset = 0;
                        copied -= use;

                        i++;
                        num_elem++;
                }
        }
        /* Mark the end in the last sg entry if newly added */
        if (num_elem > *pages_used)
                sg_mark_end(&to[num_elem - 1]);
out:
        if (rc)
                iov_iter_revert(from, size - *size_used);
        *size_used = size;
        *pages_used = num_elem;

        return rc;
}

/* This function decrypts the input skb into either out_iov or out_sg,
 * or into the skb's own buffers. The input parameter 'zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, the decryption happens inside the skb buffers themselves, i.e.
 * zero-copy gets disabled and 'zc' is updated.
 */

1394 static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1395                             struct iov_iter *out_iov,
1396                             struct scatterlist *out_sg,
1397                             int *chunk, bool *zc, bool async)
1398 {
1399         struct tls_context *tls_ctx = tls_get_ctx(sk);
1400         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1401         struct tls_prot_info *prot = &tls_ctx->prot_info;
1402         struct strp_msg *rxm = strp_msg(skb);
1403         int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1404         struct aead_request *aead_req;
1405         struct sk_buff *unused;
1406         u8 *aad, *iv, *mem = NULL;
1407         struct scatterlist *sgin = NULL;
1408         struct scatterlist *sgout = NULL;
1409         const int data_len = rxm->full_len - prot->overhead_size +
1410                              prot->tail_size;
1411         int iv_offset = 0;
1412
1413         if (*zc && (out_iov || out_sg)) {
1414                 if (out_iov)
1415                         n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1416                 else
1417                         n_sgout = sg_nents(out_sg);
1418                 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1419                                  rxm->full_len - prot->prepend_size);
1420         } else {
1421                 n_sgout = 0;
1422                 *zc = false;
1423                 n_sgin = skb_cow_data(skb, 0, &unused);
1424         }
1425
1426         if (n_sgin < 1)
1427                 return -EBADMSG;
1428
1429         /* Increment to accommodate AAD */
1430         n_sgin = n_sgin + 1;
1431
1432         nsg = n_sgin + n_sgout;
1433
1434         aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1435         mem_size = aead_size + (nsg * sizeof(struct scatterlist));
1436         mem_size = mem_size + prot->aad_size;
1437         mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1438
1439         /* Allocate a single block of memory which contains
1440          * aead_req || sgin[] || sgout[] || aad || iv.
1441          * This order achieves correct alignment for aead_req, sgin, sgout.
1442          */
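        /* Resulting layout of the single allocation, illustratively:
         *
         *      aead_req : aead_size bytes
         *      sgin[]   : n_sgin  * sizeof(struct scatterlist)
         *      sgout[]  : n_sgout * sizeof(struct scatterlist)
         *      aad      : prot->aad_size bytes
         *      iv       : crypto_aead_ivsize(ctx->aead_recv) bytes
         */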
1443         mem = kmalloc(mem_size, sk->sk_allocation);
1444         if (!mem)
1445                 return -ENOMEM;
1446
1447         /* Segment the allocated memory */
1448         aead_req = (struct aead_request *)mem;
1449         sgin = (struct scatterlist *)(mem + aead_size);
1450         sgout = sgin + n_sgin;
1451         aad = (u8 *)(sgout + n_sgout);
1452         iv = aad + prot->aad_size;
1453
1454         /* For CCM based ciphers, the first byte of nonce+iv is always '2' (q - 1, where TLS CCM uses q = 3 length octets) */
1455         if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
1456                 iv[0] = 2;
1457                 iv_offset = 1;
1458         }
1459
1460         /* Prepare IV */
1461         err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1462                             iv + iv_offset + prot->salt_size,
1463                             prot->iv_size);
1464         if (err < 0) {
1465                 kfree(mem);
1466                 return err;
1467         }
1468         if (prot->version == TLS_1_3_VERSION)
1469                 memcpy(iv + iv_offset, tls_ctx->rx.iv,
1470                        crypto_aead_ivsize(ctx->aead_recv));
1471         else
1472                 memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
1473
1474         xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
1475
1476         /* Prepare AAD */
1477         tls_make_aad(aad, rxm->full_len - prot->overhead_size +
1478                      prot->tail_size,
1479                      tls_ctx->rx.rec_seq, prot->rec_seq_size,
1480                      ctx->control, prot->version);
1481
1482         /* Prepare sgin */
1483         sg_init_table(sgin, n_sgin);
1484         sg_set_buf(&sgin[0], aad, prot->aad_size);
1485         err = skb_to_sgvec(skb, &sgin[1],
1486                            rxm->offset + prot->prepend_size,
1487                            rxm->full_len - prot->prepend_size);
1488         if (err < 0) {
1489                 kfree(mem);
1490                 return err;
1491         }
1492
1493         if (n_sgout) {
1494                 if (out_iov) {
1495                         sg_init_table(sgout, n_sgout);
1496                         sg_set_buf(&sgout[0], aad, prot->aad_size);
1497
1498                         *chunk = 0;
1499                         err = tls_setup_from_iter(sk, out_iov, data_len,
1500                                                   &pages, chunk, &sgout[1],
1501                                                   (n_sgout - 1));
1502                         if (err < 0)
1503                                 goto fallback_to_reg_recv;
1504                 } else if (out_sg) {
1505                         memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1506                 } else {
1507                         goto fallback_to_reg_recv;
1508                 }
1509         } else {
1510 fallback_to_reg_recv:
1511                 sgout = sgin;
1512                 pages = 0;
1513                 *chunk = data_len;
1514                 *zc = false;
1515         }
1516
1517         /* Prepare and submit AEAD request */
1518         err = tls_do_decryption(sk, skb, sgin, sgout, iv,
1519                                 data_len, aead_req, async);
1520         if (err == -EINPROGRESS)
1521                 return err;
1522
1523         /* Release the pages in case the iov was mapped to pages */
1524         for (; pages > 0; pages--)
1525                 put_page(sg_page(&sgout[pages]));
1526
1527         kfree(mem);
1528         return err;
1529 }
1530
1531 static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1532                               struct iov_iter *dest, int *chunk, bool *zc,
1533                               bool async)
1534 {
1535         struct tls_context *tls_ctx = tls_get_ctx(sk);
1536         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1537         struct tls_prot_info *prot = &tls_ctx->prot_info;
1538         struct strp_msg *rxm = strp_msg(skb);
1539         int pad, err = 0;
1540
1541         if (!ctx->decrypted) {
1542                 if (tls_ctx->rx_conf == TLS_HW) {
1543                         err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
1544                         if (err < 0)
1545                                 return err;
1546                 }
1547
1548                 /* Still not decrypted after tls_device */
1549                 if (!ctx->decrypted) {
1550                         err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
1551                                                async);
1552                         if (err < 0) {
1553                                 if (err == -EINPROGRESS)
1554                                         tls_advance_record_sn(sk, prot,
1555                                                               &tls_ctx->rx);
1556                                 else if (err == -EBADMSG)
1557                                         TLS_INC_STATS(sock_net(sk),
1558                                                       LINUX_MIB_TLSDECRYPTERROR);
1559                                 return err;
1560                         }
1561                 } else {
1562                         *zc = false;
1563                 }
1564
1565                 pad = padding_length(ctx, prot, skb);
1566                 if (pad < 0)
1567                         return pad;
1568
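                /* Trim the record down to its plaintext, e.g. for TLS 1.2
                 * AES-GCM-128 (5-byte header, 8-byte explicit nonce, 16-byte
                 * tag, no padding) a 1029-byte record becomes offset += 13
                 * and full_len = 1000.
                 */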
1569                 rxm->full_len -= pad;
1570                 rxm->offset += prot->prepend_size;
1571                 rxm->full_len -= prot->overhead_size;
1572                 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1573                 ctx->decrypted = 1;
1574                 ctx->saved_data_ready(sk);
1575         } else {
1576                 *zc = false;
1577         }
1578
1579         return err;
1580 }
1581
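/* Synchronously decrypt a single record into the caller-provided
 * scatterlist; no user iov is involved, so the zero-copy bookkeeping
 * ('zc' and 'chunk') is only nominal here.
 */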
1582 int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1583                 struct scatterlist *sgout)
1584 {
1585         bool zc = true;
1586         int chunk;
1587
1588         return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
1589 }
1590
1591 static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1592                                unsigned int len)
1593 {
1594         struct tls_context *tls_ctx = tls_get_ctx(sk);
1595         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1596
1597         if (skb) {
1598                 struct strp_msg *rxm = strp_msg(skb);
1599
1600                 if (len < rxm->full_len) {
1601                         rxm->offset += len;
1602                         rxm->full_len -= len;
1603                         return false;
1604                 }
1605                 consume_skb(skb);
1606         }
1607
1608         /* Finished with message */
1609         ctx->recv_pkt = NULL;
1610         __strp_unpause(&ctx->strp);
1611
1612         return true;
1613 }
1614
1615 /* This function traverses the rx_list in the tls receive context to copy
1616  * the decrypted records into the buffer provided by the caller when zero
1617  * copy is not true. Further, records are removed from the rx_list if this
1618  * is not a peek case and the record has been consumed completely.
1619  */
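/* A worked example (a sketch): with 100-byte records queued on rx_list,
 * a call with skip = 150 and len = 200 skips the whole 1st record plus
 * 50 bytes of the 2nd, then copies the remaining 50 bytes of the 2nd
 * record, all 100 bytes of the 3rd and the first 50 bytes of the 4th.
 */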
1620 static int process_rx_list(struct tls_sw_context_rx *ctx,
1621                            struct msghdr *msg,
1622                            u8 *control,
1623                            bool *cmsg,
1624                            size_t skip,
1625                            size_t len,
1626                            bool zc,
1627                            bool is_peek)
1628 {
1629         struct sk_buff *skb = skb_peek(&ctx->rx_list);
1630         u8 ctrl = *control;
1631         u8 msgc = *cmsg;
1632         struct tls_msg *tlm;
1633         ssize_t copied = 0;
1634
1635         /* Set the record type in 'control' if caller didn't pass it */
1636         if (!ctrl && skb) {
1637                 tlm = tls_msg(skb);
1638                 ctrl = tlm->control;
1639         }
1640
1641         while (skip && skb) {
1642                 struct strp_msg *rxm = strp_msg(skb);
1643                 tlm = tls_msg(skb);
1644
1645                 /* Cannot process a record of a different type */
1646                 if (ctrl != tlm->control)
1647                         return 0;
1648
1649                 if (skip < rxm->full_len)
1650                         break;
1651
1652                 skip = skip - rxm->full_len;
1653                 skb = skb_peek_next(skb, &ctx->rx_list);
1654         }
1655
1656         while (len && skb) {
1657                 struct sk_buff *next_skb;
1658                 struct strp_msg *rxm = strp_msg(skb);
1659                 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1660
1661                 tlm = tls_msg(skb);
1662
1663                 /* Cannot process a record of a different type */
1664                 if (ctrl != tlm->control)
1665                         return 0;
1666
1667                 /* Set record type if not already done. For a non-data record,
1668                  * do not proceed if record type could not be copied.
1669                  */
1670                 if (!msgc) {
1671                         int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1672                                             sizeof(ctrl), &ctrl);
1673                         msgc = true;
1674                         if (ctrl != TLS_RECORD_TYPE_DATA) {
1675                                 if (cerr || msg->msg_flags & MSG_CTRUNC)
1676                                         return -EIO;
1677
1678                                 *cmsg = msgc;
1679                         }
1680                 }
1681
1682                 if (!zc || (rxm->full_len - skip) > len) {
1683                         int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1684                                                     msg, chunk);
1685                         if (err < 0)
1686                                 return err;
1687                 }
1688
1689                 len = len - chunk;
1690                 copied = copied + chunk;
1691
1692                 /* Consume the data from the record in the non-peek case */
1693                 if (!is_peek) {
1694                         rxm->offset = rxm->offset + chunk;
1695                         rxm->full_len = rxm->full_len - chunk;
1696
1697                         /* Return if there is unconsumed data in the record */
1698                         if (rxm->full_len - skip)
1699                                 break;
1700                 }
1701
1702                 /* Any remaining skip bytes must lie within the 1st record
1703                  * in rx_list, so 'skip' is 0 from the 2nd record onwards.
1704                  */
1705                 skip = 0;
1706
1707                 if (msg)
1708                         msg->msg_flags |= MSG_EOR;
1709
1710                 next_skb = skb_peek_next(skb, &ctx->rx_list);
1711
1712                 if (!is_peek) {
1713                         skb_unlink(skb, &ctx->rx_list);
1714                         consume_skb(skb);
1715                 }
1716
1717                 skb = next_skb;
1718         }
1719
1720         *control = ctrl;
1721         return copied;
1722 }
1723
1724 int tls_sw_recvmsg(struct sock *sk,
1725                    struct msghdr *msg,
1726                    size_t len,
1727                    int nonblock,
1728                    int flags,
1729                    int *addr_len)
1730 {
1731         struct tls_context *tls_ctx = tls_get_ctx(sk);
1732         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1733         struct tls_prot_info *prot = &tls_ctx->prot_info;
1734         struct sk_psock *psock;
1735         unsigned char control = 0;
1736         ssize_t decrypted = 0;
1737         struct strp_msg *rxm;
1738         struct tls_msg *tlm;
1739         struct sk_buff *skb;
1740         ssize_t copied = 0;
1741         bool cmsg = false;
1742         int target, err = 0;
1743         long timeo;
1744         bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1745         bool is_peek = flags & MSG_PEEK;
1746         bool bpf_strp_enabled;
1747         int num_async = 0;
1748         int pending;
1749
1750         flags |= nonblock;
1751
1752         if (unlikely(flags & MSG_ERRQUEUE))
1753                 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1754
1755         psock = sk_psock_get(sk);
1756         lock_sock(sk);
1757         bpf_strp_enabled = sk_psock_strp_enabled(psock);
1758
1759         /* Process pending decrypted records. This must be done without zero-copy */
1760         err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
1761                               is_peek);
1762         if (err < 0) {
1763                 tls_err_abort(sk, err);
1764                 goto end;
1765         } else {
1766                 copied = err;
1767         }
1768
1769         if (len <= copied)
1770                 goto recv_end;
1771
1772         target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1773         len = len - copied;
1774         timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1775
1776         while (len && (decrypted + copied < target || ctx->recv_pkt)) {
1777                 bool retain_skb = false;
1778                 bool zc = false;
1779                 int to_decrypt;
1780                 int chunk = 0;
1781                 bool async_capable;
1782                 bool async = false;
1783
1784                 skb = tls_wait_data(sk, psock, flags, timeo, &err);
1785                 if (!skb) {
1786                         if (psock) {
1787                                 int ret = __tcp_bpf_recvmsg(sk, psock,
1788                                                             msg, len, flags);
1789
1790                                 if (ret > 0) {
1791                                         decrypted += ret;
1792                                         len -= ret;
1793                                         continue;
1794                                 }
1795                         }
1796                         goto recv_end;
1797                 } else {
1798                         tlm = tls_msg(skb);
1799                         if (prot->version == TLS_1_3_VERSION)
1800                                 tlm->control = 0;
1801                         else
1802                                 tlm->control = ctx->control;
1803                 }
1804
1805                 rxm = strp_msg(skb);
1806
1807                 to_decrypt = rxm->full_len - prot->overhead_size;
1808
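                /* Zero-copy decryption straight into the user iov is only
                 * safe when the whole record fits in the request, the
                 * iterator references user pages (not a kvec), we are
                 * consuming rather than peeking, the record carries plain
                 * application data, and neither TLS 1.3 trailing padding
                 * nor a BPF verdict needs to see the plaintext first.
                 */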
1809                 if (to_decrypt <= len && !is_kvec && !is_peek &&
1810                     ctx->control == TLS_RECORD_TYPE_DATA &&
1811                     prot->version != TLS_1_3_VERSION &&
1812                     !bpf_strp_enabled)
1813                         zc = true;
1814
1815                 /* Do not use async mode if record is non-data */
1816                 if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
1817                         async_capable = ctx->async_capable;
1818                 else
1819                         async_capable = false;
1820
1821                 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
1822                                          &chunk, &zc, async_capable);
1823                 if (err < 0 && err != -EINPROGRESS) {
1824                         tls_err_abort(sk, -EBADMSG);
1825                         goto recv_end;
1826                 }
1827
1828                 if (err == -EINPROGRESS) {
1829                         async = true;
1830                         num_async++;
1831                 } else if (prot->version == TLS_1_3_VERSION) {
1832                         tlm->control = ctx->control;
1833                 }
1834
1835                 /* If the type of records being processed is not yet known,
1836                  * set it to the record type just dequeued. If it is already
1837                  * known but does not match the record type just dequeued,
1838                  * go to recv_end. We always have the record type here: for
1839                  * TLS 1.2 it is known as soon as the record is dequeued from
1840                  * the stream parser, and for TLS 1.3 async mode is disabled.
1841                  */
1842
1843                 if (!control)
1844                         control = tlm->control;
1845                 else if (control != tlm->control)
1846                         goto recv_end;
1847
1848                 if (!cmsg) {
1849                         int cerr;
1850
1851                         cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1852                                         sizeof(control), &control);
1853                         cmsg = true;
1854                         if (control != TLS_RECORD_TYPE_DATA) {
1855                                 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1856                                         err = -EIO;
1857                                         goto recv_end;
1858                                 }
1859                         }
1860                 }
1861
1862                 if (async)
1863                         goto pick_next_record;
1864
1865                 if (!zc) {
1866                         if (bpf_strp_enabled) {
1867                                 err = sk_psock_tls_strp_read(psock, skb);
1868                                 if (err != __SK_PASS) {
1869                                         rxm->offset = rxm->offset + rxm->full_len;
1870                                         rxm->full_len = 0;
1871                                         if (err == __SK_DROP)
1872                                                 consume_skb(skb);
1873                                         ctx->recv_pkt = NULL;
1874                                         __strp_unpause(&ctx->strp);
1875                                         continue;
1876                                 }
1877                         }
1878
1879                         if (rxm->full_len > len) {
1880                                 retain_skb = true;
1881                                 chunk = len;
1882                         } else {
1883                                 chunk = rxm->full_len;
1884                         }
1885
1886                         err = skb_copy_datagram_msg(skb, rxm->offset,
1887                                                     msg, chunk);
1888                         if (err < 0)
1889                                 goto recv_end;
1890
1891                         if (!is_peek) {
1892                                 rxm->offset = rxm->offset + chunk;
1893                                 rxm->full_len = rxm->full_len - chunk;
1894                         }
1895                 }
1896
1897 pick_next_record:
1898                 if (chunk > len)
1899                         chunk = len;
1900
1901                 decrypted += chunk;
1902                 len -= chunk;
1903
1904                 /* For async or peek case, queue the current skb */
1905                 if (async || is_peek || retain_skb) {
1906                         skb_queue_tail(&ctx->rx_list, skb);
1907                         skb = NULL;
1908                 }
1909
1910                 if (tls_sw_advance_skb(sk, skb, chunk)) {
1911                         /* Return full control message to
1912                          * userspace before trying to parse
1913                          * another message type
1914                          */
1915                         msg->msg_flags |= MSG_EOR;
1916                         if (ctx->control != TLS_RECORD_TYPE_DATA)
1917                                 goto recv_end;
1918                 } else {
1919                         break;
1920                 }
1921         }
1922
1923 recv_end:
1924         if (num_async) {
1925                 /* Wait for all previously submitted records to be decrypted */
1926                 spin_lock_bh(&ctx->decrypt_compl_lock);
1927                 ctx->async_notify = true;
1928                 pending = atomic_read(&ctx->decrypt_pending);
1929                 spin_unlock_bh(&ctx->decrypt_compl_lock);
1930                 if (pending) {
1931                         err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1932                         if (err) {
1933                                 /* one of the async decrypts failed */
1934                                 tls_err_abort(sk, err);
1935                                 copied = 0;
1936                                 decrypted = 0;
1937                                 goto end;
1938                         }
1939                 } else {
1940                         reinit_completion(&ctx->async_wait.completion);
1941                 }
1942
1943                 /* There can be no concurrent accesses, since we have no
1944                  * pending decrypt operations
1945                  */
1946                 WRITE_ONCE(ctx->async_notify, false);
1947
1948                 /* Drain records from the rx_list & copy if required */
1949                 if (is_peek || is_kvec)
1950                         err = process_rx_list(ctx, msg, &control, &cmsg, copied,
1951                                               decrypted, false, is_peek);
1952                 else
1953                         err = process_rx_list(ctx, msg, &control, &cmsg, 0,
1954                                               decrypted, true, is_peek);
1955                 if (err < 0) {
1956                         tls_err_abort(sk, err);
1957                         copied = 0;
1958                         goto end;
1959                 }
1960         }
1961
1962         copied += decrypted;
1963
1964 end:
1965         release_sock(sk);
1966         if (psock)
1967                 sk_psock_put(sk, psock);
1968         return copied ? : err;
1969 }
1970
1971 ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
1972                            struct pipe_inode_info *pipe,
1973                            size_t len, unsigned int flags)
1974 {
1975         struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
1976         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1977         struct strp_msg *rxm = NULL;
1978         struct sock *sk = sock->sk;
1979         struct sk_buff *skb;
1980         ssize_t copied = 0;
1981         int err = 0;
1982         long timeo;
1983         int chunk;
1984         bool zc = false;
1985
1986         lock_sock(sk);
1987
1988         timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1989
1990         skb = tls_wait_data(sk, NULL, flags, timeo, &err);
1991         if (!skb)
1992                 goto splice_read_end;
1993
1994         if (!ctx->decrypted) {
1995                 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
1996                 if (err < 0) {
1997                         tls_err_abort(sk, -EBADMSG);
1998                         goto splice_read_end;
1999                 }
2000
2001                 /* splice does not support reading control messages */
2002                 if (ctx->control != TLS_RECORD_TYPE_DATA) {
2003                         err = -EINVAL;
2004                         goto splice_read_end;
2005                 }
2006
2007                 ctx->decrypted = 1;
2008         }
2009         rxm = strp_msg(skb);
2010
2011         chunk = min_t(unsigned int, rxm->full_len, len);
2012         copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2013         if (copied < 0)
2014                 goto splice_read_end;
2015
2016         if (likely(!(flags & MSG_PEEK)))
2017                 tls_sw_advance_skb(sk, skb, copied);
2018
2019 splice_read_end:
2020         release_sock(sk);
2021         return copied ? : err;
2022 }
2023
2024 bool tls_sw_stream_read(const struct sock *sk)
2025 {
2026         struct tls_context *tls_ctx = tls_get_ctx(sk);
2027         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2028         bool ingress_empty = true;
2029         struct sk_psock *psock;
2030
2031         rcu_read_lock();
2032         psock = sk_psock(sk);
2033         if (psock)
2034                 ingress_empty = list_empty(&psock->ingress_msg);
2035         rcu_read_unlock();
2036
2037         return !ingress_empty || ctx->recv_pkt ||
2038                 !skb_queue_empty(&ctx->rx_list);
2039 }
2040
2041 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2042 {
2043         struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2044         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2045         struct tls_prot_info *prot = &tls_ctx->prot_info;
2046         char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2047         struct strp_msg *rxm = strp_msg(skb);
2048         size_t cipher_overhead;
2049         size_t data_len = 0;
2050         int ret;
2051
2052         /* Verify that we have a full TLS header, or wait for more data */
2053         if (rxm->offset + prot->prepend_size > skb->len)
2054                 return 0;
2055
2056         /* Sanity-check size of on-stack buffer. */
2057         if (WARN_ON(prot->prepend_size > sizeof(header))) {
2058                 ret = -EINVAL;
2059                 goto read_failure;
2060         }
2061
2062         /* Linearize header to local buffer */
2063         ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
2064
2065         if (ret < 0)
2066                 goto read_failure;
2067
2068         ctx->control = header[0];
2069
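        /* The TLS record header just linearized is laid out as:
         *   header[0]    ContentType
         *   header[1..2] legacy protocol version (checked below)
         *   header[3..4] payload length, big endian
         */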
2070         data_len = ((header[4] & 0xFF) | (header[3] << 8));
2071
2072         cipher_overhead = prot->tag_size;
2073         if (prot->version != TLS_1_3_VERSION)
2074                 cipher_overhead += prot->iv_size;
2075
2076         if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2077             prot->tail_size) {
2078                 ret = -EMSGSIZE;
2079                 goto read_failure;
2080         }
2081         if (data_len < cipher_overhead) {
2082                 ret = -EBADMSG;
2083                 goto read_failure;
2084         }
2085
2086         /* Note that both TLS 1.3 and TLS 1.2 put the TLS 1.2 version number on the wire here */
2087         if (header[1] != TLS_1_2_VERSION_MINOR ||
2088             header[2] != TLS_1_2_VERSION_MAJOR) {
2089                 ret = -EINVAL;
2090                 goto read_failure;
2091         }
2092
2093         tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2094                                      TCP_SKB_CB(skb)->seq + rxm->offset);
2095         return data_len + TLS_HEADER_SIZE;
2096
2097 read_failure:
2098         tls_err_abort(strp->sk, ret);
2099
2100         return ret;
2101 }
2102
2103 static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2104 {
2105         struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2106         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2107
2108         ctx->decrypted = 0;
2109
2110         ctx->recv_pkt = skb;
2111         strp_pause(strp);
2112
2113         ctx->saved_data_ready(strp->sk);
2114 }
2115
2116 static void tls_data_ready(struct sock *sk)
2117 {
2118         struct tls_context *tls_ctx = tls_get_ctx(sk);
2119         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2120         struct sk_psock *psock;
2121
2122         strp_data_ready(&ctx->strp);
2123
2124         psock = sk_psock_get(sk);
2125         if (psock) {
2126                 if (!list_empty(&psock->ingress_msg))
2127                         ctx->saved_data_ready(sk);
2128                 sk_psock_put(sk, psock);
2129         }
2130 }
2131
2132 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2133 {
2134         struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2135
2136         set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2137         set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2138         cancel_delayed_work_sync(&ctx->tx_work.work);
2139 }
2140
2141 void tls_sw_release_resources_tx(struct sock *sk)
2142 {
2143         struct tls_context *tls_ctx = tls_get_ctx(sk);
2144         struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2145         struct tls_rec *rec, *tmp;
2146         int pending;
2147
2148         /* Wait for any pending async encryptions to complete */
2149         spin_lock_bh(&ctx->encrypt_compl_lock);
2150         ctx->async_notify = true;
2151         pending = atomic_read(&ctx->encrypt_pending);
2152         spin_unlock_bh(&ctx->encrypt_compl_lock);
2153
2154         if (pending)
2155                 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2156
2157         tls_tx_records(sk, -1);
2158
2159         /* Free up unsent records in tx_list. First, free the partially
2160          * sent record, if any, at the head of tx_list.
2161          */
2162         if (tls_ctx->partially_sent_record) {
2163                 tls_free_partial_record(sk, tls_ctx);
2164                 rec = list_first_entry(&ctx->tx_list,
2165                                        struct tls_rec, list);
2166                 list_del(&rec->list);
2167                 sk_msg_free(sk, &rec->msg_plaintext);
2168                 kfree(rec);
2169         }
2170
2171         list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2172                 list_del(&rec->list);
2173                 sk_msg_free(sk, &rec->msg_encrypted);
2174                 sk_msg_free(sk, &rec->msg_plaintext);
2175                 kfree(rec);
2176         }
2177
2178         crypto_free_aead(ctx->aead_send);
2179         tls_free_open_rec(sk);
2180 }
2181
2182 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2183 {
2184         struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2185
2186         kfree(ctx);
2187 }
2188
2189 void tls_sw_release_resources_rx(struct sock *sk)
2190 {
2191         struct tls_context *tls_ctx = tls_get_ctx(sk);
2192         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2193
2194         kfree(tls_ctx->rx.rec_seq);
2195         kfree(tls_ctx->rx.iv);
2196
2197         if (ctx->aead_recv) {
2198                 kfree_skb(ctx->recv_pkt);
2199                 ctx->recv_pkt = NULL;
2200                 skb_queue_purge(&ctx->rx_list);
2201                 crypto_free_aead(ctx->aead_recv);
2202                 strp_stop(&ctx->strp);
2203                 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2204                  * we still want to strp_stop(), but sk->sk_data_ready was
2205                  * never swapped.
2206                  */
2207                 if (ctx->saved_data_ready) {
2208                         write_lock_bh(&sk->sk_callback_lock);
2209                         sk->sk_data_ready = ctx->saved_data_ready;
2210                         write_unlock_bh(&sk->sk_callback_lock);
2211                 }
2212         }
2213 }
2214
2215 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2216 {
2217         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2218
2219         strp_done(&ctx->strp);
2220 }
2221
2222 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2223 {
2224         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2225
2226         kfree(ctx);
2227 }
2228
2229 void tls_sw_free_resources_rx(struct sock *sk)
2230 {
2231         struct tls_context *tls_ctx = tls_get_ctx(sk);
2232
2233         tls_sw_release_resources_rx(sk);
2234         tls_sw_free_ctx_rx(tls_ctx);
2235 }
2236
2237 /* The work handler to transmit the encrypted records in tx_list */
2238 static void tx_work_handler(struct work_struct *work)
2239 {
2240         struct delayed_work *delayed_work = to_delayed_work(work);
2241         struct tx_work *tx_work = container_of(delayed_work,
2242                                                struct tx_work, work);
2243         struct sock *sk = tx_work->sk;
2244         struct tls_context *tls_ctx = tls_get_ctx(sk);
2245         struct tls_sw_context_tx *ctx;
2246
2247         if (unlikely(!tls_ctx))
2248                 return;
2249
2250         ctx = tls_sw_ctx_tx(tls_ctx);
2251         if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2252                 return;
2253
2254         if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2255                 return;
2256         mutex_lock(&tls_ctx->tx_lock);
2257         lock_sock(sk);
2258         tls_tx_records(sk, -1);
2259         release_sock(sk);
2260         mutex_unlock(&tls_ctx->tx_lock);
2261 }
2262
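/* BIT_TX_SCHEDULED acts as a "work already queued" latch: writers set it
 * with test_and_set_bit() before scheduling tx_work, and tx_work_handler()
 * clears it with test_and_clear_bit() before transmitting, so at most one
 * work item is in flight per context.
 */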
2263 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2264 {
2265         struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2266
2267         /* Schedule the transmission if tx list is ready */
2268         if (is_tx_ready(tx_ctx) &&
2269             !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2270                 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2271 }
2272
2273 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2274 {
2275         struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2276
2277         write_lock_bh(&sk->sk_callback_lock);
2278         rx_ctx->saved_data_ready = sk->sk_data_ready;
2279         sk->sk_data_ready = tls_data_ready;
2280         write_unlock_bh(&sk->sk_callback_lock);
2281
2282         strp_check_rcv(&rx_ctx->strp);
2283 }
2284
2285 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2286 {
2287         struct tls_context *tls_ctx = tls_get_ctx(sk);
2288         struct tls_prot_info *prot = &tls_ctx->prot_info;
2289         struct tls_crypto_info *crypto_info;
2290         struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2291         struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2292         struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2293         struct tls_sw_context_tx *sw_ctx_tx = NULL;
2294         struct tls_sw_context_rx *sw_ctx_rx = NULL;
2295         struct cipher_context *cctx;
2296         struct crypto_aead **aead;
2297         struct strp_callbacks cb;
2298         u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2299         struct crypto_tfm *tfm;
2300         char *iv, *rec_seq, *key, *salt, *cipher_name;
2301         size_t keysize;
2302         int rc = 0;
2303
2304         if (!ctx) {
2305                 rc = -EINVAL;
2306                 goto out;
2307         }
2308
2309         if (tx) {
2310                 if (!ctx->priv_ctx_tx) {
2311                         sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2312                         if (!sw_ctx_tx) {
2313                                 rc = -ENOMEM;
2314                                 goto out;
2315                         }
2316                         ctx->priv_ctx_tx = sw_ctx_tx;
2317                 } else {
2318                         sw_ctx_tx =
2319                                 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2320                 }
2321         } else {
2322                 if (!ctx->priv_ctx_rx) {
2323                         sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2324                         if (!sw_ctx_rx) {
2325                                 rc = -ENOMEM;
2326                                 goto out;
2327                         }
2328                         ctx->priv_ctx_rx = sw_ctx_rx;
2329                 } else {
2330                         sw_ctx_rx =
2331                                 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2332                 }
2333         }
2334
2335         if (tx) {
2336                 crypto_init_wait(&sw_ctx_tx->async_wait);
2337                 spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
2338                 crypto_info = &ctx->crypto_send.info;
2339                 cctx = &ctx->tx;
2340                 aead = &sw_ctx_tx->aead_send;
2341                 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2342                 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2343                 sw_ctx_tx->tx_work.sk = sk;
2344         } else {
2345                 crypto_init_wait(&sw_ctx_rx->async_wait);
2346                 spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
2347                 crypto_info = &ctx->crypto_recv.info;
2348                 cctx = &ctx->rx;
2349                 skb_queue_head_init(&sw_ctx_rx->rx_list);
2350                 aead = &sw_ctx_rx->aead_recv;
2351         }
2352
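        /* Per-cipher parameters, illustratively (sizes in bytes, from the
         * uapi TLS_CIPHER_* defines):
         *
         *      cipher          key     salt    iv      rec_seq tag
         *      AES-GCM-128     16      4       8       8       16
         *      AES-GCM-256     32      4       8       8       16
         *      AES-CCM-128     16      4       8       8       16
         */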
2353         switch (crypto_info->cipher_type) {
2354         case TLS_CIPHER_AES_GCM_128: {
2355                 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2356                 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2357                 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2358                 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2359                 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2360                 rec_seq =
2361                  ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2362                 gcm_128_info =
2363                         (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
2364                 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2365                 key = gcm_128_info->key;
2366                 salt = gcm_128_info->salt;
2367                 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2368                 cipher_name = "gcm(aes)";
2369                 break;
2370         }
2371         case TLS_CIPHER_AES_GCM_256: {
2372                 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2373                 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2374                 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2375                 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2376                 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2377                 rec_seq =
2378                  ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2379                 gcm_256_info =
2380                         (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2381                 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2382                 key = gcm_256_info->key;
2383                 salt = gcm_256_info->salt;
2384                 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2385                 cipher_name = "gcm(aes)";
2386                 break;
2387         }
2388         case TLS_CIPHER_AES_CCM_128: {
2389                 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2390                 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2391                 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2392                 iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
2393                 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2394                 rec_seq =
2395                 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
2396                 ccm_128_info =
2397                 (struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
2398                 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2399                 key = ccm_128_info->key;
2400                 salt = ccm_128_info->salt;
2401                 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2402                 cipher_name = "ccm(aes)";
2403                 break;
2404         }
2405         default:
2406                 rc = -EINVAL;
2407                 goto free_priv;
2408         }
2409
2410         /* Sanity-check the sizes for stack allocations. */
2411         if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2412             rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
2413                 rc = -EINVAL;
2414                 goto free_priv;
2415         }
2416
2417         if (crypto_info->version == TLS_1_3_VERSION) {
2418                 nonce_size = 0;
2419                 prot->aad_size = TLS_HEADER_SIZE;
2420                 prot->tail_size = 1;
2421         } else {
2422                 prot->aad_size = TLS_AAD_SPACE_SIZE;
2423                 prot->tail_size = 0;
2424         }
2425
2426         prot->version = crypto_info->version;
2427         prot->cipher_type = crypto_info->cipher_type;
2428         prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2429         prot->tag_size = tag_size;
2430         prot->overhead_size = prot->prepend_size +
2431                               prot->tag_size + prot->tail_size;
2432         prot->iv_size = iv_size;
2433         prot->salt_size = salt_size;
2434         cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2435         if (!cctx->iv) {
2436                 rc = -ENOMEM;
2437                 goto free_priv;
2438         }
2439         prot->rec_seq_size = rec_seq_size;
2440         /* Note: the 128 and 256 bit ciphers use the same salt size */
2441         memcpy(cctx->iv, salt, salt_size);
2442         memcpy(cctx->iv + salt_size, iv, iv_size);
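        /* cctx->iv now holds salt||iv, e.g. the 4 salt bytes followed by
         * the 8 explicit-IV bytes for AES-GCM-128, matching the AEAD's
         * 12-byte nonce.
         */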
2443         cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2444         if (!cctx->rec_seq) {
2445                 rc = -ENOMEM;
2446                 goto free_iv;
2447         }
2448
2449         if (!*aead) {
2450                 *aead = crypto_alloc_aead(cipher_name, 0, 0);
2451                 if (IS_ERR(*aead)) {
2452                         rc = PTR_ERR(*aead);
2453                         *aead = NULL;
2454                         goto free_rec_seq;
2455                 }
2456         }
2457
2458         ctx->push_pending_record = tls_sw_push_pending_record;
2459
2460         rc = crypto_aead_setkey(*aead, key, keysize);
2461
2462         if (rc)
2463                 goto free_aead;
2464
2465         rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2466         if (rc)
2467                 goto free_aead;
2468
2469         if (sw_ctx_rx) {
2470                 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2471
2472                 if (crypto_info->version == TLS_1_3_VERSION)
2473                         sw_ctx_rx->async_capable = 0;
2474                 else
2475                         sw_ctx_rx->async_capable =
2476                                 !!(tfm->__crt_alg->cra_flags &
2477                                    CRYPTO_ALG_ASYNC);
2478
2479                 /* Set up strparser */
2480                 memset(&cb, 0, sizeof(cb));
2481                 cb.rcv_msg = tls_queue;
2482                 cb.parse_msg = tls_read_size;
2483
2484                 strp_init(&sw_ctx_rx->strp, sk, &cb);
2485         }
2486
2487         goto out;
2488
2489 free_aead:
2490         crypto_free_aead(*aead);
2491         *aead = NULL;
2492 free_rec_seq:
2493         kfree(cctx->rec_seq);
2494         cctx->rec_seq = NULL;
2495 free_iv:
2496         kfree(cctx->iv);
2497         cctx->iv = NULL;
2498 free_priv:
2499         if (tx) {
2500                 kfree(ctx->priv_ctx_tx);
2501                 ctx->priv_ctx_tx = NULL;
2502         } else {
2503                 kfree(ctx->priv_ctx_rx);
2504                 ctx->priv_ctx_rx = NULL;
2505         }
2506 out:
2507         return rc;
2508 }