2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38 #include <linux/bug.h>
39 #include <linux/sched/signal.h>
40 #include <linux/module.h>
41 #include <linux/splice.h>
42 #include <crypto/aead.h>
44 #include <net/strparser.h>
49 struct tls_decrypt_arg {
59 struct tls_decrypt_ctx {
61 u8 aad[TLS_MAX_AAD_SIZE];
63 struct scatterlist sg[];
66 noinline void tls_err_abort(struct sock *sk, int err)
68 WARN_ON_ONCE(err >= 0);
69 /* sk->sk_err should contain a positive error code. */
74 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
75 unsigned int recursion_level)
77 int start = skb_headlen(skb);
78 int i, chunk = start - offset;
79 struct sk_buff *frag_iter;
82 if (unlikely(recursion_level >= 24))
95 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
98 WARN_ON(start > offset + len);
100 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
101 chunk = end - offset;
114 if (unlikely(skb_has_frag_list(skb))) {
115 skb_walk_frags(skb, frag_iter) {
118 WARN_ON(start > offset + len);
120 end = start + frag_iter->len;
121 chunk = end - offset;
125 ret = __skb_nsg(frag_iter, offset - start, chunk,
126 recursion_level + 1);
127 if (unlikely(ret < 0))
142 /* Return the number of scatterlist elements required to completely map the
143 * skb, or -EMSGSIZE if the recursion depth is exceeded.
145 static int skb_nsg(struct sk_buff *skb, int offset, int len)
147 return __skb_nsg(skb, offset, len, 0);
150 static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
151 struct tls_decrypt_arg *darg)
153 struct strp_msg *rxm = strp_msg(skb);
154 struct tls_msg *tlm = tls_msg(skb);
157 /* Determine zero-padding length */
158 if (prot->version == TLS_1_3_VERSION) {
159 int offset = rxm->full_len - TLS_TAG_SIZE - 1;
160 char content_type = darg->zc ? darg->tail : 0;
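/* TLS 1.3 places the real content type in the last non-zero byte of the
 * plaintext, followed by optional zero padding. Walk backwards from just
 * before the authentication tag until a non-zero byte is found, stopping
 * if the search runs back into the record header.
 */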
163 while (content_type == 0) {
164 if (offset < prot->prepend_size)
166 err = skb_copy_bits(skb, rxm->offset + offset,
175 tlm->control = content_type;
180 static void tls_decrypt_done(struct crypto_async_request *req, int err)
182 struct aead_request *aead_req = (struct aead_request *)req;
183 struct scatterlist *sgout = aead_req->dst;
184 struct scatterlist *sgin = aead_req->src;
185 struct tls_sw_context_rx *ctx;
186 struct tls_context *tls_ctx;
187 struct scatterlist *sg;
191 sk = (struct sock *)req->data;
192 tls_ctx = tls_get_ctx(sk);
193 ctx = tls_sw_ctx_rx(tls_ctx);
195 /* Propagate if there was an err */
198 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
199 ctx->async_wait.err = err;
200 tls_err_abort(sk, err);
203 /* Free the destination pages if skb was not decrypted in place */
205 /* Skip the first S/G entry as it points to AAD */
206 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
209 put_page(sg_page(sg));
215 spin_lock_bh(&ctx->decrypt_compl_lock);
216 if (!atomic_dec_return(&ctx->decrypt_pending))
217 complete(&ctx->async_wait.completion);
218 spin_unlock_bh(&ctx->decrypt_compl_lock);
221 static int tls_do_decryption(struct sock *sk,
222 struct scatterlist *sgin,
223 struct scatterlist *sgout,
226 struct aead_request *aead_req,
227 struct tls_decrypt_arg *darg)
229 struct tls_context *tls_ctx = tls_get_ctx(sk);
230 struct tls_prot_info *prot = &tls_ctx->prot_info;
231 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
234 aead_request_set_tfm(aead_req, ctx->aead_recv);
235 aead_request_set_ad(aead_req, prot->aad_size);
236 aead_request_set_crypt(aead_req, sgin, sgout,
237 data_len + prot->tag_size,
241 aead_request_set_callback(aead_req,
242 CRYPTO_TFM_REQ_MAY_BACKLOG,
243 tls_decrypt_done, sk);
244 atomic_inc(&ctx->decrypt_pending);
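/* Synchronous fallback: completion is signalled through ctx->async_wait
 * (crypto_req_done) and we block in crypto_wait_req() below until the
 * AEAD operation finishes.
 */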
246 aead_request_set_callback(aead_req,
247 CRYPTO_TFM_REQ_MAY_BACKLOG,
248 crypto_req_done, &ctx->async_wait);
251 ret = crypto_aead_decrypt(aead_req);
252 if (ret == -EINPROGRESS) {
256 ret = crypto_wait_req(ret, &ctx->async_wait);
263 static void tls_trim_both_msgs(struct sock *sk, int target_size)
265 struct tls_context *tls_ctx = tls_get_ctx(sk);
266 struct tls_prot_info *prot = &tls_ctx->prot_info;
267 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
268 struct tls_rec *rec = ctx->open_rec;
270 sk_msg_trim(sk, &rec->msg_plaintext, target_size);
272 target_size += prot->overhead_size;
273 sk_msg_trim(sk, &rec->msg_encrypted, target_size);
276 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
278 struct tls_context *tls_ctx = tls_get_ctx(sk);
279 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
280 struct tls_rec *rec = ctx->open_rec;
281 struct sk_msg *msg_en = &rec->msg_encrypted;
283 return sk_msg_alloc(sk, msg_en, len, 0);
286 static int tls_clone_plaintext_msg(struct sock *sk, int required)
288 struct tls_context *tls_ctx = tls_get_ctx(sk);
289 struct tls_prot_info *prot = &tls_ctx->prot_info;
290 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
291 struct tls_rec *rec = ctx->open_rec;
292 struct sk_msg *msg_pl = &rec->msg_plaintext;
293 struct sk_msg *msg_en = &rec->msg_encrypted;
296 /* We add page references worth len bytes from the encrypted sg
297 * at the end of the plaintext sg. It is guaranteed that msg_en
298 * has the required room (ensured by the caller).
300 len = required - msg_pl->sg.size;
302 /* Skip initial bytes in msg_en's data to be able to use
303 * the same offset for both plain and encrypted data.
305 skip = prot->prepend_size + msg_pl->sg.size;
307 return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
310 static struct tls_rec *tls_get_rec(struct sock *sk)
312 struct tls_context *tls_ctx = tls_get_ctx(sk);
313 struct tls_prot_info *prot = &tls_ctx->prot_info;
314 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
315 struct sk_msg *msg_pl, *msg_en;
319 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
321 rec = kzalloc(mem_size, sk->sk_allocation);
325 msg_pl = &rec->msg_plaintext;
326 msg_en = &rec->msg_encrypted;
331 sg_init_table(rec->sg_aead_in, 2);
332 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
333 sg_unmark_end(&rec->sg_aead_in[1]);
335 sg_init_table(rec->sg_aead_out, 2);
336 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
337 sg_unmark_end(&rec->sg_aead_out[1]);
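/* Entry 0 of each two-entry table carries the AAD buffer; entry 1 is left
 * without an end marking so that it can later be chained to the plaintext
 * or ciphertext scatterlists in tls_push_record().
 */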
342 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
344 sk_msg_free(sk, &rec->msg_encrypted);
345 sk_msg_free(sk, &rec->msg_plaintext);
349 static void tls_free_open_rec(struct sock *sk)
351 struct tls_context *tls_ctx = tls_get_ctx(sk);
352 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
353 struct tls_rec *rec = ctx->open_rec;
356 tls_free_rec(sk, rec);
357 ctx->open_rec = NULL;
361 int tls_tx_records(struct sock *sk, int flags)
363 struct tls_context *tls_ctx = tls_get_ctx(sk);
364 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
365 struct tls_rec *rec, *tmp;
366 struct sk_msg *msg_en;
367 int tx_flags, rc = 0;
369 if (tls_is_partially_sent_record(tls_ctx)) {
370 rec = list_first_entry(&ctx->tx_list,
371 struct tls_rec, list);
374 tx_flags = rec->tx_flags;
378 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
382 /* Full record has been transmitted.
383 * Remove the head of tx_list
385 list_del(&rec->list);
386 sk_msg_free(sk, &rec->msg_plaintext);
390 /* Tx all ready records */
391 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
392 if (READ_ONCE(rec->tx_ready)) {
394 tx_flags = rec->tx_flags;
398 msg_en = &rec->msg_encrypted;
399 rc = tls_push_sg(sk, tls_ctx,
400 &msg_en->sg.data[msg_en->sg.curr],
405 list_del(&rec->list);
406 sk_msg_free(sk, &rec->msg_plaintext);
414 if (rc < 0 && rc != -EAGAIN)
415 tls_err_abort(sk, -EBADMSG);
420 static void tls_encrypt_done(struct crypto_async_request *req, int err)
422 struct aead_request *aead_req = (struct aead_request *)req;
423 struct sock *sk = req->data;
424 struct tls_context *tls_ctx = tls_get_ctx(sk);
425 struct tls_prot_info *prot = &tls_ctx->prot_info;
426 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
427 struct scatterlist *sge;
428 struct sk_msg *msg_en;
433 rec = container_of(aead_req, struct tls_rec, aead_req);
434 msg_en = &rec->msg_encrypted;
436 sge = sk_msg_elem(msg_en, msg_en->sg.curr);
437 sge->offset -= prot->prepend_size;
438 sge->length += prot->prepend_size;
440 /* Check if an error was previously set on the socket */
441 if (err || sk->sk_err) {
444 /* If err is already set on socket, return the same code */
446 ctx->async_wait.err = -sk->sk_err;
448 ctx->async_wait.err = err;
449 tls_err_abort(sk, err);
454 struct tls_rec *first_rec;
456 /* Mark the record as ready for transmission */
457 smp_store_mb(rec->tx_ready, true);
459 /* If the record just encrypted is at the head of tx_list, schedule tx */
460 first_rec = list_first_entry(&ctx->tx_list,
461 struct tls_rec, list);
462 if (rec == first_rec)
466 spin_lock_bh(&ctx->encrypt_compl_lock);
467 pending = atomic_dec_return(&ctx->encrypt_pending);
469 if (!pending && ctx->async_notify)
470 complete(&ctx->async_wait.completion);
471 spin_unlock_bh(&ctx->encrypt_compl_lock);
476 /* Schedule the transmission */
477 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
478 schedule_delayed_work(&ctx->tx_work.work, 1);
481 static int tls_do_encryption(struct sock *sk,
482 struct tls_context *tls_ctx,
483 struct tls_sw_context_tx *ctx,
484 struct aead_request *aead_req,
485 size_t data_len, u32 start)
487 struct tls_prot_info *prot = &tls_ctx->prot_info;
488 struct tls_rec *rec = ctx->open_rec;
489 struct sk_msg *msg_en = &rec->msg_encrypted;
490 struct scatterlist *sge = sk_msg_elem(msg_en, start);
491 int rc, iv_offset = 0;
493 /* For CCM-based ciphers, the first byte of the IV is a constant */
494 switch (prot->cipher_type) {
495 case TLS_CIPHER_AES_CCM_128:
496 rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
499 case TLS_CIPHER_SM4_CCM:
500 rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
505 memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
506 prot->iv_size + prot->salt_size);
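/* Mix the record sequence number into the nonce: for TLS 1.3 and
 * ChaCha20-Poly1305 the per-record nonce is the implicit IV XORed with the
 * sequence number; for the other TLS 1.2 ciphers the helper below leaves
 * the IV untouched.
 */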
508 tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
509 tls_ctx->tx.rec_seq);
511 sge->offset += prot->prepend_size;
512 sge->length -= prot->prepend_size;
514 msg_en->sg.curr = start;
516 aead_request_set_tfm(aead_req, ctx->aead_send);
517 aead_request_set_ad(aead_req, prot->aad_size);
518 aead_request_set_crypt(aead_req, rec->sg_aead_in,
520 data_len, rec->iv_data);
522 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
523 tls_encrypt_done, sk);
525 /* Add the record in tx_list */
526 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
527 atomic_inc(&ctx->encrypt_pending);
529 rc = crypto_aead_encrypt(aead_req);
530 if (!rc || rc != -EINPROGRESS) {
531 atomic_dec(&ctx->encrypt_pending);
532 sge->offset -= prot->prepend_size;
533 sge->length += prot->prepend_size;
537 WRITE_ONCE(rec->tx_ready, true);
538 } else if (rc != -EINPROGRESS) {
539 list_del(&rec->list);
543 /* Unhook the record from the context if encryption did not fail */
544 ctx->open_rec = NULL;
545 tls_advance_record_sn(sk, prot, &tls_ctx->tx);
549 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
550 struct tls_rec **to, struct sk_msg *msg_opl,
551 struct sk_msg *msg_oen, u32 split_point,
552 u32 tx_overhead_size, u32 *orig_end)
554 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
555 struct scatterlist *sge, *osge, *nsge;
556 u32 orig_size = msg_opl->sg.size;
557 struct scatterlist tmp = { };
558 struct sk_msg *msg_npl;
562 new = tls_get_rec(sk);
565 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
566 tx_overhead_size, 0);
568 tls_free_rec(sk, new);
572 *orig_end = msg_opl->sg.end;
573 i = msg_opl->sg.start;
574 sge = sk_msg_elem(msg_opl, i);
575 while (apply && sge->length) {
576 if (sge->length > apply) {
577 u32 len = sge->length - apply;
579 get_page(sg_page(sge));
580 sg_set_page(&tmp, sg_page(sge), len,
581 sge->offset + apply);
586 apply -= sge->length;
587 bytes += sge->length;
590 sk_msg_iter_var_next(i);
591 if (i == msg_opl->sg.end)
593 sge = sk_msg_elem(msg_opl, i);
597 msg_opl->sg.curr = i;
598 msg_opl->sg.copybreak = 0;
599 msg_opl->apply_bytes = 0;
600 msg_opl->sg.size = bytes;
602 msg_npl = &new->msg_plaintext;
603 msg_npl->apply_bytes = apply;
604 msg_npl->sg.size = orig_size - bytes;
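/* After the split the original record keeps the first 'bytes' of plaintext
 * (the part covered by apply_bytes) and the new record takes over the
 * remaining orig_size - bytes.
 */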
606 j = msg_npl->sg.start;
607 nsge = sk_msg_elem(msg_npl, j);
609 memcpy(nsge, &tmp, sizeof(*nsge));
610 sk_msg_iter_var_next(j);
611 nsge = sk_msg_elem(msg_npl, j);
614 osge = sk_msg_elem(msg_opl, i);
615 while (osge->length) {
616 memcpy(nsge, osge, sizeof(*nsge));
618 sk_msg_iter_var_next(i);
619 sk_msg_iter_var_next(j);
622 osge = sk_msg_elem(msg_opl, i);
623 nsge = sk_msg_elem(msg_npl, j);
627 msg_npl->sg.curr = j;
628 msg_npl->sg.copybreak = 0;
634 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
635 struct tls_rec *from, u32 orig_end)
637 struct sk_msg *msg_npl = &from->msg_plaintext;
638 struct sk_msg *msg_opl = &to->msg_plaintext;
639 struct scatterlist *osge, *nsge;
643 sk_msg_iter_var_prev(i);
644 j = msg_npl->sg.start;
646 osge = sk_msg_elem(msg_opl, i);
647 nsge = sk_msg_elem(msg_npl, j);
649 if (sg_page(osge) == sg_page(nsge) &&
650 osge->offset + osge->length == nsge->offset) {
651 osge->length += nsge->length;
652 put_page(sg_page(nsge));
655 msg_opl->sg.end = orig_end;
656 msg_opl->sg.curr = orig_end;
657 msg_opl->sg.copybreak = 0;
658 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
659 msg_opl->sg.size += msg_npl->sg.size;
661 sk_msg_free(sk, &to->msg_encrypted);
662 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
667 static int tls_push_record(struct sock *sk, int flags,
668 unsigned char record_type)
670 struct tls_context *tls_ctx = tls_get_ctx(sk);
671 struct tls_prot_info *prot = &tls_ctx->prot_info;
672 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
673 struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
674 u32 i, split_point, orig_end;
675 struct sk_msg *msg_pl, *msg_en;
676 struct aead_request *req;
683 msg_pl = &rec->msg_plaintext;
684 msg_en = &rec->msg_encrypted;
686 split_point = msg_pl->apply_bytes;
687 split = split_point && split_point < msg_pl->sg.size;
688 if (unlikely((!split &&
690 prot->overhead_size > msg_en->sg.size) ||
693 prot->overhead_size > msg_en->sg.size))) {
695 split_point = msg_en->sg.size;
698 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
699 split_point, prot->overhead_size,
703 /* This can happen if above tls_split_open_record allocates
704 * a single large encryption buffer instead of two smaller
705 * ones. In this case adjust pointers and continue without
708 if (!msg_pl->sg.size) {
709 tls_merge_open_record(sk, rec, tmp, orig_end);
710 msg_pl = &rec->msg_plaintext;
711 msg_en = &rec->msg_encrypted;
714 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
715 prot->overhead_size);
718 rec->tx_flags = flags;
719 req = &rec->aead_req;
722 sk_msg_iter_var_prev(i);
724 rec->content_type = record_type;
725 if (prot->version == TLS_1_3_VERSION) {
726 /* Add content type to end of message. No padding added */
727 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
728 sg_mark_end(&rec->sg_content_type);
729 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
730 &rec->sg_content_type);
732 sg_mark_end(sk_msg_elem(msg_pl, i));
735 if (msg_pl->sg.end < msg_pl->sg.start) {
736 sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
737 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
741 i = msg_pl->sg.start;
742 sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
745 sk_msg_iter_var_prev(i);
746 sg_mark_end(sk_msg_elem(msg_en, i));
748 i = msg_en->sg.start;
749 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
751 tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
752 tls_ctx->tx.rec_seq, record_type, prot);
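/* tls_make_aad() fills rec->aad_space with the additional authenticated
 * data for this record, derived from the record sequence number, content
 * type and length (the exact layout depends on the protocol version).
 */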
754 tls_fill_prepend(tls_ctx,
755 page_address(sg_page(&msg_en->sg.data[i])) +
756 msg_en->sg.data[i].offset,
757 msg_pl->sg.size + prot->tail_size,
760 tls_ctx->pending_open_record_frags = false;
762 rc = tls_do_encryption(sk, tls_ctx, ctx, req,
763 msg_pl->sg.size + prot->tail_size, i);
765 if (rc != -EINPROGRESS) {
766 tls_err_abort(sk, -EBADMSG);
768 tls_ctx->pending_open_record_frags = true;
769 tls_merge_open_record(sk, rec, tmp, orig_end);
772 ctx->async_capable = 1;
775 msg_pl = &tmp->msg_plaintext;
776 msg_en = &tmp->msg_encrypted;
777 sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
778 tls_ctx->pending_open_record_frags = true;
782 return tls_tx_records(sk, flags);
785 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
786 bool full_record, u8 record_type,
787 ssize_t *copied, int flags)
789 struct tls_context *tls_ctx = tls_get_ctx(sk);
790 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
791 struct sk_msg msg_redir = { };
792 struct sk_psock *psock;
793 struct sock *sk_redir;
799 policy = !(flags & MSG_SENDPAGE_NOPOLICY);
800 psock = sk_psock_get(sk);
801 if (!psock || !policy) {
802 err = tls_push_record(sk, flags, record_type);
803 if (err && sk->sk_err == EBADMSG) {
804 *copied -= sk_msg_free(sk, msg);
805 tls_free_open_rec(sk);
809 sk_psock_put(sk, psock);
813 enospc = sk_msg_full(msg);
814 if (psock->eval == __SK_NONE) {
815 delta = msg->sg.size;
816 psock->eval = sk_psock_msg_verdict(sk, psock, msg);
817 delta -= msg->sg.size;
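/* 'delta' records how many bytes the verdict program trimmed from the
 * message, so the caller's copied count can be adjusted accordingly.
 */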
819 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
820 !enospc && !full_record) {
826 if (msg->apply_bytes && msg->apply_bytes < send)
827 send = msg->apply_bytes;
829 switch (psock->eval) {
831 err = tls_push_record(sk, flags, record_type);
832 if (err && sk->sk_err == EBADMSG) {
833 *copied -= sk_msg_free(sk, msg);
834 tls_free_open_rec(sk);
840 sk_redir = psock->sk_redir;
841 memcpy(&msg_redir, msg, sizeof(*msg));
842 if (msg->apply_bytes < send)
843 msg->apply_bytes = 0;
845 msg->apply_bytes -= send;
846 sk_msg_return_zero(sk, msg, send);
847 msg->sg.size -= send;
849 err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
852 *copied -= sk_msg_free_nocharge(sk, &msg_redir);
855 if (msg->sg.size == 0)
856 tls_free_open_rec(sk);
860 sk_msg_free_partial(sk, msg, send);
861 if (msg->apply_bytes < send)
862 msg->apply_bytes = 0;
864 msg->apply_bytes -= send;
865 if (msg->sg.size == 0)
866 tls_free_open_rec(sk);
867 *copied -= (send + delta);
872 bool reset_eval = !ctx->open_rec;
876 msg = &rec->msg_plaintext;
877 if (!msg->apply_bytes)
881 psock->eval = __SK_NONE;
882 if (psock->sk_redir) {
883 sock_put(psock->sk_redir);
884 psock->sk_redir = NULL;
891 sk_psock_put(sk, psock);
895 static int tls_sw_push_pending_record(struct sock *sk, int flags)
897 struct tls_context *tls_ctx = tls_get_ctx(sk);
898 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
899 struct tls_rec *rec = ctx->open_rec;
900 struct sk_msg *msg_pl;
906 msg_pl = &rec->msg_plaintext;
907 copied = msg_pl->sg.size;
911 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
915 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
917 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
918 struct tls_context *tls_ctx = tls_get_ctx(sk);
919 struct tls_prot_info *prot = &tls_ctx->prot_info;
920 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
921 bool async_capable = ctx->async_capable;
922 unsigned char record_type = TLS_RECORD_TYPE_DATA;
923 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
924 bool eor = !(msg->msg_flags & MSG_MORE);
927 struct sk_msg *msg_pl, *msg_en;
938 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
942 mutex_lock(&tls_ctx->tx_lock);
945 if (unlikely(msg->msg_controllen)) {
946 ret = tls_process_cmsg(sk, msg, &record_type);
948 if (ret == -EINPROGRESS)
950 else if (ret != -EAGAIN)
955 while (msg_data_left(msg)) {
964 rec = ctx->open_rec = tls_get_rec(sk);
970 msg_pl = &rec->msg_plaintext;
971 msg_en = &rec->msg_encrypted;
973 orig_size = msg_pl->sg.size;
975 try_to_copy = msg_data_left(msg);
976 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
977 if (try_to_copy >= record_room) {
978 try_to_copy = record_room;
982 required_size = msg_pl->sg.size + try_to_copy +
985 if (!sk_stream_memory_free(sk))
986 goto wait_for_sndbuf;
989 ret = tls_alloc_encrypted_msg(sk, required_size);
992 goto wait_for_memory;
994 /* Adjust try_to_copy according to the amount that was
995 * actually allocated. The difference is due
996 * to the max sg elements limit
998 try_to_copy -= required_size - msg_en->sg.size;
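/* Fast path: when the iterator is not a kvec, the record is complete (or
 * this is the last chunk) and crypto is synchronous, map the user pages
 * directly into the plaintext sg instead of copying them.
 */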
1002 if (!is_kvec && (full_record || eor) && !async_capable) {
1003 u32 first = msg_pl->sg.end;
1005 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1006 msg_pl, try_to_copy);
1008 goto fallback_to_reg_send;
1011 copied += try_to_copy;
1013 sk_msg_sg_copy_set(msg_pl, first);
1014 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1015 record_type, &copied,
1018 if (ret == -EINPROGRESS)
1020 else if (ret == -ENOMEM)
1021 goto wait_for_memory;
1022 else if (ctx->open_rec && ret == -ENOSPC)
1024 else if (ret != -EAGAIN)
1029 copied -= try_to_copy;
1030 sk_msg_sg_copy_clear(msg_pl, first);
1031 iov_iter_revert(&msg->msg_iter,
1032 msg_pl->sg.size - orig_size);
1033 fallback_to_reg_send:
1034 sk_msg_trim(sk, msg_pl, orig_size);
1037 required_size = msg_pl->sg.size + try_to_copy;
1039 ret = tls_clone_plaintext_msg(sk, required_size);
1044 /* Adjust try_to_copy according to the amount that was
1045 * actually allocated. The difference is due
1046 * to the max sg elements limit
1048 try_to_copy -= required_size - msg_pl->sg.size;
1050 sk_msg_trim(sk, msg_en,
1051 msg_pl->sg.size + prot->overhead_size);
1055 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1056 msg_pl, try_to_copy);
1061 /* An open record is defined only if the copy succeeded; otherwise
1062 * we would trim the sg but not reset the open record frags.
1064 tls_ctx->pending_open_record_frags = true;
1065 copied += try_to_copy;
1066 if (full_record || eor) {
1067 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1068 record_type, &copied,
1071 if (ret == -EINPROGRESS)
1073 else if (ret == -ENOMEM)
1074 goto wait_for_memory;
1075 else if (ret != -EAGAIN) {
1086 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1088 ret = sk_stream_wait_memory(sk, &timeo);
1092 tls_trim_both_msgs(sk, orig_size);
1096 if (ctx->open_rec && msg_en->sg.size < required_size)
1097 goto alloc_encrypted;
1102 } else if (num_zc) {
1103 /* Wait for pending encryptions to complete */
1104 spin_lock_bh(&ctx->encrypt_compl_lock);
1105 ctx->async_notify = true;
1107 pending = atomic_read(&ctx->encrypt_pending);
1108 spin_unlock_bh(&ctx->encrypt_compl_lock);
1110 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1112 reinit_completion(&ctx->async_wait.completion);
1114 /* There can be no concurrent accesses, since we have no
1115 * pending encrypt operations
1117 WRITE_ONCE(ctx->async_notify, false);
1119 if (ctx->async_wait.err) {
1120 ret = ctx->async_wait.err;
1125 /* Transmit if any encryptions have completed */
1126 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1127 cancel_delayed_work(&ctx->tx_work.work);
1128 tls_tx_records(sk, msg->msg_flags);
1132 ret = sk_stream_error(sk, msg->msg_flags, ret);
1135 mutex_unlock(&tls_ctx->tx_lock);
1136 return copied > 0 ? copied : ret;
1139 static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1140 int offset, size_t size, int flags)
1142 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1143 struct tls_context *tls_ctx = tls_get_ctx(sk);
1144 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1145 struct tls_prot_info *prot = &tls_ctx->prot_info;
1146 unsigned char record_type = TLS_RECORD_TYPE_DATA;
1147 struct sk_msg *msg_pl;
1148 struct tls_rec *rec;
1156 eor = !(flags & MSG_SENDPAGE_NOTLAST);
1157 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1159 /* Call the sk_stream functions to manage the sndbuf mem. */
1161 size_t copy, required_size;
1169 rec = ctx->open_rec;
1171 rec = ctx->open_rec = tls_get_rec(sk);
1177 msg_pl = &rec->msg_plaintext;
1179 full_record = false;
1180 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
1182 if (copy >= record_room) {
1187 required_size = msg_pl->sg.size + copy + prot->overhead_size;
1189 if (!sk_stream_memory_free(sk))
1190 goto wait_for_sndbuf;
1192 ret = tls_alloc_encrypted_msg(sk, required_size);
1195 goto wait_for_memory;
1197 /* Adjust copy according to the amount that was
1198 * actually allocated. The difference is due
1199 * to the max sg elements limit
1201 copy -= required_size - msg_pl->sg.size;
1205 sk_msg_page_add(msg_pl, page, copy, offset);
1206 sk_mem_charge(sk, copy);
1212 tls_ctx->pending_open_record_frags = true;
1213 if (full_record || eor || sk_msg_full(msg_pl)) {
1214 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1215 record_type, &copied, flags);
1217 if (ret == -EINPROGRESS)
1219 else if (ret == -ENOMEM)
1220 goto wait_for_memory;
1221 else if (ret != -EAGAIN) {
1230 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1232 ret = sk_stream_wait_memory(sk, &timeo);
1235 tls_trim_both_msgs(sk, msg_pl->sg.size);
1244 /* Transmit if any encryptions have completed */
1245 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1246 cancel_delayed_work(&ctx->tx_work.work);
1247 tls_tx_records(sk, flags);
1251 ret = sk_stream_error(sk, flags, ret);
1252 return copied > 0 ? copied : ret;
1255 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1256 int offset, size_t size, int flags)
1258 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1259 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
1260 MSG_NO_SHARED_FRAGS))
1263 return tls_sw_do_sendpage(sk, page, offset, size, flags);
1266 int tls_sw_sendpage(struct sock *sk, struct page *page,
1267 int offset, size_t size, int flags)
1269 struct tls_context *tls_ctx = tls_get_ctx(sk);
1272 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1273 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1276 mutex_lock(&tls_ctx->tx_lock);
1278 ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1280 mutex_unlock(&tls_ctx->tx_lock);
1285 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
1288 struct tls_context *tls_ctx = tls_get_ctx(sk);
1289 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1290 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1292 while (!ctx->recv_pkt) {
1293 if (!sk_psock_queue_empty(psock))
1297 return sock_error(sk);
1299 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1300 __strp_unpause(&ctx->strp);
1305 if (sk->sk_shutdown & RCV_SHUTDOWN)
1308 if (sock_flag(sk, SOCK_DONE))
1311 if (nonblock || !timeo)
1314 add_wait_queue(sk_sleep(sk), &wait);
1315 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1316 sk_wait_event(sk, &timeo,
1317 ctx->recv_pkt || !sk_psock_queue_empty(psock),
1319 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1320 remove_wait_queue(sk_sleep(sk), &wait);
1322 /* Handle signals */
1323 if (signal_pending(current))
1324 return sock_intr_errno(timeo);
1330 static int tls_setup_from_iter(struct iov_iter *from,
1331 int length, int *pages_used,
1332 struct scatterlist *to,
1335 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1336 struct page *pages[MAX_SKB_FRAGS];
1337 unsigned int size = 0;
1338 ssize_t copied, use;
1341 while (length > 0) {
1343 maxpages = to_max_pages - num_elem;
1344 if (maxpages == 0) {
1348 copied = iov_iter_get_pages(from, pages,
1356 iov_iter_advance(from, copied);
1361 use = min_t(int, copied, PAGE_SIZE - offset);
1363 sg_set_page(&to[num_elem],
1364 pages[i], use, offset);
1365 sg_unmark_end(&to[num_elem]);
1366 /* We do not uncharge memory from this API */
1375 /* Mark the end in the last sg entry if newly added */
1376 if (num_elem > *pages_used)
1377 sg_mark_end(&to[num_elem - 1]);
1380 iov_iter_revert(from, size);
1381 *pages_used = num_elem;
1386 static struct sk_buff *
1387 tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
1388 unsigned int full_len)
1390 struct strp_msg *clr_rxm;
1391 struct sk_buff *clr_skb;
1394 clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
1395 &err, sk->sk_allocation);
1399 skb_copy_header(clr_skb, skb);
1400 clr_skb->len = full_len;
1401 clr_skb->data_len = full_len;
1403 clr_rxm = strp_msg(clr_skb);
1404 clr_rxm->offset = 0;
1411 * tls_decrypt_sg() and tls_decrypt_device() are decrypt handlers.
1412 * They must transform the darg in/out argument as follows:
1414 * -------------------------------------------------------------------
1415 * zc | Zero-copy decrypt allowed | Zero-copy performed
1416 * async | Async decrypt allowed | Async crypto used / in progress
1417 * skb | * | Output skb
1420 /* This function decrypts the input skb into either out_iov or out_sg,
1421 * or into the skb buffers themselves. The input parameter 'darg->zc' indicates
1422 * whether zero-copy mode should be tried. With zero-copy mode, either
1423 * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are
1424 * NULL, the decryption happens inside the skb buffers themselves, i.e.
1425 * zero-copy gets disabled and 'darg->zc' is updated.
1427 static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
1428 struct scatterlist *out_sg,
1429 struct tls_decrypt_arg *darg)
1431 struct tls_context *tls_ctx = tls_get_ctx(sk);
1432 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1433 struct tls_prot_info *prot = &tls_ctx->prot_info;
1434 int n_sgin, n_sgout, aead_size, err, pages = 0;
1435 struct sk_buff *skb = tls_strp_msg(ctx);
1436 const struct strp_msg *rxm = strp_msg(skb);
1437 const struct tls_msg *tlm = tls_msg(skb);
1438 struct aead_request *aead_req;
1439 struct scatterlist *sgin = NULL;
1440 struct scatterlist *sgout = NULL;
1441 const int data_len = rxm->full_len - prot->overhead_size;
1442 int tail_pages = !!prot->tail_size;
1443 struct tls_decrypt_ctx *dctx;
1444 struct sk_buff *clear_skb;
1448 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1449 rxm->full_len - prot->prepend_size);
1451 return n_sgin ?: -EBADMSG;
1453 if (darg->zc && (out_iov || out_sg)) {
1457 n_sgout = 1 + tail_pages +
1458 iov_iter_npages_cap(out_iov, INT_MAX, data_len);
1460 n_sgout = sg_nents(out_sg);
1464 clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
1468 n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
1471 /* Increment to accommodate AAD */
1472 n_sgin = n_sgin + 1;
1474 /* Allocate a single block of memory which contains
1475 * aead_req || tls_decrypt_ctx.
1476 * Both structs are variable length.
1478 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1479 mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
1486 /* Segment the allocated memory */
1487 aead_req = (struct aead_request *)mem;
1488 dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
1489 sgin = &dctx->sg[0];
1490 sgout = &dctx->sg[n_sgin];
1492 /* For CCM-based ciphers, the first byte of nonce+iv is a constant */
1493 switch (prot->cipher_type) {
1494 case TLS_CIPHER_AES_CCM_128:
1495 dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
1498 case TLS_CIPHER_SM4_CCM:
1499 dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1505 if (prot->version == TLS_1_3_VERSION ||
1506 prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
1507 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
1508 prot->iv_size + prot->salt_size);
1510 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1511 &dctx->iv[iv_offset] + prot->salt_size,
1515 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
1517 tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
1520 tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
1522 tls_ctx->rx.rec_seq, tlm->control, prot);
1525 sg_init_table(sgin, n_sgin);
1526 sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
1527 err = skb_to_sgvec(skb, &sgin[1],
1528 rxm->offset + prot->prepend_size,
1529 rxm->full_len - prot->prepend_size);
1534 sg_init_table(sgout, n_sgout);
1535 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1537 err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
1538 data_len + prot->tail_size);
1541 } else if (out_iov) {
1542 sg_init_table(sgout, n_sgout);
1543 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1545 err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
1546 (n_sgout - 1 - tail_pages));
1548 goto exit_free_pages;
1550 if (prot->tail_size) {
1551 sg_unmark_end(&sgout[pages]);
1552 sg_set_buf(&sgout[pages + 1], &dctx->tail,
1554 sg_mark_end(&sgout[pages + 1]);
1556 } else if (out_sg) {
1557 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1560 /* Prepare and submit AEAD request */
1561 err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
1562 data_len + prot->tail_size, aead_req, darg);
1564 goto exit_free_pages;
1566 darg->skb = clear_skb ?: tls_strp_msg(ctx);
1569 if (unlikely(darg->async)) {
1570 err = tls_strp_msg_hold(sk, skb, &ctx->async_hold);
1572 __skb_queue_tail(&ctx->async_hold, darg->skb);
1576 if (prot->tail_size)
1577 darg->tail = dctx->tail;
1580 /* Release the pages in case iov was mapped to pages */
1581 for (; pages > 0; pages--)
1582 put_page(sg_page(&sgout[pages]));
1586 consume_skb(clear_skb);
1591 tls_decrypt_device(struct sock *sk, struct tls_context *tls_ctx,
1592 struct tls_decrypt_arg *darg)
1594 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1597 if (tls_ctx->rx_conf != TLS_HW)
1600 err = tls_device_decrypted(sk, tls_ctx);
1605 darg->async = false;
1606 darg->skb = tls_strp_msg(ctx);
1607 ctx->recv_pkt = NULL;
1611 static int tls_rx_one_record(struct sock *sk, struct iov_iter *dest,
1612 struct tls_decrypt_arg *darg)
1614 struct tls_context *tls_ctx = tls_get_ctx(sk);
1615 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1616 struct tls_prot_info *prot = &tls_ctx->prot_info;
1617 struct strp_msg *rxm;
1620 err = tls_decrypt_device(sk, tls_ctx, darg);
1626 err = tls_decrypt_sg(sk, dest, NULL, darg);
1628 if (err == -EBADMSG)
1629 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
1634 /* If opportunistic TLS 1.3 ZC failed retry without ZC */
1635 if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
1636 darg->tail != TLS_RECORD_TYPE_DATA)) {
1639 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
1640 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
1641 return tls_rx_one_record(sk, dest, darg);
1645 if (darg->skb == ctx->recv_pkt)
1646 ctx->recv_pkt = NULL;
1648 pad = tls_padding_length(prot, darg->skb, darg);
1650 consume_skb(darg->skb);
1654 rxm = strp_msg(darg->skb);
1655 rxm->full_len -= pad;
1656 rxm->offset += prot->prepend_size;
1657 rxm->full_len -= prot->overhead_size;
1658 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1663 int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
1665 struct tls_decrypt_arg darg = { .zc = true, };
1667 return tls_decrypt_sg(sk, NULL, sgout, &darg);
1670 static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
1676 *control = tlm->control;
1680 err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1681 sizeof(*control), control);
1682 if (*control != TLS_RECORD_TYPE_DATA) {
1683 if (err || msg->msg_flags & MSG_CTRUNC)
1686 } else if (*control != tlm->control) {
1693 static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
1695 consume_skb(ctx->recv_pkt);
1696 ctx->recv_pkt = NULL;
1697 __strp_unpause(&ctx->strp);
1700 /* This function traverses the rx_list in the tls receive context and copies
1701 * the decrypted records into the buffer provided by the caller when zero copy
1702 * is not true. Further, records are removed from the rx_list if it is not a
1703 * peek case and the record has been consumed completely.
1705 static int process_rx_list(struct tls_sw_context_rx *ctx,
1712 struct sk_buff *skb = skb_peek(&ctx->rx_list);
1713 struct tls_msg *tlm;
1717 while (skip && skb) {
1718 struct strp_msg *rxm = strp_msg(skb);
1721 err = tls_record_content_type(msg, tlm, control);
1725 if (skip < rxm->full_len)
1728 skip = skip - rxm->full_len;
1729 skb = skb_peek_next(skb, &ctx->rx_list);
1732 while (len && skb) {
1733 struct sk_buff *next_skb;
1734 struct strp_msg *rxm = strp_msg(skb);
1735 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1739 err = tls_record_content_type(msg, tlm, control);
1743 err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1749 copied = copied + chunk;
1751 /* Consume the data from the record if it is a non-peek case */
1753 rxm->offset = rxm->offset + chunk;
1754 rxm->full_len = rxm->full_len - chunk;
1756 /* Return if there is unconsumed data in the record */
1757 if (rxm->full_len - skip)
1761 /* The remaining skip-bytes must lie in the 1st record in rx_list.
1762 * So from the 2nd record, 'skip' should be 0.
1767 msg->msg_flags |= MSG_EOR;
1769 next_skb = skb_peek_next(skb, &ctx->rx_list);
1772 __skb_unlink(skb, &ctx->rx_list);
1781 return copied ? : err;
1785 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
1786 size_t len_left, size_t decrypted, ssize_t done,
1791 if (len_left <= decrypted)
1794 max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
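/* Skip the flush while less than ~128K has been read since the last flush
 * and more than one full record is still queued in the TCP receive queue.
 */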
1795 if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
1799 sk_flush_backlog(sk);
1802 static long tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
1809 timeo = sock_rcvtimeo(sk, nonblock);
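/* Only one reader may own the RX path at a time. If another reader is
 * present, mark the lock as contended and sleep on ctx->wq until
 * reader_present is cleared (or the timeout/signal fires).
 */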
1811 while (unlikely(ctx->reader_present)) {
1812 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1814 ctx->reader_contended = 1;
1816 add_wait_queue(&ctx->wq, &wait);
1817 sk_wait_event(sk, &timeo,
1818 !READ_ONCE(ctx->reader_present), &wait);
1819 remove_wait_queue(&ctx->wq, &wait);
1823 if (signal_pending(current))
1824 return sock_intr_errno(timeo);
1827 WRITE_ONCE(ctx->reader_present, 1);
1832 static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
1834 if (unlikely(ctx->reader_contended)) {
1835 if (wq_has_sleeper(&ctx->wq))
1838 ctx->reader_contended = 0;
1840 WARN_ON_ONCE(!ctx->reader_present);
1843 WRITE_ONCE(ctx->reader_present, 0);
1847 int tls_sw_recvmsg(struct sock *sk,
1853 struct tls_context *tls_ctx = tls_get_ctx(sk);
1854 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1855 struct tls_prot_info *prot = &tls_ctx->prot_info;
1856 ssize_t decrypted = 0, async_copy_bytes = 0;
1857 struct sk_psock *psock;
1858 unsigned char control = 0;
1859 size_t flushed_at = 0;
1860 struct strp_msg *rxm;
1861 struct tls_msg *tlm;
1862 struct sk_buff *skb;
1865 int target, err = 0;
1867 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1868 bool is_peek = flags & MSG_PEEK;
1869 bool bpf_strp_enabled;
1872 if (unlikely(flags & MSG_ERRQUEUE))
1873 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1875 psock = sk_psock_get(sk);
1876 timeo = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
1879 bpf_strp_enabled = sk_psock_strp_enabled(psock);
1881 /* If crypto failed the connection is broken */
1882 err = ctx->async_wait.err;
1886 /* Process pending decrypted records. It must be non-zero-copy */
1887 err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
1895 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
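/* Zero-copy decryption straight into the user buffer is only attempted
 * when no BPF strparser is attached, the iterator is not a kvec and this
 * is not a MSG_PEEK read.
 */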
1898 zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
1901 while (len && (decrypted + copied < target || ctx->recv_pkt)) {
1902 struct tls_decrypt_arg darg;
1903 int to_decrypt, chunk;
1905 err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT, timeo);
1908 chunk = sk_msg_recvmsg(sk, psock, msg, len,
1919 memset(&darg.inargs, 0, sizeof(darg.inargs));
1921 rxm = strp_msg(ctx->recv_pkt);
1922 tlm = tls_msg(ctx->recv_pkt);
1924 to_decrypt = rxm->full_len - prot->overhead_size;
1926 if (zc_capable && to_decrypt <= len &&
1927 tlm->control == TLS_RECORD_TYPE_DATA)
1930 /* Do not use async mode if record is non-data */
1931 if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
1932 darg.async = ctx->async_capable;
1936 err = tls_rx_one_record(sk, &msg->msg_iter, &darg);
1938 tls_err_abort(sk, -EBADMSG);
1943 rxm = strp_msg(skb);
1946 async |= darg.async;
1948 /* If the type of records being processed is not known yet,
1949 * set it to the record type just dequeued. If it is already known,
1950 * but does not match the record type just dequeued, go to end.
1951 * We always get the record type here since for TLS 1.2 the record type
1952 * is known just after the record is dequeued from the stream parser.
1953 * For TLS 1.3, we disable async.
1955 err = tls_record_content_type(msg, tlm, &control);
1957 tls_rx_rec_done(ctx);
1959 __skb_queue_tail(&ctx->rx_list, skb);
1963 /* periodically flush backlog, and feed strparser */
1964 tls_read_flush_backlog(sk, prot, len, to_decrypt,
1965 decrypted + copied, &flushed_at);
1967 /* TLS 1.3 may have updated the length by more than overhead */
1968 chunk = rxm->full_len;
1969 tls_rx_rec_done(ctx);
1972 bool partially_consumed = chunk > len;
1975 /* TLS 1.2-only, to_decrypt must be text len */
1976 chunk = min_t(int, to_decrypt, len);
1977 async_copy_bytes += chunk;
1981 __skb_queue_tail(&ctx->rx_list, skb);
1985 if (bpf_strp_enabled) {
1986 err = sk_psock_tls_strp_read(psock, skb);
1987 if (err != __SK_PASS) {
1988 rxm->offset = rxm->offset + rxm->full_len;
1990 if (err == __SK_DROP)
1996 if (partially_consumed)
1999 err = skb_copy_datagram_msg(skb, rxm->offset,
2002 goto put_on_rx_list_err;
2005 goto put_on_rx_list;
2007 if (partially_consumed) {
2008 rxm->offset += chunk;
2009 rxm->full_len -= chunk;
2010 goto put_on_rx_list;
2019 /* Return the full control message to userspace before trying
2020 * to parse another message type
2022 msg->msg_flags |= MSG_EOR;
2023 if (control != TLS_RECORD_TYPE_DATA)
2031 /* Wait for all previously submitted records to be decrypted */
2032 spin_lock_bh(&ctx->decrypt_compl_lock);
2033 reinit_completion(&ctx->async_wait.completion);
2034 pending = atomic_read(&ctx->decrypt_pending);
2035 spin_unlock_bh(&ctx->decrypt_compl_lock);
2038 ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2039 __skb_queue_purge(&ctx->async_hold);
2042 if (err >= 0 || err == -EINPROGRESS)
2048 /* Drain records from the rx_list & copy if required */
2049 if (is_peek || is_kvec)
2050 err = process_rx_list(ctx, msg, &control, copied,
2051 decrypted, is_peek);
2053 err = process_rx_list(ctx, msg, &control, 0,
2054 async_copy_bytes, is_peek);
2055 decrypted = max(err, 0);
2058 copied += decrypted;
2061 tls_rx_reader_unlock(sk, ctx);
2063 sk_psock_put(sk, psock);
2064 return copied ? : err;
2067 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
2068 struct pipe_inode_info *pipe,
2069 size_t len, unsigned int flags)
2071 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
2072 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2073 struct strp_msg *rxm = NULL;
2074 struct sock *sk = sock->sk;
2075 struct tls_msg *tlm;
2076 struct sk_buff *skb;
2082 timeo = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
2086 if (!skb_queue_empty(&ctx->rx_list)) {
2087 skb = __skb_dequeue(&ctx->rx_list);
2089 struct tls_decrypt_arg darg;
2091 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
2094 goto splice_read_end;
2096 memset(&darg.inargs, 0, sizeof(darg.inargs));
2098 err = tls_rx_one_record(sk, NULL, &darg);
2100 tls_err_abort(sk, -EBADMSG);
2101 goto splice_read_end;
2104 tls_rx_rec_done(ctx);
2108 rxm = strp_msg(skb);
2111 /* splice does not support reading control messages */
2112 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2114 goto splice_requeue;
2117 chunk = min_t(unsigned int, rxm->full_len, len);
2118 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2120 goto splice_requeue;
2122 if (chunk < rxm->full_len) {
2124 rxm->full_len -= len;
2125 goto splice_requeue;
2131 tls_rx_reader_unlock(sk, ctx);
2132 return copied ? : err;
2135 __skb_queue_head(&ctx->rx_list, skb);
2136 goto splice_read_end;
2139 bool tls_sw_sock_is_readable(struct sock *sk)
2141 struct tls_context *tls_ctx = tls_get_ctx(sk);
2142 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2143 bool ingress_empty = true;
2144 struct sk_psock *psock;
2147 psock = sk_psock(sk);
2149 ingress_empty = list_empty(&psock->ingress_msg);
2152 return !ingress_empty || ctx->recv_pkt ||
2153 !skb_queue_empty(&ctx->rx_list);
2156 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2158 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2159 struct tls_prot_info *prot = &tls_ctx->prot_info;
2160 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2161 struct strp_msg *rxm = strp_msg(skb);
2162 struct tls_msg *tlm = tls_msg(skb);
2163 size_t cipher_overhead;
2164 size_t data_len = 0;
2167 /* Verify that we have a full TLS header, or wait for more data */
2168 if (rxm->offset + prot->prepend_size > skb->len)
2171 /* Sanity-check size of on-stack buffer. */
2172 if (WARN_ON(prot->prepend_size > sizeof(header))) {
2177 /* Linearize header to local buffer */
2178 ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
2182 tlm->control = header[0];
2184 data_len = ((header[4] & 0xFF) | (header[3] << 8));
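/* header[0] above is the record content type; bytes 1-2 carry the legacy
 * protocol version (checked below) and bytes 3-4 the payload length in
 * network byte order.
 */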
2186 cipher_overhead = prot->tag_size;
2187 if (prot->version != TLS_1_3_VERSION &&
2188 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2189 cipher_overhead += prot->iv_size;
2191 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2196 if (data_len < cipher_overhead) {
2201 /* Note that both TLS 1.3 and TLS 1.2 use the TLS_1_2 version here */
2202 if (header[1] != TLS_1_2_VERSION_MINOR ||
2203 header[2] != TLS_1_2_VERSION_MAJOR) {
2208 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2209 TCP_SKB_CB(skb)->seq + rxm->offset);
2210 return data_len + TLS_HEADER_SIZE;
2213 tls_err_abort(strp->sk, ret);
2218 static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2220 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2221 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2223 ctx->recv_pkt = skb;
2226 ctx->saved_data_ready(strp->sk);
2229 static void tls_data_ready(struct sock *sk)
2231 struct tls_context *tls_ctx = tls_get_ctx(sk);
2232 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2233 struct sk_psock *psock;
2235 strp_data_ready(&ctx->strp);
2237 psock = sk_psock_get(sk);
2239 if (!list_empty(&psock->ingress_msg))
2240 ctx->saved_data_ready(sk);
2241 sk_psock_put(sk, psock);
2245 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2247 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2249 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2250 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2251 cancel_delayed_work_sync(&ctx->tx_work.work);
2254 void tls_sw_release_resources_tx(struct sock *sk)
2256 struct tls_context *tls_ctx = tls_get_ctx(sk);
2257 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2258 struct tls_rec *rec, *tmp;
2261 /* Wait for any pending async encryptions to complete */
2262 spin_lock_bh(&ctx->encrypt_compl_lock);
2263 ctx->async_notify = true;
2264 pending = atomic_read(&ctx->encrypt_pending);
2265 spin_unlock_bh(&ctx->encrypt_compl_lock);
2268 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2270 tls_tx_records(sk, -1);
2272 /* Free up unsent records in tx_list. First, free
2273 * the partially sent record, if any, at the head of tx_list.
2275 if (tls_ctx->partially_sent_record) {
2276 tls_free_partial_record(sk, tls_ctx);
2277 rec = list_first_entry(&ctx->tx_list,
2278 struct tls_rec, list);
2279 list_del(&rec->list);
2280 sk_msg_free(sk, &rec->msg_plaintext);
2284 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2285 list_del(&rec->list);
2286 sk_msg_free(sk, &rec->msg_encrypted);
2287 sk_msg_free(sk, &rec->msg_plaintext);
2291 crypto_free_aead(ctx->aead_send);
2292 tls_free_open_rec(sk);
2295 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2297 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2302 void tls_sw_release_resources_rx(struct sock *sk)
2304 struct tls_context *tls_ctx = tls_get_ctx(sk);
2305 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2307 kfree(tls_ctx->rx.rec_seq);
2308 kfree(tls_ctx->rx.iv);
2310 if (ctx->aead_recv) {
2311 kfree_skb(ctx->recv_pkt);
2312 ctx->recv_pkt = NULL;
2313 __skb_queue_purge(&ctx->rx_list);
2314 crypto_free_aead(ctx->aead_recv);
2315 strp_stop(&ctx->strp);
2316 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2317 * we still want to strp_stop(), but sk->sk_data_ready was
2320 if (ctx->saved_data_ready) {
2321 write_lock_bh(&sk->sk_callback_lock);
2322 sk->sk_data_ready = ctx->saved_data_ready;
2323 write_unlock_bh(&sk->sk_callback_lock);
2328 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2330 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2332 strp_done(&ctx->strp);
2335 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2337 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2342 void tls_sw_free_resources_rx(struct sock *sk)
2344 struct tls_context *tls_ctx = tls_get_ctx(sk);
2346 tls_sw_release_resources_rx(sk);
2347 tls_sw_free_ctx_rx(tls_ctx);
2350 /* The work handler to transmit the encrypted records in tx_list */
2351 static void tx_work_handler(struct work_struct *work)
2353 struct delayed_work *delayed_work = to_delayed_work(work);
2354 struct tx_work *tx_work = container_of(delayed_work,
2355 struct tx_work, work);
2356 struct sock *sk = tx_work->sk;
2357 struct tls_context *tls_ctx = tls_get_ctx(sk);
2358 struct tls_sw_context_tx *ctx;
2360 if (unlikely(!tls_ctx))
2363 ctx = tls_sw_ctx_tx(tls_ctx);
2364 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2367 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2369 mutex_lock(&tls_ctx->tx_lock);
2371 tls_tx_records(sk, -1);
2373 mutex_unlock(&tls_ctx->tx_lock);
2376 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
2378 struct tls_rec *rec;
2380 rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
2384 return READ_ONCE(rec->tx_ready);
2387 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2389 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2391 /* Schedule the transmission if the tx list is ready */
2392 if (tls_is_tx_ready(tx_ctx) &&
2393 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2394 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2397 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2399 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2401 write_lock_bh(&sk->sk_callback_lock);
2402 rx_ctx->saved_data_ready = sk->sk_data_ready;
2403 sk->sk_data_ready = tls_data_ready;
2404 write_unlock_bh(&sk->sk_callback_lock);
2406 strp_check_rcv(&rx_ctx->strp);
2409 void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
2411 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2413 rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
2414 tls_ctx->prot_info.version != TLS_1_3_VERSION;
2417 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2419 struct tls_context *tls_ctx = tls_get_ctx(sk);
2420 struct tls_prot_info *prot = &tls_ctx->prot_info;
2421 struct tls_crypto_info *crypto_info;
2422 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2423 struct tls_sw_context_rx *sw_ctx_rx = NULL;
2424 struct cipher_context *cctx;
2425 struct crypto_aead **aead;
2426 struct strp_callbacks cb;
2427 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2428 struct crypto_tfm *tfm;
2429 char *iv, *rec_seq, *key, *salt, *cipher_name;
2439 if (!ctx->priv_ctx_tx) {
2440 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2445 ctx->priv_ctx_tx = sw_ctx_tx;
2448 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2451 if (!ctx->priv_ctx_rx) {
2452 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2457 ctx->priv_ctx_rx = sw_ctx_rx;
2460 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2465 crypto_init_wait(&sw_ctx_tx->async_wait);
2466 spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
2467 crypto_info = &ctx->crypto_send.info;
2469 aead = &sw_ctx_tx->aead_send;
2470 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2471 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2472 sw_ctx_tx->tx_work.sk = sk;
2474 crypto_init_wait(&sw_ctx_rx->async_wait);
2475 spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
2476 init_waitqueue_head(&sw_ctx_rx->wq);
2477 crypto_info = &ctx->crypto_recv.info;
2479 skb_queue_head_init(&sw_ctx_rx->rx_list);
2480 skb_queue_head_init(&sw_ctx_rx->async_hold);
2481 aead = &sw_ctx_rx->aead_recv;
2484 switch (crypto_info->cipher_type) {
2485 case TLS_CIPHER_AES_GCM_128: {
2486 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2488 gcm_128_info = (void *)crypto_info;
2489 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2490 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2491 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2492 iv = gcm_128_info->iv;
2493 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2494 rec_seq = gcm_128_info->rec_seq;
2495 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2496 key = gcm_128_info->key;
2497 salt = gcm_128_info->salt;
2498 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2499 cipher_name = "gcm(aes)";
2502 case TLS_CIPHER_AES_GCM_256: {
2503 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2505 gcm_256_info = (void *)crypto_info;
2506 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2507 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2508 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2509 iv = gcm_256_info->iv;
2510 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2511 rec_seq = gcm_256_info->rec_seq;
2512 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2513 key = gcm_256_info->key;
2514 salt = gcm_256_info->salt;
2515 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2516 cipher_name = "gcm(aes)";
2519 case TLS_CIPHER_AES_CCM_128: {
2520 struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2522 ccm_128_info = (void *)crypto_info;
2523 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2524 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2525 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2526 iv = ccm_128_info->iv;
2527 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2528 rec_seq = ccm_128_info->rec_seq;
2529 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2530 key = ccm_128_info->key;
2531 salt = ccm_128_info->salt;
2532 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2533 cipher_name = "ccm(aes)";
2536 case TLS_CIPHER_CHACHA20_POLY1305: {
2537 struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
2539 chacha20_poly1305_info = (void *)crypto_info;
2541 tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
2542 iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
2543 iv = chacha20_poly1305_info->iv;
2544 rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
2545 rec_seq = chacha20_poly1305_info->rec_seq;
2546 keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
2547 key = chacha20_poly1305_info->key;
2548 salt = chacha20_poly1305_info->salt;
2549 salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
2550 cipher_name = "rfc7539(chacha20,poly1305)";
2553 case TLS_CIPHER_SM4_GCM: {
2554 struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
2556 sm4_gcm_info = (void *)crypto_info;
2557 nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2558 tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
2559 iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2560 iv = sm4_gcm_info->iv;
2561 rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
2562 rec_seq = sm4_gcm_info->rec_seq;
2563 keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
2564 key = sm4_gcm_info->key;
2565 salt = sm4_gcm_info->salt;
2566 salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
2567 cipher_name = "gcm(sm4)";
2570 case TLS_CIPHER_SM4_CCM: {
2571 struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
2573 sm4_ccm_info = (void *)crypto_info;
2574 nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2575 tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
2576 iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2577 iv = sm4_ccm_info->iv;
2578 rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
2579 rec_seq = sm4_ccm_info->rec_seq;
2580 keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
2581 key = sm4_ccm_info->key;
2582 salt = sm4_ccm_info->salt;
2583 salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
2584 cipher_name = "ccm(sm4)";
2592 if (crypto_info->version == TLS_1_3_VERSION) {
2594 prot->aad_size = TLS_HEADER_SIZE;
2595 prot->tail_size = 1;
2597 prot->aad_size = TLS_AAD_SPACE_SIZE;
2598 prot->tail_size = 0;
2601 /* Sanity-check the sizes for stack allocations. */
2602 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2603 rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
2604 prot->aad_size > TLS_MAX_AAD_SIZE) {
2609 prot->version = crypto_info->version;
2610 prot->cipher_type = crypto_info->cipher_type;
2611 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2612 prot->tag_size = tag_size;
2613 prot->overhead_size = prot->prepend_size +
2614 prot->tag_size + prot->tail_size;
2615 prot->iv_size = iv_size;
2616 prot->salt_size = salt_size;
2617 cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2622 /* Note: 128 & 256 bit salt are the same size */
2623 prot->rec_seq_size = rec_seq_size;
2624 memcpy(cctx->iv, salt, salt_size);
2625 memcpy(cctx->iv + salt_size, iv, iv_size);
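/* cctx->iv now holds the implicit salt followed by the IV; the per-record
 * nonce is later derived from this buffer together with the record
 * sequence number.
 */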
2626 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2627 if (!cctx->rec_seq) {
2633 *aead = crypto_alloc_aead(cipher_name, 0, 0);
2634 if (IS_ERR(*aead)) {
2635 rc = PTR_ERR(*aead);
2641 ctx->push_pending_record = tls_sw_push_pending_record;
2643 rc = crypto_aead_setkey(*aead, key, keysize);
2648 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2653 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2655 tls_update_rx_zc_capable(ctx);
2656 sw_ctx_rx->async_capable =
2657 crypto_info->version != TLS_1_3_VERSION &&
2658 !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
2660 /* Set up strparser */
2661 memset(&cb, 0, sizeof(cb));
2662 cb.rcv_msg = tls_queue;
2663 cb.parse_msg = tls_read_size;
2665 strp_init(&sw_ctx_rx->strp, sk, &cb);
2671 crypto_free_aead(*aead);
2674 kfree(cctx->rec_seq);
2675 cctx->rec_seq = NULL;
2681 kfree(ctx->priv_ctx_tx);
2682 ctx->priv_ctx_tx = NULL;
2684 kfree(ctx->priv_ctx_rx);
2685 ctx->priv_ctx_rx = NULL;