2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38 #include <linux/bug.h>
39 #include <linux/sched/signal.h>
40 #include <linux/module.h>
41 #include <linux/splice.h>
42 #include <crypto/aead.h>
44 #include <net/strparser.h>
49 struct tls_decrypt_arg {
55 struct tls_decrypt_ctx {
57 u8 aad[TLS_MAX_AAD_SIZE];
59 struct scatterlist sg[];
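/* Per-call scratch area for decrypt_internal(): it holds the AAD copy and a
 * variable-length scatterlist that is split into the input and output sg
 * vectors, and it is carved out of a single allocation placed right behind
 * the aead_request (see decrypt_internal() below).
 */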
62 noinline void tls_err_abort(struct sock *sk, int err)
64 WARN_ON_ONCE(err >= 0);
65 /* sk->sk_err should contain a positive error code. */
70 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
71 unsigned int recursion_level)
73 int start = skb_headlen(skb);
74 int i, chunk = start - offset;
75 struct sk_buff *frag_iter;
78 if (unlikely(recursion_level >= 24))
91 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
94 WARN_ON(start > offset + len);
96 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
110 if (unlikely(skb_has_frag_list(skb))) {
111 skb_walk_frags(skb, frag_iter) {
114 WARN_ON(start > offset + len);
116 end = start + frag_iter->len;
117 chunk = end - offset;
121 ret = __skb_nsg(frag_iter, offset - start, chunk,
122 recursion_level + 1);
123 if (unlikely(ret < 0))
138 /* Return the number of scatterlist elements required to completely map the
139 * skb, or -EMSGSIZE if the recursion depth is exceeded.
141 static int skb_nsg(struct sk_buff *skb, int offset, int len)
143 return __skb_nsg(skb, offset, len, 0);
146 static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
147 struct tls_decrypt_arg *darg)
149 struct strp_msg *rxm = strp_msg(skb);
150 struct tls_msg *tlm = tls_msg(skb);
153 /* Determine zero-padding length */
154 if (prot->version == TLS_1_3_VERSION) {
155 int offset = rxm->full_len - TLS_TAG_SIZE - 1;
156 char content_type = darg->zc ? darg->tail : 0;
159 while (content_type == 0) {
160 if (offset < prot->prepend_size)
162 err = skb_copy_bits(skb, rxm->offset + offset,
171 tlm->control = content_type;
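/* For reference, the TLS 1.3 inner plaintext the scan above walks backwards
 * through (RFC 8446, section 5.2):
 *
 *	+---------------------+--------------+--------------+
 *	|       content       | content_type | zero padding |
 *	+---------------------+--------------+--------------+
 *
 * The first non-zero byte found from the tail is the real content type;
 * everything behind it is padding and is subtracted from the record length
 * by the caller.
 */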
176 static void tls_decrypt_done(struct crypto_async_request *req, int err)
178 struct aead_request *aead_req = (struct aead_request *)req;
179 struct scatterlist *sgout = aead_req->dst;
180 struct scatterlist *sgin = aead_req->src;
181 struct tls_sw_context_rx *ctx;
182 struct tls_context *tls_ctx;
183 struct tls_prot_info *prot;
184 struct scatterlist *sg;
188 skb = (struct sk_buff *)req->data;
189 tls_ctx = tls_get_ctx(skb->sk);
190 ctx = tls_sw_ctx_rx(tls_ctx);
191 prot = &tls_ctx->prot_info;
193 /* Propagate the error, if there was one */
196 TLS_INC_STATS(sock_net(skb->sk),
197 LINUX_MIB_TLSDECRYPTERROR);
198 ctx->async_wait.err = err;
199 tls_err_abort(skb->sk, err);
201 struct strp_msg *rxm = strp_msg(skb);
203 /* No TLS 1.3 support with async crypto */
204 WARN_ON(prot->tail_size);
206 rxm->offset += prot->prepend_size;
207 rxm->full_len -= prot->overhead_size;
210 /* After using skb->sk to propagate sk through crypto async callback
211 * we need to NULL it again.
216 /* Free the destination pages if the skb was not decrypted in place */
218 /* Skip the first S/G entry as it points to AAD */
219 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
222 put_page(sg_page(sg));
228 spin_lock_bh(&ctx->decrypt_compl_lock);
229 if (!atomic_dec_return(&ctx->decrypt_pending))
230 complete(&ctx->async_wait.completion);
231 spin_unlock_bh(&ctx->decrypt_compl_lock);
234 static int tls_do_decryption(struct sock *sk,
236 struct scatterlist *sgin,
237 struct scatterlist *sgout,
240 struct aead_request *aead_req,
241 struct tls_decrypt_arg *darg)
243 struct tls_context *tls_ctx = tls_get_ctx(sk);
244 struct tls_prot_info *prot = &tls_ctx->prot_info;
245 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
248 aead_request_set_tfm(aead_req, ctx->aead_recv);
249 aead_request_set_ad(aead_req, prot->aad_size);
250 aead_request_set_crypt(aead_req, sgin, sgout,
251 data_len + prot->tag_size,
255 /* Using skb->sk to push sk through to crypto async callback
256 * handler. This allows propagating errors up to the socket
257 * if needed. It _must_ be cleared in the async handler
258 * before consume_skb is called. We _know_ skb->sk is NULL
259 * because it is a clone from strparser.
262 aead_request_set_callback(aead_req,
263 CRYPTO_TFM_REQ_MAY_BACKLOG,
264 tls_decrypt_done, skb);
265 atomic_inc(&ctx->decrypt_pending);
267 aead_request_set_callback(aead_req,
268 CRYPTO_TFM_REQ_MAY_BACKLOG,
269 crypto_req_done, &ctx->async_wait);
272 ret = crypto_aead_decrypt(aead_req);
273 if (ret == -EINPROGRESS) {
277 ret = crypto_wait_req(ret, &ctx->async_wait);
284 static void tls_trim_both_msgs(struct sock *sk, int target_size)
286 struct tls_context *tls_ctx = tls_get_ctx(sk);
287 struct tls_prot_info *prot = &tls_ctx->prot_info;
288 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
289 struct tls_rec *rec = ctx->open_rec;
291 sk_msg_trim(sk, &rec->msg_plaintext, target_size);
293 target_size += prot->overhead_size;
294 sk_msg_trim(sk, &rec->msg_encrypted, target_size);
297 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
299 struct tls_context *tls_ctx = tls_get_ctx(sk);
300 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
301 struct tls_rec *rec = ctx->open_rec;
302 struct sk_msg *msg_en = &rec->msg_encrypted;
304 return sk_msg_alloc(sk, msg_en, len, 0);
307 static int tls_clone_plaintext_msg(struct sock *sk, int required)
309 struct tls_context *tls_ctx = tls_get_ctx(sk);
310 struct tls_prot_info *prot = &tls_ctx->prot_info;
311 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
312 struct tls_rec *rec = ctx->open_rec;
313 struct sk_msg *msg_pl = &rec->msg_plaintext;
314 struct sk_msg *msg_en = &rec->msg_encrypted;
317 /* We add page references worth len bytes from the encrypted sg
318  * at the end of the plaintext sg. It is guaranteed that msg_en
319  * has enough room (ensured by the caller).
321 len = required - msg_pl->sg.size;
323 /* Skip initial bytes in msg_en's data to be able to use the
324  * same offset for both plain and encrypted data.
326 skip = prot->prepend_size + msg_pl->sg.size;
328 return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
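/* Note on the resulting layout (inferred from the callers in this file):
 * after this clone the plaintext sg references the same pages, at the same
 * offsets past the record header, as the encrypted sg. Data copied into
 * msg_plaintext is therefore encrypted effectively in place into
 * msg_encrypted.
 */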
331 static struct tls_rec *tls_get_rec(struct sock *sk)
333 struct tls_context *tls_ctx = tls_get_ctx(sk);
334 struct tls_prot_info *prot = &tls_ctx->prot_info;
335 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
336 struct sk_msg *msg_pl, *msg_en;
340 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
342 rec = kzalloc(mem_size, sk->sk_allocation);
346 msg_pl = &rec->msg_plaintext;
347 msg_en = &rec->msg_encrypted;
352 sg_init_table(rec->sg_aead_in, 2);
353 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
354 sg_unmark_end(&rec->sg_aead_in[1]);
356 sg_init_table(rec->sg_aead_out, 2);
357 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
358 sg_unmark_end(&rec->sg_aead_out[1]);
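/* Both AEAD sg tables keep the AAD in slot 0; slot 1 is deliberately left
 * unterminated so that tls_push_record() can later chain it to the
 * plaintext/ciphertext sg lists (see the sg_chain() calls there).
 */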
363 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
365 sk_msg_free(sk, &rec->msg_encrypted);
366 sk_msg_free(sk, &rec->msg_plaintext);
370 static void tls_free_open_rec(struct sock *sk)
372 struct tls_context *tls_ctx = tls_get_ctx(sk);
373 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
374 struct tls_rec *rec = ctx->open_rec;
377 tls_free_rec(sk, rec);
378 ctx->open_rec = NULL;
382 int tls_tx_records(struct sock *sk, int flags)
384 struct tls_context *tls_ctx = tls_get_ctx(sk);
385 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
386 struct tls_rec *rec, *tmp;
387 struct sk_msg *msg_en;
388 int tx_flags, rc = 0;
390 if (tls_is_partially_sent_record(tls_ctx)) {
391 rec = list_first_entry(&ctx->tx_list,
392 struct tls_rec, list);
395 tx_flags = rec->tx_flags;
399 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
403 /* Full record has been transmitted.
404 * Remove the head of tx_list
406 list_del(&rec->list);
407 sk_msg_free(sk, &rec->msg_plaintext);
411 /* Tx all ready records */
412 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
413 if (READ_ONCE(rec->tx_ready)) {
415 tx_flags = rec->tx_flags;
419 msg_en = &rec->msg_encrypted;
420 rc = tls_push_sg(sk, tls_ctx,
421 &msg_en->sg.data[msg_en->sg.curr],
426 list_del(&rec->list);
427 sk_msg_free(sk, &rec->msg_plaintext);
435 if (rc < 0 && rc != -EAGAIN)
436 tls_err_abort(sk, -EBADMSG);
441 static void tls_encrypt_done(struct crypto_async_request *req, int err)
443 struct aead_request *aead_req = (struct aead_request *)req;
444 struct sock *sk = req->data;
445 struct tls_context *tls_ctx = tls_get_ctx(sk);
446 struct tls_prot_info *prot = &tls_ctx->prot_info;
447 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
448 struct scatterlist *sge;
449 struct sk_msg *msg_en;
454 rec = container_of(aead_req, struct tls_rec, aead_req);
455 msg_en = &rec->msg_encrypted;
457 sge = sk_msg_elem(msg_en, msg_en->sg.curr);
458 sge->offset -= prot->prepend_size;
459 sge->length += prot->prepend_size;
461 /* Check if an error was previously set on the socket */
462 if (err || sk->sk_err) {
465 /* If err is already set on socket, return the same code */
467 ctx->async_wait.err = -sk->sk_err;
469 ctx->async_wait.err = err;
470 tls_err_abort(sk, err);
475 struct tls_rec *first_rec;
477 /* Mark the record as ready for transmission */
478 smp_store_mb(rec->tx_ready, true);
480 /* If the just-completed record is at the head of tx_list, schedule tx */
481 first_rec = list_first_entry(&ctx->tx_list,
482 struct tls_rec, list);
483 if (rec == first_rec)
487 spin_lock_bh(&ctx->encrypt_compl_lock);
488 pending = atomic_dec_return(&ctx->encrypt_pending);
490 if (!pending && ctx->async_notify)
491 complete(&ctx->async_wait.completion);
492 spin_unlock_bh(&ctx->encrypt_compl_lock);
497 /* Schedule the transmission */
498 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
499 schedule_delayed_work(&ctx->tx_work.work, 1);
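/* Records carry their TLS sequence number from the moment they are built, so
 * they must hit the wire in tx_list order. An out-of-order completion above
 * therefore only marks its record tx_ready; actual transmission happens in
 * tls_tx_records(), which walks tx_list in order from the head.
 */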
502 static int tls_do_encryption(struct sock *sk,
503 struct tls_context *tls_ctx,
504 struct tls_sw_context_tx *ctx,
505 struct aead_request *aead_req,
506 size_t data_len, u32 start)
508 struct tls_prot_info *prot = &tls_ctx->prot_info;
509 struct tls_rec *rec = ctx->open_rec;
510 struct sk_msg *msg_en = &rec->msg_encrypted;
511 struct scatterlist *sge = sk_msg_elem(msg_en, start);
512 int rc, iv_offset = 0;
514 /* For CCM based ciphers, first byte of IV is a constant */
515 switch (prot->cipher_type) {
516 case TLS_CIPHER_AES_CCM_128:
517 rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
520 case TLS_CIPHER_SM4_CCM:
521 rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
526 memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
527 prot->iv_size + prot->salt_size);
529 tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
530 tls_ctx->tx.rec_seq);
532 sge->offset += prot->prepend_size;
533 sge->length -= prot->prepend_size;
535 msg_en->sg.curr = start;
537 aead_request_set_tfm(aead_req, ctx->aead_send);
538 aead_request_set_ad(aead_req, prot->aad_size);
539 aead_request_set_crypt(aead_req, rec->sg_aead_in,
541 data_len, rec->iv_data);
543 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
544 tls_encrypt_done, sk);
546 /* Add the record to tx_list */
547 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
548 atomic_inc(&ctx->encrypt_pending);
550 rc = crypto_aead_encrypt(aead_req);
551 if (!rc || rc != -EINPROGRESS) {
552 atomic_dec(&ctx->encrypt_pending);
553 sge->offset -= prot->prepend_size;
554 sge->length += prot->prepend_size;
558 WRITE_ONCE(rec->tx_ready, true);
559 } else if (rc != -EINPROGRESS) {
560 list_del(&rec->list);
564 /* Unhook the record from the context if encryption did not fail */
565 ctx->open_rec = NULL;
566 tls_advance_record_sn(sk, prot, &tls_ctx->tx);
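/* The record is added to tx_list and encrypt_pending is bumped before
 * crypto_aead_encrypt() is called because the request may complete
 * asynchronously in tls_encrypt_done(); synchronous completion or failure
 * drops the count again a few lines above.
 */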
570 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
571 struct tls_rec **to, struct sk_msg *msg_opl,
572 struct sk_msg *msg_oen, u32 split_point,
573 u32 tx_overhead_size, u32 *orig_end)
575 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
576 struct scatterlist *sge, *osge, *nsge;
577 u32 orig_size = msg_opl->sg.size;
578 struct scatterlist tmp = { };
579 struct sk_msg *msg_npl;
583 new = tls_get_rec(sk);
586 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
587 tx_overhead_size, 0);
589 tls_free_rec(sk, new);
593 *orig_end = msg_opl->sg.end;
594 i = msg_opl->sg.start;
595 sge = sk_msg_elem(msg_opl, i);
596 while (apply && sge->length) {
597 if (sge->length > apply) {
598 u32 len = sge->length - apply;
600 get_page(sg_page(sge));
601 sg_set_page(&tmp, sg_page(sge), len,
602 sge->offset + apply);
607 apply -= sge->length;
608 bytes += sge->length;
611 sk_msg_iter_var_next(i);
612 if (i == msg_opl->sg.end)
614 sge = sk_msg_elem(msg_opl, i);
618 msg_opl->sg.curr = i;
619 msg_opl->sg.copybreak = 0;
620 msg_opl->apply_bytes = 0;
621 msg_opl->sg.size = bytes;
623 msg_npl = &new->msg_plaintext;
624 msg_npl->apply_bytes = apply;
625 msg_npl->sg.size = orig_size - bytes;
627 j = msg_npl->sg.start;
628 nsge = sk_msg_elem(msg_npl, j);
630 memcpy(nsge, &tmp, sizeof(*nsge));
631 sk_msg_iter_var_next(j);
632 nsge = sk_msg_elem(msg_npl, j);
635 osge = sk_msg_elem(msg_opl, i);
636 while (osge->length) {
637 memcpy(nsge, osge, sizeof(*nsge));
639 sk_msg_iter_var_next(i);
640 sk_msg_iter_var_next(j);
643 osge = sk_msg_elem(msg_opl, i);
644 nsge = sk_msg_elem(msg_npl, j);
648 msg_npl->sg.curr = j;
649 msg_npl->sg.copybreak = 0;
655 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
656 struct tls_rec *from, u32 orig_end)
658 struct sk_msg *msg_npl = &from->msg_plaintext;
659 struct sk_msg *msg_opl = &to->msg_plaintext;
660 struct scatterlist *osge, *nsge;
664 sk_msg_iter_var_prev(i);
665 j = msg_npl->sg.start;
667 osge = sk_msg_elem(msg_opl, i);
668 nsge = sk_msg_elem(msg_npl, j);
670 if (sg_page(osge) == sg_page(nsge) &&
671 osge->offset + osge->length == nsge->offset) {
672 osge->length += nsge->length;
673 put_page(sg_page(nsge));
676 msg_opl->sg.end = orig_end;
677 msg_opl->sg.curr = orig_end;
678 msg_opl->sg.copybreak = 0;
679 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
680 msg_opl->sg.size += msg_npl->sg.size;
682 sk_msg_free(sk, &to->msg_encrypted);
683 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
688 static int tls_push_record(struct sock *sk, int flags,
689 unsigned char record_type)
691 struct tls_context *tls_ctx = tls_get_ctx(sk);
692 struct tls_prot_info *prot = &tls_ctx->prot_info;
693 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
694 struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
695 u32 i, split_point, orig_end;
696 struct sk_msg *msg_pl, *msg_en;
697 struct aead_request *req;
704 msg_pl = &rec->msg_plaintext;
705 msg_en = &rec->msg_encrypted;
707 split_point = msg_pl->apply_bytes;
708 split = split_point && split_point < msg_pl->sg.size;
709 if (unlikely((!split &&
711 prot->overhead_size > msg_en->sg.size) ||
714 prot->overhead_size > msg_en->sg.size))) {
716 split_point = msg_en->sg.size;
719 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
720 split_point, prot->overhead_size,
724 /* This can happen if the tls_split_open_record() above allocates
725  * a single large encryption buffer instead of two smaller
726  * ones. In this case adjust pointers and continue without the split.
729 if (!msg_pl->sg.size) {
730 tls_merge_open_record(sk, rec, tmp, orig_end);
731 msg_pl = &rec->msg_plaintext;
732 msg_en = &rec->msg_encrypted;
735 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
736 prot->overhead_size);
739 rec->tx_flags = flags;
740 req = &rec->aead_req;
743 sk_msg_iter_var_prev(i);
745 rec->content_type = record_type;
746 if (prot->version == TLS_1_3_VERSION) {
747 /* Add content type to end of message. No padding added */
748 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
749 sg_mark_end(&rec->sg_content_type);
750 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
751 &rec->sg_content_type);
753 sg_mark_end(sk_msg_elem(msg_pl, i));
756 if (msg_pl->sg.end < msg_pl->sg.start) {
757 sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
758 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
762 i = msg_pl->sg.start;
763 sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
766 sk_msg_iter_var_prev(i);
767 sg_mark_end(sk_msg_elem(msg_en, i));
769 i = msg_en->sg.start;
770 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
772 tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
773 tls_ctx->tx.rec_seq, record_type, prot);
775 tls_fill_prepend(tls_ctx,
776 page_address(sg_page(&msg_en->sg.data[i])) +
777 msg_en->sg.data[i].offset,
778 msg_pl->sg.size + prot->tail_size,
781 tls_ctx->pending_open_record_frags = false;
783 rc = tls_do_encryption(sk, tls_ctx, ctx, req,
784 msg_pl->sg.size + prot->tail_size, i);
786 if (rc != -EINPROGRESS) {
787 tls_err_abort(sk, -EBADMSG);
789 tls_ctx->pending_open_record_frags = true;
790 tls_merge_open_record(sk, rec, tmp, orig_end);
793 ctx->async_capable = 1;
796 msg_pl = &tmp->msg_plaintext;
797 msg_en = &tmp->msg_encrypted;
798 sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
799 tls_ctx->pending_open_record_frags = true;
803 return tls_tx_records(sk, flags);
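/* Rough shape of the record assembled above in msg_encrypted (this is what
 * tls_tx_records() eventually pushes out):
 *
 *	+---------------------------+------------+-----+
 *	| header (+ explicit nonce  | ciphertext | tag |
 *	|   for TLS 1.2 AEADs)      |            |     |
 *	+---------------------------+------------+-----+
 *
 * For TLS 1.3 the buffer that gets encrypted is the application data with
 * the real content type appended as a trailing byte (sg_content_type above),
 * while the outer header carries the data record type as required by the
 * protocol.
 */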
806 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
807 bool full_record, u8 record_type,
808 ssize_t *copied, int flags)
810 struct tls_context *tls_ctx = tls_get_ctx(sk);
811 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
812 struct sk_msg msg_redir = { };
813 struct sk_psock *psock;
814 struct sock *sk_redir;
820 policy = !(flags & MSG_SENDPAGE_NOPOLICY);
821 psock = sk_psock_get(sk);
822 if (!psock || !policy) {
823 err = tls_push_record(sk, flags, record_type);
824 if (err && sk->sk_err == EBADMSG) {
825 *copied -= sk_msg_free(sk, msg);
826 tls_free_open_rec(sk);
830 sk_psock_put(sk, psock);
834 enospc = sk_msg_full(msg);
835 if (psock->eval == __SK_NONE) {
836 delta = msg->sg.size;
837 psock->eval = sk_psock_msg_verdict(sk, psock, msg);
838 delta -= msg->sg.size;
840 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
841 !enospc && !full_record) {
847 if (msg->apply_bytes && msg->apply_bytes < send)
848 send = msg->apply_bytes;
850 switch (psock->eval) {
852 err = tls_push_record(sk, flags, record_type);
853 if (err && sk->sk_err == EBADMSG) {
854 *copied -= sk_msg_free(sk, msg);
855 tls_free_open_rec(sk);
861 sk_redir = psock->sk_redir;
862 memcpy(&msg_redir, msg, sizeof(*msg));
863 if (msg->apply_bytes < send)
864 msg->apply_bytes = 0;
866 msg->apply_bytes -= send;
867 sk_msg_return_zero(sk, msg, send);
868 msg->sg.size -= send;
870 err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
873 *copied -= sk_msg_free_nocharge(sk, &msg_redir);
876 if (msg->sg.size == 0)
877 tls_free_open_rec(sk);
881 sk_msg_free_partial(sk, msg, send);
882 if (msg->apply_bytes < send)
883 msg->apply_bytes = 0;
885 msg->apply_bytes -= send;
886 if (msg->sg.size == 0)
887 tls_free_open_rec(sk);
888 *copied -= (send + delta);
893 bool reset_eval = !ctx->open_rec;
897 msg = &rec->msg_plaintext;
898 if (!msg->apply_bytes)
902 psock->eval = __SK_NONE;
903 if (psock->sk_redir) {
904 sock_put(psock->sk_redir);
905 psock->sk_redir = NULL;
912 sk_psock_put(sk, psock);
916 static int tls_sw_push_pending_record(struct sock *sk, int flags)
918 struct tls_context *tls_ctx = tls_get_ctx(sk);
919 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
920 struct tls_rec *rec = ctx->open_rec;
921 struct sk_msg *msg_pl;
927 msg_pl = &rec->msg_plaintext;
928 copied = msg_pl->sg.size;
932 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
936 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
938 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
939 struct tls_context *tls_ctx = tls_get_ctx(sk);
940 struct tls_prot_info *prot = &tls_ctx->prot_info;
941 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
942 bool async_capable = ctx->async_capable;
943 unsigned char record_type = TLS_RECORD_TYPE_DATA;
944 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
945 bool eor = !(msg->msg_flags & MSG_MORE);
948 struct sk_msg *msg_pl, *msg_en;
959 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
963 mutex_lock(&tls_ctx->tx_lock);
966 if (unlikely(msg->msg_controllen)) {
967 ret = tls_process_cmsg(sk, msg, &record_type);
969 if (ret == -EINPROGRESS)
971 else if (ret != -EAGAIN)
976 while (msg_data_left(msg)) {
985 rec = ctx->open_rec = tls_get_rec(sk);
991 msg_pl = &rec->msg_plaintext;
992 msg_en = &rec->msg_encrypted;
994 orig_size = msg_pl->sg.size;
996 try_to_copy = msg_data_left(msg);
997 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
998 if (try_to_copy >= record_room) {
999 try_to_copy = record_room;
1003 required_size = msg_pl->sg.size + try_to_copy +
1004 prot->overhead_size;
1006 if (!sk_stream_memory_free(sk))
1007 goto wait_for_sndbuf;
1010 ret = tls_alloc_encrypted_msg(sk, required_size);
1013 goto wait_for_memory;
1015 /* Adjust try_to_copy according to the amount that was
1016  * actually allocated. The difference is due
1017  * to the max sg elements limit
1019 try_to_copy -= required_size - msg_en->sg.size;
1023 if (!is_kvec && (full_record || eor) && !async_capable) {
1024 u32 first = msg_pl->sg.end;
1026 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1027 msg_pl, try_to_copy);
1029 goto fallback_to_reg_send;
1032 copied += try_to_copy;
1034 sk_msg_sg_copy_set(msg_pl, first);
1035 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1036 record_type, &copied,
1039 if (ret == -EINPROGRESS)
1041 else if (ret == -ENOMEM)
1042 goto wait_for_memory;
1043 else if (ctx->open_rec && ret == -ENOSPC)
1045 else if (ret != -EAGAIN)
1050 copied -= try_to_copy;
1051 sk_msg_sg_copy_clear(msg_pl, first);
1052 iov_iter_revert(&msg->msg_iter,
1053 msg_pl->sg.size - orig_size);
1054 fallback_to_reg_send:
1055 sk_msg_trim(sk, msg_pl, orig_size);
1058 required_size = msg_pl->sg.size + try_to_copy;
1060 ret = tls_clone_plaintext_msg(sk, required_size);
1065 /* Adjust try_to_copy according to the amount that was
1066  * actually allocated. The difference is due
1067  * to the max sg elements limit
1069 try_to_copy -= required_size - msg_pl->sg.size;
1071 sk_msg_trim(sk, msg_en,
1072 msg_pl->sg.size + prot->overhead_size);
1076 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1077 msg_pl, try_to_copy);
1082 /* An open record is only defined if the copy succeeded; otherwise
1083  * we would trim the sg but not reset the open record frags.
1085 tls_ctx->pending_open_record_frags = true;
1086 copied += try_to_copy;
1087 if (full_record || eor) {
1088 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1089 record_type, &copied,
1092 if (ret == -EINPROGRESS)
1094 else if (ret == -ENOMEM)
1095 goto wait_for_memory;
1096 else if (ret != -EAGAIN) {
1107 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1109 ret = sk_stream_wait_memory(sk, &timeo);
1113 tls_trim_both_msgs(sk, orig_size);
1117 if (ctx->open_rec && msg_en->sg.size < required_size)
1118 goto alloc_encrypted;
1123 } else if (num_zc) {
1124 /* Wait for pending encryptions to get completed */
1125 spin_lock_bh(&ctx->encrypt_compl_lock);
1126 ctx->async_notify = true;
1128 pending = atomic_read(&ctx->encrypt_pending);
1129 spin_unlock_bh(&ctx->encrypt_compl_lock);
1131 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1133 reinit_completion(&ctx->async_wait.completion);
1135 /* There can be no concurrent accesses, since we have no
1136 * pending encrypt operations
1138 WRITE_ONCE(ctx->async_notify, false);
1140 if (ctx->async_wait.err) {
1141 ret = ctx->async_wait.err;
1146 /* Transmit if any encryptions have completed */
1147 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1148 cancel_delayed_work(&ctx->tx_work.work);
1149 tls_tx_records(sk, msg->msg_flags);
1153 ret = sk_stream_error(sk, msg->msg_flags, ret);
1156 mutex_unlock(&tls_ctx->tx_lock);
1157 return copied > 0 ? copied : ret;
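/* For reference, an illustrative userspace sketch (not part of this file,
 * adapted from Documentation/networking/tls.rst) of how a non-data record
 * type reaches the tls_process_cmsg() path above; @fd, @data, @len and
 * @record_type are placeholders:
 *
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec iov = { .iov_base = data, .iov_len = len };
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*CMSG_DATA(cmsg) = record_type;		// e.g. 21 for an alert
 *	msg.msg_controllen = cmsg->cmsg_len;
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	sendmsg(fd, &msg, 0);
 */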
1160 static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1161 int offset, size_t size, int flags)
1163 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1164 struct tls_context *tls_ctx = tls_get_ctx(sk);
1165 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1166 struct tls_prot_info *prot = &tls_ctx->prot_info;
1167 unsigned char record_type = TLS_RECORD_TYPE_DATA;
1168 struct sk_msg *msg_pl;
1169 struct tls_rec *rec;
1177 eor = !(flags & MSG_SENDPAGE_NOTLAST);
1178 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1180 /* Call the sk_stream functions to manage the sndbuf mem. */
1182 size_t copy, required_size;
1190 rec = ctx->open_rec;
1192 rec = ctx->open_rec = tls_get_rec(sk);
1198 msg_pl = &rec->msg_plaintext;
1200 full_record = false;
1201 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
1203 if (copy >= record_room) {
1208 required_size = msg_pl->sg.size + copy + prot->overhead_size;
1210 if (!sk_stream_memory_free(sk))
1211 goto wait_for_sndbuf;
1213 ret = tls_alloc_encrypted_msg(sk, required_size);
1216 goto wait_for_memory;
1218 /* Adjust copy according to the amount that was
1219  * actually allocated. The difference is due
1220  * to the max sg elements limit
1222 copy -= required_size - msg_pl->sg.size;
1226 sk_msg_page_add(msg_pl, page, copy, offset);
1227 sk_mem_charge(sk, copy);
1233 tls_ctx->pending_open_record_frags = true;
1234 if (full_record || eor || sk_msg_full(msg_pl)) {
1235 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1236 record_type, &copied, flags);
1238 if (ret == -EINPROGRESS)
1240 else if (ret == -ENOMEM)
1241 goto wait_for_memory;
1242 else if (ret != -EAGAIN) {
1251 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1253 ret = sk_stream_wait_memory(sk, &timeo);
1256 tls_trim_both_msgs(sk, msg_pl->sg.size);
1265 /* Transmit if any encryptions have completed */
1266 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1267 cancel_delayed_work(&ctx->tx_work.work);
1268 tls_tx_records(sk, flags);
1272 ret = sk_stream_error(sk, flags, ret);
1273 return copied > 0 ? copied : ret;
1276 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1277 int offset, size_t size, int flags)
1279 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1280 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
1281 MSG_NO_SHARED_FRAGS))
1284 return tls_sw_do_sendpage(sk, page, offset, size, flags);
1287 int tls_sw_sendpage(struct sock *sk, struct page *page,
1288 int offset, size_t size, int flags)
1290 struct tls_context *tls_ctx = tls_get_ctx(sk);
1293 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1294 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1297 mutex_lock(&tls_ctx->tx_lock);
1299 ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1301 mutex_unlock(&tls_ctx->tx_lock);
1306 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
1309 struct tls_context *tls_ctx = tls_get_ctx(sk);
1310 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1311 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1313 while (!ctx->recv_pkt) {
1314 if (!sk_psock_queue_empty(psock))
1318 return sock_error(sk);
1320 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1321 __strp_unpause(&ctx->strp);
1326 if (sk->sk_shutdown & RCV_SHUTDOWN)
1329 if (sock_flag(sk, SOCK_DONE))
1332 if (nonblock || !timeo)
1335 add_wait_queue(sk_sleep(sk), &wait);
1336 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1337 sk_wait_event(sk, &timeo,
1338 ctx->recv_pkt || !sk_psock_queue_empty(psock),
1340 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1341 remove_wait_queue(sk_sleep(sk), &wait);
1343 /* Handle signals */
1344 if (signal_pending(current))
1345 return sock_intr_errno(timeo);
1351 static int tls_setup_from_iter(struct iov_iter *from,
1352 int length, int *pages_used,
1353 struct scatterlist *to,
1356 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1357 struct page *pages[MAX_SKB_FRAGS];
1358 unsigned int size = 0;
1359 ssize_t copied, use;
1362 while (length > 0) {
1364 maxpages = to_max_pages - num_elem;
1365 if (maxpages == 0) {
1369 copied = iov_iter_get_pages(from, pages,
1377 iov_iter_advance(from, copied);
1382 use = min_t(int, copied, PAGE_SIZE - offset);
1384 sg_set_page(&to[num_elem],
1385 pages[i], use, offset);
1386 sg_unmark_end(&to[num_elem]);
1387 /* We do not uncharge memory from this API */
1396 /* Mark the end in the last sg entry if newly added */
1397 if (num_elem > *pages_used)
1398 sg_mark_end(&to[num_elem - 1]);
1401 iov_iter_revert(from, size);
1402 *pages_used = num_elem;
1407 /* This function decrypts the input skb into either out_iov or out_sg,
1408  * or into the skb buffers themselves. The input parameter 'zc' indicates
1409  * whether zero-copy mode should be tried. With zero-copy mode, either
1410  * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are
1411  * NULL, the decryption happens inside the skb buffers themselves, i.e.
1412  * zero-copy gets disabled and 'zc' is updated.
1415 static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1416 struct iov_iter *out_iov,
1417 struct scatterlist *out_sg,
1418 struct tls_decrypt_arg *darg)
1420 struct tls_context *tls_ctx = tls_get_ctx(sk);
1421 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1422 struct tls_prot_info *prot = &tls_ctx->prot_info;
1423 int n_sgin, n_sgout, aead_size, err, pages = 0;
1424 struct strp_msg *rxm = strp_msg(skb);
1425 struct tls_msg *tlm = tls_msg(skb);
1426 struct aead_request *aead_req;
1427 struct sk_buff *unused;
1428 struct scatterlist *sgin = NULL;
1429 struct scatterlist *sgout = NULL;
1430 const int data_len = rxm->full_len - prot->overhead_size;
1431 int tail_pages = !!prot->tail_size;
1432 struct tls_decrypt_ctx *dctx;
1436 if (darg->zc && (out_iov || out_sg)) {
1438 n_sgout = 1 + tail_pages +
1439 iov_iter_npages_cap(out_iov, INT_MAX, data_len);
1441 n_sgout = sg_nents(out_sg);
1442 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1443 rxm->full_len - prot->prepend_size);
1447 n_sgin = skb_cow_data(skb, 0, &unused);
1453 /* Increment to accommodate AAD */
1454 n_sgin = n_sgin + 1;
1456 /* Allocate a single block of memory which contains
1457 * aead_req || tls_decrypt_ctx.
1458 * Both structs are variable length.
1460 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1461 mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
1466 /* Segment the allocated memory */
1467 aead_req = (struct aead_request *)mem;
1468 dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
1469 sgin = &dctx->sg[0];
1470 sgout = &dctx->sg[n_sgin];
1472 /* For CCM based ciphers, first byte of nonce+iv is a constant */
1473 switch (prot->cipher_type) {
1474 case TLS_CIPHER_AES_CCM_128:
1475 dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
1478 case TLS_CIPHER_SM4_CCM:
1479 dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1485 if (prot->version == TLS_1_3_VERSION ||
1486 prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
1487 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
1488 prot->iv_size + prot->salt_size);
1490 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1491 &dctx->iv[iv_offset] + prot->salt_size,
1495 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
1497 tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
1500 tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
1502 tls_ctx->rx.rec_seq, tlm->control, prot);
1505 sg_init_table(sgin, n_sgin);
1506 sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
1507 err = skb_to_sgvec(skb, &sgin[1],
1508 rxm->offset + prot->prepend_size,
1509 rxm->full_len - prot->prepend_size);
1515 sg_init_table(sgout, n_sgout);
1516 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1518 err = tls_setup_from_iter(out_iov, data_len,
1520 (n_sgout - 1 - tail_pages));
1522 goto fallback_to_reg_recv;
1524 if (prot->tail_size) {
1525 sg_unmark_end(&sgout[pages]);
1526 sg_set_buf(&sgout[pages + 1], &dctx->tail,
1528 sg_mark_end(&sgout[pages + 1]);
1530 } else if (out_sg) {
1531 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1533 goto fallback_to_reg_recv;
1536 fallback_to_reg_recv:
1542 /* Prepare and submit AEAD request */
1543 err = tls_do_decryption(sk, skb, sgin, sgout, dctx->iv,
1544 data_len + prot->tail_size, aead_req, darg);
1548 if (prot->tail_size)
1549 darg->tail = dctx->tail;
1551 /* Release the pages in case iov was mapped to pages */
1552 for (; pages > 0; pages--)
1553 put_page(sg_page(&sgout[pages]));
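/* Scatterlist layout used for the AEAD call above:
 *	sgin:  [0] AAD, [1..] the ciphertext mapped straight from the skb
 *	sgout: [0] AAD, [1..] user pages from out_iov (or out_sg, or the input
 *	       itself on the fallback path), plus one slot for the TLS 1.3
 *	       tail byte when tail_size is set
 */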
1559 static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1560 struct iov_iter *dest,
1561 struct tls_decrypt_arg *darg)
1563 struct tls_context *tls_ctx = tls_get_ctx(sk);
1564 struct tls_prot_info *prot = &tls_ctx->prot_info;
1565 struct strp_msg *rxm = strp_msg(skb);
1566 struct tls_msg *tlm = tls_msg(skb);
1569 if (tlm->decrypted) {
1571 darg->async = false;
1575 if (tls_ctx->rx_conf == TLS_HW) {
1576 err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
1582 darg->async = false;
1587 err = decrypt_internal(sk, skb, dest, NULL, darg);
1589 if (err == -EBADMSG)
1590 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
1595 /* If opportunistic TLS 1.3 ZC failed, retry without ZC */
1596 if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
1597 darg->tail != TLS_RECORD_TYPE_DATA)) {
1600 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
1601 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
1602 return decrypt_skb_update(sk, skb, dest, darg);
1606 pad = tls_padding_length(prot, skb, darg);
1610 rxm->full_len -= pad;
1611 rxm->offset += prot->prepend_size;
1612 rxm->full_len -= prot->overhead_size;
1615 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1620 int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1621 struct scatterlist *sgout)
1623 struct tls_decrypt_arg darg = { .zc = true, };
1625 return decrypt_internal(sk, skb, NULL, sgout, &darg);
1628 static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
1634 *control = tlm->control;
1638 err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1639 sizeof(*control), control);
1640 if (*control != TLS_RECORD_TYPE_DATA) {
1641 if (err || msg->msg_flags & MSG_CTRUNC)
1644 } else if (*control != tlm->control) {
1651 /* This function traverses the rx_list in the tls receive context and
1652  * copies the decrypted records into the buffer provided by the caller when
1653  * zero copy is not true. Further, records are removed from the rx_list if
1654  * this is not a peek case and the record has been consumed completely.
1656 static int process_rx_list(struct tls_sw_context_rx *ctx,
1664 struct sk_buff *skb = skb_peek(&ctx->rx_list);
1665 struct tls_msg *tlm;
1669 while (skip && skb) {
1670 struct strp_msg *rxm = strp_msg(skb);
1673 err = tls_record_content_type(msg, tlm, control);
1677 if (skip < rxm->full_len)
1680 skip = skip - rxm->full_len;
1681 skb = skb_peek_next(skb, &ctx->rx_list);
1684 while (len && skb) {
1685 struct sk_buff *next_skb;
1686 struct strp_msg *rxm = strp_msg(skb);
1687 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1691 err = tls_record_content_type(msg, tlm, control);
1695 if (!zc || (rxm->full_len - skip) > len) {
1696 err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1703 copied = copied + chunk;
1705 /* Consume the data from the record if this is a non-peek case */
1707 rxm->offset = rxm->offset + chunk;
1708 rxm->full_len = rxm->full_len - chunk;
1710 /* Return if there is unconsumed data in the record */
1711 if (rxm->full_len - skip)
1715 /* The remaining skip-bytes must lie in 1st record in rx_list.
1716 * So from the 2nd record, 'skip' should be 0.
1721 msg->msg_flags |= MSG_EOR;
1723 next_skb = skb_peek_next(skb, &ctx->rx_list);
1726 __skb_unlink(skb, &ctx->rx_list);
1735 return copied ? : err;
1739 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
1740 size_t len_left, size_t decrypted, ssize_t done,
1745 if (len_left <= decrypted)
1748 max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
1749 if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
1753 sk_flush_backlog(sk);
1756 int tls_sw_recvmsg(struct sock *sk,
1762 struct tls_context *tls_ctx = tls_get_ctx(sk);
1763 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1764 struct tls_prot_info *prot = &tls_ctx->prot_info;
1765 struct sk_psock *psock;
1766 unsigned char control = 0;
1767 ssize_t decrypted = 0;
1768 size_t flushed_at = 0;
1769 struct strp_msg *rxm;
1770 struct tls_msg *tlm;
1771 struct sk_buff *skb;
1774 int target, err = 0;
1776 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1777 bool is_peek = flags & MSG_PEEK;
1778 bool bpf_strp_enabled;
1781 if (unlikely(flags & MSG_ERRQUEUE))
1782 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1784 psock = sk_psock_get(sk);
1786 bpf_strp_enabled = sk_psock_strp_enabled(psock);
1788 /* If crypto failed, the connection is broken */
1789 err = ctx->async_wait.err;
1793 /* Process pending decrypted records. It must be non-zero-copy */
1794 err = process_rx_list(ctx, msg, &control, 0, len, false, is_peek);
1802 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1804 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1806 zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
1809 while (len && (decrypted + copied < target || ctx->recv_pkt)) {
1810 struct tls_decrypt_arg darg = {};
1811 int to_decrypt, chunk;
1813 err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT, timeo);
1816 chunk = sk_msg_recvmsg(sk, psock, msg, len,
1824 skb = ctx->recv_pkt;
1825 rxm = strp_msg(skb);
1828 to_decrypt = rxm->full_len - prot->overhead_size;
1830 if (zc_capable && to_decrypt <= len &&
1831 tlm->control == TLS_RECORD_TYPE_DATA)
1834 /* Do not use async mode if record is non-data */
1835 if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
1836 darg.async = ctx->async_capable;
1840 err = decrypt_skb_update(sk, skb, &msg->msg_iter, &darg);
1842 tls_err_abort(sk, -EBADMSG);
1846 async |= darg.async;
1848 /* If the type of records being processed is not known yet,
1849 * set it to record type just dequeued. If it is already known,
1850 * but does not match the record type just dequeued, go to end.
1851 * We always get record type here since for tls1.2, record type
1852 * is known just after record is dequeued from stream parser.
1853 * For tls1.3, we disable async.
1855 err = tls_record_content_type(msg, tlm, &control);
1859 /* periodically flush backlog, and feed strparser */
1860 tls_read_flush_backlog(sk, prot, len, to_decrypt,
1861 decrypted + copied, &flushed_at);
1863 ctx->recv_pkt = NULL;
1864 __strp_unpause(&ctx->strp);
1865 __skb_queue_tail(&ctx->rx_list, skb);
1868 /* TLS 1.2-only, to_decrypt must be text length */
1869 chunk = min_t(int, to_decrypt, len);
1875 /* TLS 1.3 may have updated the length by more than overhead */
1876 chunk = rxm->full_len;
1879 bool partially_consumed = chunk > len;
1881 if (bpf_strp_enabled) {
1882 /* BPF may try to queue the skb */
1883 __skb_unlink(skb, &ctx->rx_list);
1884 err = sk_psock_tls_strp_read(psock, skb);
1885 if (err != __SK_PASS) {
1886 rxm->offset = rxm->offset + rxm->full_len;
1888 if (err == __SK_DROP)
1892 __skb_queue_tail(&ctx->rx_list, skb);
1895 if (partially_consumed)
1898 err = skb_copy_datagram_msg(skb, rxm->offset,
1906 if (partially_consumed) {
1907 rxm->offset += chunk;
1908 rxm->full_len -= chunk;
1916 __skb_unlink(skb, &ctx->rx_list);
1919 /* Return full control message to userspace before trying
1920 * to parse another message type
1922 msg->msg_flags |= MSG_EOR;
1923 if (control != TLS_RECORD_TYPE_DATA)
1931 /* Wait for all previously submitted records to be decrypted */
1932 spin_lock_bh(&ctx->decrypt_compl_lock);
1933 reinit_completion(&ctx->async_wait.completion);
1934 pending = atomic_read(&ctx->decrypt_pending);
1935 spin_unlock_bh(&ctx->decrypt_compl_lock);
1937 ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1939 if (err >= 0 || err == -EINPROGRESS)
1946 /* Drain records from the rx_list & copy if required */
1947 if (is_peek || is_kvec)
1948 err = process_rx_list(ctx, msg, &control, copied,
1949 decrypted, false, is_peek);
1951 err = process_rx_list(ctx, msg, &control, 0,
1952 decrypted, true, is_peek);
1953 decrypted = max(err, 0);
1956 copied += decrypted;
1961 sk_psock_put(sk, psock);
1962 return copied ? : err;
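/* Illustrative userspace counterpart (not part of this file, adapted from
 * Documentation/networking/tls.rst) for picking up the record type that
 * tls_record_content_type() attaches above; @fd is a placeholder for a
 * connected kTLS socket:
 *
 *	char databuf[16384];
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec iov = { .iov_base = databuf, .iov_len = sizeof(databuf) };
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *	unsigned char record_type = 23;		// application data
 *	ssize_t n;
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	n = recvmsg(fd, &msg, 0);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	if (n > 0 && cmsg && cmsg->cmsg_level == SOL_TLS &&
 *	    cmsg->cmsg_type == TLS_GET_RECORD_TYPE)
 *		record_type = *(unsigned char *)CMSG_DATA(cmsg);
 */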
1965 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1966 struct pipe_inode_info *pipe,
1967 size_t len, unsigned int flags)
1969 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
1970 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1971 struct strp_msg *rxm = NULL;
1972 struct sock *sk = sock->sk;
1973 struct tls_msg *tlm;
1974 struct sk_buff *skb;
1983 timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
1985 from_queue = !skb_queue_empty(&ctx->rx_list);
1987 skb = __skb_dequeue(&ctx->rx_list);
1989 struct tls_decrypt_arg darg = {};
1991 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
1994 goto splice_read_end;
1996 skb = ctx->recv_pkt;
1998 err = decrypt_skb_update(sk, skb, NULL, &darg);
2000 tls_err_abort(sk, -EBADMSG);
2001 goto splice_read_end;
2005 rxm = strp_msg(skb);
2008 /* splice does not support reading control messages */
2009 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2011 goto splice_read_end;
2014 chunk = min_t(unsigned int, rxm->full_len, len);
2015 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2017 goto splice_read_end;
2020 ctx->recv_pkt = NULL;
2021 __strp_unpause(&ctx->strp);
2023 if (chunk < rxm->full_len) {
2024 __skb_queue_head(&ctx->rx_list, skb);
2026 rxm->full_len -= len;
2033 return copied ? : err;
2036 bool tls_sw_sock_is_readable(struct sock *sk)
2038 struct tls_context *tls_ctx = tls_get_ctx(sk);
2039 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2040 bool ingress_empty = true;
2041 struct sk_psock *psock;
2044 psock = sk_psock(sk);
2046 ingress_empty = list_empty(&psock->ingress_msg);
2049 return !ingress_empty || ctx->recv_pkt ||
2050 !skb_queue_empty(&ctx->rx_list);
2053 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2055 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2056 struct tls_prot_info *prot = &tls_ctx->prot_info;
2057 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2058 struct strp_msg *rxm = strp_msg(skb);
2059 struct tls_msg *tlm = tls_msg(skb);
2060 size_t cipher_overhead;
2061 size_t data_len = 0;
2064 /* Verify that we have a full TLS header, or wait for more data */
2065 if (rxm->offset + prot->prepend_size > skb->len)
2068 /* Sanity-check size of on-stack buffer. */
2069 if (WARN_ON(prot->prepend_size > sizeof(header))) {
2074 /* Linearize header to local buffer */
2075 ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
2080 tlm->control = header[0];
2082 data_len = ((header[4] & 0xFF) | (header[3] << 8));
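/* TLS record header just parsed (RFC 5246/8446):
 *	header[0]    - content type
 *	header[1..2] - legacy protocol version
 *	header[3..4] - payload length, big endian
 */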
2084 cipher_overhead = prot->tag_size;
2085 if (prot->version != TLS_1_3_VERSION &&
2086 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2087 cipher_overhead += prot->iv_size;
2089 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2094 if (data_len < cipher_overhead) {
2099 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2100 if (header[1] != TLS_1_2_VERSION_MINOR ||
2101 header[2] != TLS_1_2_VERSION_MAJOR) {
2106 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2107 TCP_SKB_CB(skb)->seq + rxm->offset);
2108 return data_len + TLS_HEADER_SIZE;
2111 tls_err_abort(strp->sk, ret);
2116 static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2118 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2119 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2121 ctx->recv_pkt = skb;
2124 ctx->saved_data_ready(strp->sk);
2127 static void tls_data_ready(struct sock *sk)
2129 struct tls_context *tls_ctx = tls_get_ctx(sk);
2130 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2131 struct sk_psock *psock;
2133 strp_data_ready(&ctx->strp);
2135 psock = sk_psock_get(sk);
2137 if (!list_empty(&psock->ingress_msg))
2138 ctx->saved_data_ready(sk);
2139 sk_psock_put(sk, psock);
2143 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2145 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2147 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2148 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2149 cancel_delayed_work_sync(&ctx->tx_work.work);
2152 void tls_sw_release_resources_tx(struct sock *sk)
2154 struct tls_context *tls_ctx = tls_get_ctx(sk);
2155 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2156 struct tls_rec *rec, *tmp;
2159 /* Wait for any pending async encryptions to complete */
2160 spin_lock_bh(&ctx->encrypt_compl_lock);
2161 ctx->async_notify = true;
2162 pending = atomic_read(&ctx->encrypt_pending);
2163 spin_unlock_bh(&ctx->encrypt_compl_lock);
2166 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2168 tls_tx_records(sk, -1);
2170 /* Free up unsent records in tx_list. First, free
2171  * the partially sent record, if any, at the head of tx_list.
2173 if (tls_ctx->partially_sent_record) {
2174 tls_free_partial_record(sk, tls_ctx);
2175 rec = list_first_entry(&ctx->tx_list,
2176 struct tls_rec, list);
2177 list_del(&rec->list);
2178 sk_msg_free(sk, &rec->msg_plaintext);
2182 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2183 list_del(&rec->list);
2184 sk_msg_free(sk, &rec->msg_encrypted);
2185 sk_msg_free(sk, &rec->msg_plaintext);
2189 crypto_free_aead(ctx->aead_send);
2190 tls_free_open_rec(sk);
2193 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2195 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2200 void tls_sw_release_resources_rx(struct sock *sk)
2202 struct tls_context *tls_ctx = tls_get_ctx(sk);
2203 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2205 kfree(tls_ctx->rx.rec_seq);
2206 kfree(tls_ctx->rx.iv);
2208 if (ctx->aead_recv) {
2209 kfree_skb(ctx->recv_pkt);
2210 ctx->recv_pkt = NULL;
2211 __skb_queue_purge(&ctx->rx_list);
2212 crypto_free_aead(ctx->aead_recv);
2213 strp_stop(&ctx->strp);
2214 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2215 * we still want to strp_stop(), but sk->sk_data_ready was never swapped.
2218 if (ctx->saved_data_ready) {
2219 write_lock_bh(&sk->sk_callback_lock);
2220 sk->sk_data_ready = ctx->saved_data_ready;
2221 write_unlock_bh(&sk->sk_callback_lock);
2226 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2228 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2230 strp_done(&ctx->strp);
2233 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2235 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2240 void tls_sw_free_resources_rx(struct sock *sk)
2242 struct tls_context *tls_ctx = tls_get_ctx(sk);
2244 tls_sw_release_resources_rx(sk);
2245 tls_sw_free_ctx_rx(tls_ctx);
2248 /* The work handler to transmit the encrypted records in tx_list */
2249 static void tx_work_handler(struct work_struct *work)
2251 struct delayed_work *delayed_work = to_delayed_work(work);
2252 struct tx_work *tx_work = container_of(delayed_work,
2253 struct tx_work, work);
2254 struct sock *sk = tx_work->sk;
2255 struct tls_context *tls_ctx = tls_get_ctx(sk);
2256 struct tls_sw_context_tx *ctx;
2258 if (unlikely(!tls_ctx))
2261 ctx = tls_sw_ctx_tx(tls_ctx);
2262 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2265 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2267 mutex_lock(&tls_ctx->tx_lock);
2269 tls_tx_records(sk, -1);
2271 mutex_unlock(&tls_ctx->tx_lock);
2274 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
2276 struct tls_rec *rec;
2278 rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
2282 return READ_ONCE(rec->tx_ready);
2285 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2287 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2289 /* Schedule the transmission if tx list is ready */
2290 if (tls_is_tx_ready(tx_ctx) &&
2291 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2292 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2295 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2297 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2299 write_lock_bh(&sk->sk_callback_lock);
2300 rx_ctx->saved_data_ready = sk->sk_data_ready;
2301 sk->sk_data_ready = tls_data_ready;
2302 write_unlock_bh(&sk->sk_callback_lock);
2304 strp_check_rcv(&rx_ctx->strp);
2307 void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
2309 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2311 rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
2312 tls_ctx->prot_info.version != TLS_1_3_VERSION;
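/* Decrypting straight into the user buffer only works when the record is
 * known to be plain application data with no padding: always the case for
 * TLS 1.2, and for TLS 1.3 only assumed when the peer advertised that it
 * does not pad records (rx_no_pad). decrypt_skb_update() retries without
 * zero-copy if that assumption turns out to be wrong.
 */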
2315 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2317 struct tls_context *tls_ctx = tls_get_ctx(sk);
2318 struct tls_prot_info *prot = &tls_ctx->prot_info;
2319 struct tls_crypto_info *crypto_info;
2320 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2321 struct tls_sw_context_rx *sw_ctx_rx = NULL;
2322 struct cipher_context *cctx;
2323 struct crypto_aead **aead;
2324 struct strp_callbacks cb;
2325 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2326 struct crypto_tfm *tfm;
2327 char *iv, *rec_seq, *key, *salt, *cipher_name;
2337 if (!ctx->priv_ctx_tx) {
2338 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2343 ctx->priv_ctx_tx = sw_ctx_tx;
2346 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2349 if (!ctx->priv_ctx_rx) {
2350 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2355 ctx->priv_ctx_rx = sw_ctx_rx;
2358 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2363 crypto_init_wait(&sw_ctx_tx->async_wait);
2364 spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
2365 crypto_info = &ctx->crypto_send.info;
2367 aead = &sw_ctx_tx->aead_send;
2368 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2369 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2370 sw_ctx_tx->tx_work.sk = sk;
2372 crypto_init_wait(&sw_ctx_rx->async_wait);
2373 spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
2374 crypto_info = &ctx->crypto_recv.info;
2376 skb_queue_head_init(&sw_ctx_rx->rx_list);
2377 aead = &sw_ctx_rx->aead_recv;
2380 switch (crypto_info->cipher_type) {
2381 case TLS_CIPHER_AES_GCM_128: {
2382 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2384 gcm_128_info = (void *)crypto_info;
2385 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2386 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2387 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2388 iv = gcm_128_info->iv;
2389 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2390 rec_seq = gcm_128_info->rec_seq;
2391 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2392 key = gcm_128_info->key;
2393 salt = gcm_128_info->salt;
2394 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2395 cipher_name = "gcm(aes)";
2398 case TLS_CIPHER_AES_GCM_256: {
2399 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2401 gcm_256_info = (void *)crypto_info;
2402 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2403 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2404 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2405 iv = gcm_256_info->iv;
2406 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2407 rec_seq = gcm_256_info->rec_seq;
2408 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2409 key = gcm_256_info->key;
2410 salt = gcm_256_info->salt;
2411 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2412 cipher_name = "gcm(aes)";
2415 case TLS_CIPHER_AES_CCM_128: {
2416 struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2418 ccm_128_info = (void *)crypto_info;
2419 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2420 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2421 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2422 iv = ccm_128_info->iv;
2423 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2424 rec_seq = ccm_128_info->rec_seq;
2425 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2426 key = ccm_128_info->key;
2427 salt = ccm_128_info->salt;
2428 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2429 cipher_name = "ccm(aes)";
2432 case TLS_CIPHER_CHACHA20_POLY1305: {
2433 struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
2435 chacha20_poly1305_info = (void *)crypto_info;
2437 tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
2438 iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
2439 iv = chacha20_poly1305_info->iv;
2440 rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
2441 rec_seq = chacha20_poly1305_info->rec_seq;
2442 keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
2443 key = chacha20_poly1305_info->key;
2444 salt = chacha20_poly1305_info->salt;
2445 salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
2446 cipher_name = "rfc7539(chacha20,poly1305)";
2449 case TLS_CIPHER_SM4_GCM: {
2450 struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
2452 sm4_gcm_info = (void *)crypto_info;
2453 nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2454 tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
2455 iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2456 iv = sm4_gcm_info->iv;
2457 rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
2458 rec_seq = sm4_gcm_info->rec_seq;
2459 keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
2460 key = sm4_gcm_info->key;
2461 salt = sm4_gcm_info->salt;
2462 salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
2463 cipher_name = "gcm(sm4)";
2466 case TLS_CIPHER_SM4_CCM: {
2467 struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
2469 sm4_ccm_info = (void *)crypto_info;
2470 nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2471 tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
2472 iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2473 iv = sm4_ccm_info->iv;
2474 rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
2475 rec_seq = sm4_ccm_info->rec_seq;
2476 keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
2477 key = sm4_ccm_info->key;
2478 salt = sm4_ccm_info->salt;
2479 salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
2480 cipher_name = "ccm(sm4)";
2488 if (crypto_info->version == TLS_1_3_VERSION) {
2490 prot->aad_size = TLS_HEADER_SIZE;
2491 prot->tail_size = 1;
2493 prot->aad_size = TLS_AAD_SPACE_SIZE;
2494 prot->tail_size = 0;
2497 /* Sanity-check the sizes for stack allocations. */
2498 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2499 rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
2500 prot->aad_size > TLS_MAX_AAD_SIZE) {
2505 prot->version = crypto_info->version;
2506 prot->cipher_type = crypto_info->cipher_type;
2507 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2508 prot->tag_size = tag_size;
2509 prot->overhead_size = prot->prepend_size +
2510 prot->tag_size + prot->tail_size;
2511 prot->iv_size = iv_size;
2512 prot->salt_size = salt_size;
2513 cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2518 /* Note: 128 & 256 bit salt are the same size */
2519 prot->rec_seq_size = rec_seq_size;
2520 memcpy(cctx->iv, salt, salt_size);
2521 memcpy(cctx->iv + salt_size, iv, iv_size);
2522 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2523 if (!cctx->rec_seq) {
2529 *aead = crypto_alloc_aead(cipher_name, 0, 0);
2530 if (IS_ERR(*aead)) {
2531 rc = PTR_ERR(*aead);
2537 ctx->push_pending_record = tls_sw_push_pending_record;
2539 rc = crypto_aead_setkey(*aead, key, keysize);
2544 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2549 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2551 tls_update_rx_zc_capable(ctx);
2552 sw_ctx_rx->async_capable =
2553 crypto_info->version != TLS_1_3_VERSION &&
2554 !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
2556 /* Set up strparser */
2557 memset(&cb, 0, sizeof(cb));
2558 cb.rcv_msg = tls_queue;
2559 cb.parse_msg = tls_read_size;
2561 strp_init(&sw_ctx_rx->strp, sk, &cb);
2567 crypto_free_aead(*aead);
2570 kfree(cctx->rec_seq);
2571 cctx->rec_seq = NULL;
2577 kfree(ctx->priv_ctx_tx);
2578 ctx->priv_ctx_tx = NULL;
2580 kfree(ctx->priv_ctx_rx);
2581 ctx->priv_ctx_rx = NULL;
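/* Illustrative userspace sketch (not part of this file, adapted from
 * Documentation/networking/tls.rst) of how the crypto_info consumed above is
 * handed to the kernel once the handshake has finished in userspace; @fd and
 * the iv/rec_seq/key/salt buffers are placeholders for handshake results:
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version		= TLS_1_2_VERSION,
 *		.info.cipher_type	= TLS_CIPHER_AES_GCM_128,
 *	};
 *
 *	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	// TLS_RX is set up the same way, using the read-direction secrets.
 */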