/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
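
/* Decrypt one TLS record (in place, or into @sgout when a destination
 * scatterlist is supplied), then strip the record header from the
 * strparser message and advance the expected RX sequence number.
 */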
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct sk_buff *skb,
			     gfp_t flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	struct aead_request *aead_req;

	int ret;
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(ctx->aead_recv);

	aead_req = kzalloc(req_size, flags);
	if (!aead_req)
		return -ENOMEM;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
	if (ret < 0)
		goto out;

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);

	ctx->decrypted = true;

	ctx->saved_data_ready(sk);

out:
	kfree(aead_req);
	return ret;
}
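
/* Shrink @sg to @target_size bytes, releasing and uncharging any pages
 * that no longer hold data.
 */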
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}
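
/* Trim both the plaintext and the encrypted scatterlists back to an
 * open record of @target_size plaintext bytes.
 */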
static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}
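
/* Grow the ciphertext scatterlist to hold @len bytes. */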
static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	return rc;
}
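
/* Grow the plaintext scatterlist to hold @len bytes, without coalescing
 * into fragments already committed to the open record.
 */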
static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	return rc;
}
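
/* Release and uncharge every page in @sg and reset the element count. */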
static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}
static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}
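
/* Encrypt the current open record. The first ciphertext fragment is
 * temporarily advanced past the TLS header so the AEAD writes payload
 * and tag after the prepended record header.
 */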
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context *ctx, size_t data_len,
			     gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(ctx->aead_send);
	struct aead_request *aead_req;
	int rc;

	aead_req = kzalloc(req_size, flags);
	if (!aead_req)
		return -ENOMEM;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	kfree(aead_req);
	return rc;
}
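
/* Seal the open record: build the AAD and the TLS record header,
 * encrypt the plaintext, and hand the ciphertext to the transport via
 * tls_push_sg().
 */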
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
			       sk->sk_allocation);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		return rc;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
	return rc;
}
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}
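
/* Pin user pages from @from and map them directly into @to, so the
 * record can be built without an intermediate copy.
 */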
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}
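
/* Copy @bytes from @from into the already-allocated plaintext
 * scatterlist, advancing the open-record fragment count as fragments
 * fill up.
 */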
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}
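
/* Build TLS records from the message payload. When a record can be
 * completed (full record, or end of message), the plaintext is mapped
 * zero-copy where possible; otherwise it is copied into the plaintext
 * scatterlist and pushed from there.
 */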
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}
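
/* sendpage() analogue of tls_sw_sendmsg(): queue the referenced page
 * directly as a plaintext fragment and push a record when it is full,
 * at EOR, or when the scatterlist is exhausted.
 */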
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}
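
/* Wait until the strparser has delivered a complete record, honouring
 * MSG_DONTWAIT, the receive timeout, pending signals and socket errors.
 */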
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}
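
/* Decrypt the record held in @skb. With a NULL @sgout the record is
 * decrypted in place, which may require a private scatterlist when the
 * skb data is shared or cloned.
 */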
static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		       struct scatterlist *sgout)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + tls_ctx->rx.iv_size];
	struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
	struct scatterlist *sgin = &sgin_arr[0];
	struct strp_msg *rxm = strp_msg(skb);
	int ret, nsg = ARRAY_SIZE(sgin_arr);
	char aad_recv[TLS_AAD_SPACE_SIZE];
	struct sk_buff *unused;

	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (ret < 0)
		return ret;

	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	if (!sgout) {
		nsg = skb_cow_data(skb, 0, &unused) + 1;
		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
		if (!sgin)
			return -ENOMEM;
		sgout = sgin;
	}

	sg_init_table(sgin, nsg);
	sg_set_buf(&sgin[0], aad_recv, sizeof(aad_recv));

	nsg = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);

	tls_make_aad(aad_recv,
		     rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq,
		     tls_ctx->rx.rec_seq_size,
		     ctx->control);

	ret = tls_do_decryption(sk, sgin, sgout, iv,
				rxm->full_len - tls_ctx->rx.overhead_size,
				skb, sk->sk_allocation);

	if (sgin != &sgin_arr[0])
		kfree(sgin);

	return ret;
}
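
/* Consume @len bytes of the current record. Returns true when the
 * record has been fully consumed and the strparser may run again.
 */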
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	strp_unpause(&ctx->strp);

	return true;
}
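
/* Receive path: deliver decrypted record payload to userspace,
 * surfacing the record type through a TLS_GET_RECORD_TYPE control
 * message and decrypting directly into user pages when possible.
 */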
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int err = 0;
	long timeo;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int page_count;
			int to_copy;

			page_count = iov_iter_npages(&msg->msg_iter,
						     MAX_SKB_FRAGS);
			to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
			if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
			    likely(!(flags & MSG_PEEK))) {
				struct scatterlist sgin[MAX_SKB_FRAGS + 1];
				char unused[21];
				int pages = 0;

				zc = true;
				sg_init_table(sgin, MAX_SKB_FRAGS + 1);
				sg_set_buf(&sgin[0], unused, 13);

				err = zerocopy_from_iter(sk, &msg->msg_iter,
							 to_copy, &pages,
							 &chunk, &sgin[1],
							 MAX_SKB_FRAGS, false);
				if (err < 0)
					goto fallback_to_reg_recv;

				err = decrypt_skb(sk, skb, sgin);
				for (; pages > 0; pages--)
					put_page(sg_page(&sgin[pages]));
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			} else {
fallback_to_reg_recv:
				err = decrypt_skb(sk, skb, NULL);
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		}
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}
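
/* Splice decrypted payload into a pipe. Only data records are
 * supported here; control records must be read via recvmsg().
 */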
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb(sk, skb, NULL);
		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}
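
/* Poll the underlying socket for write-side events, but report
 * readability based on whether a parsed record is queued.
 */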
unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}
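
/* strparser callback: parse the 5-byte TLS record header and return
 * the full record length, 0 to wait for more data, or a negative error
 * (which also aborts the connection).
 */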
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	char header[tls_ctx->rx.prepend_size];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct strp_msg *rxm;

	rxm = strp_msg(skb);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	strp->sk->sk_state_change(strp->sk);
}
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	strp_data_ready(&ctx->strp);
}
void tls_sw_free_resources(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);
	if (ctx->aead_recv) {
		if (ctx->recv_pkt) {
			kfree_skb(ctx->recv_pkt);
			ctx->recv_pkt = NULL;
		}
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}

	tls_free_both_sg(sk);

	kfree(ctx);
}
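
/* Install the software crypto state for one direction (@tx != 0 for
 * the transmit path): derive sizes from the cipher, copy the IV/salt
 * and record sequence, allocate the AEAD transform, and for RX attach
 * the strparser and data-ready hook.
 */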
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context *sw_ctx;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx->priv_ctx) {
		sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL);
		if (!sw_ctx) {
			rc = -ENOMEM;
			goto out;
		}
		crypto_init_wait(&sw_ctx->async_wait);
	} else {
		sw_ctx = ctx->priv_ctx;
	}

	ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;

	if (tx) {
		crypto_info = &ctx->crypto_send;
		cctx = &ctx->tx;
		aead = &sw_ctx->aead_send;
	} else {
		crypto_info = &ctx->crypto_recv;
		cctx = &ctx->rx;
		aead = &sw_ctx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(cctx->rec_seq, rec_seq, rec_seq_size);

	if (tx) {
		sg_init_table(sw_ctx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx->sg_encrypted_data));
		sg_init_table(sw_ctx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx->sg_plaintext_data));

		sg_init_table(sw_ctx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space,
			   sizeof(sw_ctx->aad_space));
		sg_unmark_end(&sw_ctx->sg_aead_in[1]);
		sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data);
		sg_init_table(sw_ctx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space,
			   sizeof(sw_ctx->aad_space));
		sg_unmark_end(&sw_ctx->sg_aead_out[1]);
		sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(*aead, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (!tx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	kfree(ctx->priv_ctx);
	ctx->priv_ctx = NULL;
out:
	return rc;
}