/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i]);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

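/* clean_acked_data callback: TCP reports how far the peer has ACKed, so
 * records that were fully acknowledged can be freed and the unacked
 * record counter advanced accordingly.
 */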
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

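/* Account size bytes taken from pfrag in the open record: extend the
 * last frag when the new bytes are contiguous with it, otherwise start
 * a new frag that references the page.
 */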
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		__skb_frag_set_page(frag, pfrag->page);
		skb_frag_off_set(frag, pfrag->offset);
		skb_frag_size_set(frag, size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_device_record_close(struct sock *sk,
				   struct tls_context *ctx,
				   struct tls_record_info *record,
				   struct page_frag *pfrag,
				   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	int ret;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (re-using a single buffer
	 * increases frag count)
	 * if we can't allocate memory now, steal some back from data
	 */
	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
					sk->sk_allocation))) {
		ret = 0;
		tls_append_frag(record, pfrag, prot->tag_size);
	} else {
		ret = prot->tag_size;
		if (record->len <= prot->overhead_size)
			return -ENOMEM;
	}

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type, prot->version);
	return ret;
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	skb_frag_off_set(frag, pfrag->offset);
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

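/* Make sure there is an open record with room reserved for the TLS
 * prepend, allocating a new record and refilling the page frag when
 * needed.
 */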
static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

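/* Copy user data destined for NIC encryption; the CPU will not read the
 * plaintext again, so use non-temporal (cache-bypassing) copies for the
 * cache-aligned bulk of the buffer.
 */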
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

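/* Main TX path for device offload: fill the open record from msg_iter,
 * close and push records as they reach the maximum record length, and
 * report how many bytes were consumed.
 */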
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
	struct tls_record_info *record = ctx->open_record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -EOPNOTSUPP;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		rc = tls_device_copy_data(page_address(pfrag->page) +
					  pfrag->offset, copy, msg_iter);
		if (rc)
			goto handle_error;
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (more) {
				tls_ctx->pending_open_record_frags =
						!!record->num_frags;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_device_record_close(sk, tls_ctx, record,
						     pfrag, record_type);
			if (rc) {
				if (rc > 0) {
					size += rc;
				} else {
					size = orig_size;
					destroy_record(record);
					ctx->open_record = NULL;
					break;
				}
			}

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int rc;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct iov_iter msg_iter;
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

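/* Look up the offload record covering TCP sequence number seq (used on
 * retransmissions), returning its record sequence number via p_record_sn
 * and caching the result as the retransmit hint for the next lookup.
 */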
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 *  And if it's not start marker, look if this seq number
		 * belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
		return;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

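/* Handle a driver-requested asynchronous RX resync: while the request is
 * in the asynchronous stage, log candidate record-header sequence numbers;
 * once the driver confirms, match the request against the log (or the
 * current header) and report whether a resync should be sent.
 */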
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);

	if (is_async) {
		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (between(*seq, req_seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	while (resync_async->loglen) {
		if (req_seq == resync_async->log[resync_async->loglen - 1] &&
		    atomic64_try_cmpxchg(&resync_async->req,
					 &resync_req, 0)) {
			resync_async->loglen = 0;
			*seq = req_seq;
			return true;
		}

		resync_async->loglen--;
	}

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		sock_data = tcp_inq(sk);
		if (sock_data > rcd_len) {
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
			return;
		}

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
		is_req_pending = resync_req;
		if (likely(!is_req_pending))
			return;

		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq))
			return;
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

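/* Software fallback for a record the device failed to decrypt fully:
 * decrypt the whole record into a temporary buffer via the SW path, then
 * copy the plaintext back only over the parts the device did not already
 * decrypt.
 */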
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

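/* Called for every received record: decide whether it was fully decrypted
 * by the device, fully encrypted (possibly warranting a resync), or mixed,
 * in which case the software fallback in tls_device_reencrypt() fixes it
 * up.
 */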
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm)
{
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);

	ctx->sw.decrypted |= is_decrypted;

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return 0;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, skb);
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
	}
}

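/* Set up TLS TX device offload for a socket: validate the crypto
 * parameters, allocate the offload context and a zero-length "start
 * marker" record, and hand the connection state to the netdev via
 * tls_dev_add().
 */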
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;

	if (!ctx)
		return -EINVAL;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record)
		return -ENOMEM;

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto free_offload_ctx;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	/* Sanity-check the rec_seq_size for stack allocations */
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto disable_cad;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
disable_cad:
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
	return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls12_crypto_info_aes_gcm_128 *info;
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	info = (void *)&ctx->crypto_recv.info;
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

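/* NETDEV_DOWN handler: detach every offloaded context bound to this
 * netdev, calling tls_dev_del() for the active directions and waiting
 * for any in-flight RX resync before dropping the device reference.
 */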
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);
		WRITE_ONCE(ctx->netdev, NULL);
		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
			usleep_range(10, 200);
		dev_put(netdev);
		list_del_init(&ctx->list);

		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call	= tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}