/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "tls.h"
#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

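/* Worker for tls_device_gc_list: for each context queued by
 * tls_device_queue_ctx_destruction(), tell the driver to drop its TX
 * offload state (if the netdev is still around) and free the context.
 */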
static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}
		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	if (unlikely(!refcount_dec_and_test(&ctx->refcount)))
		goto unlock;

	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);
unlock:
	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
		dev_hold(netdev);
	}
	dst_release(dst);
	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i], false);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

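/* TCP ACK processing callback, installed via clean_acked_data_enable():
 * drop every closed record that is now fully acknowledged by acked_seq,
 * and invalidate the retransmit hint if it has been acked as well.
 */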
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

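/* Ask the driver to re-synchronize its TX crypto state at TCP sequence
 * number @seq: mark the tail skb as end-of-record and pass the current
 * record sequence number down through tls_dev_resync().
 */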
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

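/* Add @size bytes at the current page_frag position to the open record:
 * extend the last frag when it is contiguous in the same page, otherwise
 * start a new frag and take a reference on the page.
 */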
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		__skb_frag_set_page(frag, pfrag->page);
		skb_frag_off_set(frag, pfrag->offset);
		skb_frag_size_set(frag, size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

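/* Close the open record: fix up end_seq, queue it on records_list for
 * retransmission lookups, mirror its frags into the sg table and hand
 * the data to TCP via tls_push_sg().
 */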
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_device_record_close(struct sock *sk,
				   struct tls_context *ctx,
				   struct tls_record_info *record,
				   struct page_frag *pfrag,
				   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	int ret;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (re-using a single buffer
	 * increases frag count)
	 * if we can't allocate memory now, steal some back from data
	 */
	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
					sk->sk_allocation))) {
		ret = 0;
		tls_append_frag(record, pfrag, prot->tag_size);
	} else {
		ret = prot->tag_size;
		if (record->len <= prot->overhead_size)
			return -ENOMEM;
	}

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type);
	return ret;
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	skb_frag_off_set(frag, pfrag->offset);
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

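/* Copy @bytes from the iterator while mostly bypassing the CPU cache:
 * copy up to a cache-line boundary normally, stream the bulk with
 * copy_from_iter_nocache(), then copy the unaligned tail normally.
 */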
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

union tls_iter_offset {
	struct iov_iter *msg_iter;
	int offset;
};

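/* tls_push_data() consumes either an iov_iter (msg_iter, copy path) or a
 * page + offset (zerocopy sendfile path, when a zc_page is passed in).
 */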
static int tls_push_data(struct sock *sk,
			 union tls_iter_offset iter_offset,
			 size_t size, int flags,
			 unsigned char record_type,
			 struct page *zc_page)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -EOPNOTSUPP;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;

		copy = min_t(size_t, size, max_open_record_len - record->len);
		if (copy && zc_page) {
			struct page_frag zc_pfrag;

			zc_pfrag.page = zc_page;
			zc_pfrag.offset = iter_offset.offset;
			zc_pfrag.size = copy;
			tls_append_frag(record, &zc_pfrag, copy);
		} else if (copy) {
			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);

			rc = tls_device_copy_data(page_address(pfrag->page) +
						  pfrag->offset, copy,
						  iter_offset.msg_iter);
			if (rc)
				goto handle_error;
			tls_append_frag(record, pfrag, copy);
		}

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_device_record_close(sk, tls_ctx, record,
						     pfrag, record_type);
			if (rc) {
				if (rc > 0) {
					size += rc;
				} else {
					size = orig_size;
					destroy_record(record);
					ctx->open_record = NULL;
					break;
				}
			}

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	union tls_iter_offset iter;
	int rc;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_process_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	iter.msg_iter = &msg->msg_iter;
	rc = tls_push_data(sk, iter, size, msg->msg_flags, record_type, NULL);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	union tls_iter_offset iter_offset;
	struct iov_iter msg_iter;
	char *kaddr;
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (tls_ctx->zerocopy_sendfile) {
		iter_offset.offset = offset;
		rc = tls_push_data(sk, iter_offset, size,
				   flags, TLS_RECORD_TYPE_DATA, page);
		goto out;
	}

	kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	iter_offset.msg_iter = &msg_iter;
	rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA,
			   NULL);
	kunmap(page);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

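/* Look up the offload record that covers TCP sequence number @seq, for
 * drivers handling retransmissions. Returns the record and reports its
 * record sequence number through @p_record_sn, caching the result as a
 * retransmit hint for the next lookup.
 */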
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 * And if it's not start marker, look if this seq number
		 * belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	union tls_iter_offset iter;
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	iter.msg_iter = &msg_iter;
	return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	rcu_read_unlock();
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

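/* Asynchronous RX resync (TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC): while
 * the request is marked async, only log record-header sequence numbers;
 * once the driver issues the real request, match it against the log and
 * report how many records the device is behind via @rcd_delta.
 */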
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;

	if (is_async) {
		/* shouldn't get to wraparound:
		 * too long in async stage, something bad happened
		 */
		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
			return false;

		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		resync_async->rcd_delta++;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	for (i = 0; i < resync_async->loglen; i++)
		if (req_seq == resync_async->log[i] &&
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}

	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u16 rcd_delta;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;
	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		sock_data = tcp_inq(sk);
		if (sock_data > rcd_len) {
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
			return;
		}

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
		is_req_pending = resync_req;
		if (likely(!is_req_pending))
			return;

		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

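/* A record arrived only partially decrypted by the device. Restore it to
 * full ciphertext: run the record through the SW AES-GCM path into a
 * scratch buffer and write the result back over the fragments the device
 * had already decrypted, so the regular SW RX path can then decrypt and
 * authenticate the whole record.
 */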
static int
tls_device_reencrypt(struct sock *sk, struct tls_sw_context_rx *sw_ctx)
{
	int err, offset, copy, data_len, pos;
	struct sk_buff *skb, *skb_iter;
	struct scatterlist sg[1];
	struct strp_msg *rxm;
	char *orig_buf, *buf;

	rxm = strp_msg(tls_strp_msg(sw_ctx));
	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	err = tls_strp_msg_cow(sw_ctx);
	if (unlikely(err))
		goto free_buf;

	skb = tls_strp_msg(sw_ctx);
	rxm = strp_msg(skb);
	offset = rxm->offset;

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb = tls_strp_msg(sw_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);

	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
		if (likely(is_encrypted || is_decrypted))
			return is_decrypted;

		/* After tls_device_down disables the offload, the next SKB will
		 * likely have initial fragments decrypted, and final ones not
		 * decrypted. We need to reencrypt that single SKB.
		 */
		return tls_device_reencrypt(sk, sw_ctx);
	}

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise reencrypt the partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return is_decrypted;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, sw_ctx);
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
	}
}

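/* Enable TX offload for a connected TLS socket: validate the cipher
 * (TLS 1.2 AES-GCM-128 only), set up the offload TX context with a
 * zero-length start marker record, install the clean-acked callback and
 * register the crypto state with the device via tls_dev_add().
 */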
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;

	if (!ctx)
		return -EINVAL;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto release_netdev;
	}

	/* Sanity-check the rec_seq_size for stack allocations */
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto release_netdev;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto free_rec_seq;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_offload_ctx;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
release_netdev:
	dev_put(netdev);
	return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls12_crypto_info_aes_gcm_128 *info;
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	info = (void *)&ctx->crypto_recv.info;
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

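/* NETDEV_DOWN handler: detach every offloaded context that belongs to
 * this device, switch TX to the SW fallback, disable RX resync, and ask
 * the driver to release its TX/RX state. The sockets keep working in a
 * degraded (non-offloaded) mode afterwards.
 */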
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		/* Stop offloaded TX and switch to the fallback.
		 * tls_is_sk_tx_device_offloaded will return false.
		 */
		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);

		/* Stop the RX and TX resync.
		 * tls_dev_resync must not be called after tls_dev_del.
		 */
		WRITE_ONCE(ctx->netdev, NULL);

		/* Start skipping the RX resync logic completely. */
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);

		/* Sync with inflight packets. After this point:
		 * TX: no non-encrypted packets will be passed to the driver.
		 * RX: resync requests from the driver will be ignored.
		 */
		synchronize_net();

		/* Release the offload context on the driver side. */
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW &&
		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);

		dev_put(netdev);

		/* Move the context to a separate list for two reasons:
		 * 1. When the context is deallocated, list_del is called.
		 * 2. It's no longer an offloaded context, so we don't want to
		 *    run offload-specific code on this context.
		 */
		spin_lock_irqsave(&tls_device_lock, flags);
		list_move_tail(&ctx->list, &tls_device_down_list);
		spin_unlock_irqrestore(&tls_device_lock, flags);

		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
		 * Now release the ref taken above.
		 */
		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if (netif_is_bond_master(dev))
			return NOTIFY_DONE;
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

int __init tls_device_init(void)
{
	return register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}