1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/skbuff.h>
7 #include <linux/module.h>
8 #include <linux/highmem.h>
11 #include <linux/netdevice.h>
12 #include <crypto/aes.h>
13 #include "chcr_ktls.h"
15 static LIST_HEAD(uld_ctx_list);
16 static DEFINE_MUTEX(dev_mutex);
18 /* chcr_get_nfrags_to_send: get the remaining nfrags after start offset
20 * @start: start offset.
21 * @len: how much data to send after @start
23 static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
25 struct skb_shared_info *si = skb_shinfo(skb);
26 u32 frag_size, skb_linear_data_len = skb_headlen(skb);
27 u8 nfrags = 0, frag_idx = 0;
30 /* if it's a linear skb then return 1 */
31 if (!skb_is_nonlinear(skb))
34 if (unlikely(start < skb_linear_data_len)) {
35 frag_size = min(len, skb_linear_data_len - start);
38 start -= skb_linear_data_len;
40 frag = &si->frags[frag_idx];
41 frag_size = skb_frag_size(frag);
42 while (start >= frag_size) {
45 frag = &si->frags[frag_idx];
46 frag_size = skb_frag_size(frag);
48 frag_size = min(len, skb_frag_size(frag) - start);
54 frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
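/* Illustrative walk-through with assumed values (not from the original
 * source): if skb_headlen() is 100, the skb has two 4096-byte frags,
 * start = 3000 and len = 2000, then start is first reduced by the linear
 * data (3000 - 100 = 2900); frag 0 holds bytes 0..4095 of the paged data,
 * so the first chunk is min(2000, 4096 - 2900) = 1196 bytes and the
 * remaining 804 bytes come from frag 1, giving nfrags = 2.
 */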
62 static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
64 * chcr_ktls_save_keys: calculate and save crypto keys.
65 * @tx_info - driver specific tls info.
66 * @crypto_info - tls crypto information.
67 * @direction - TX/RX direction.
68 * return - SUCCESS/FAILURE.
70 static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
71 struct tls_crypto_info *crypto_info,
72 enum tls_offload_ctx_dir direction)
74 int ck_size, key_ctx_size, mac_key_size, keylen, ghash_size, ret;
75 unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
76 struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
77 struct ktls_key_ctx *kctx = &tx_info->key_ctx;
78 struct crypto_aes_ctx aes_ctx;
79 unsigned char *key, *salt;
81 switch (crypto_info->cipher_type) {
82 case TLS_CIPHER_AES_GCM_128:
84 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
85 keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
86 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
87 tx_info->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
88 mac_key_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
89 tx_info->iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
90 tx_info->iv = be64_to_cpu(*(__be64 *)info_128_gcm->iv);
92 ghash_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
93 key = info_128_gcm->key;
94 salt = info_128_gcm->salt;
95 tx_info->record_no = *(u64 *)info_128_gcm->rec_seq;
97 /* The SCMD fields used when encrypting a full TLS
98 * record. It's a one-time calculation till the
101 tx_info->scmd0_seqno_numivs =
102 SCMD_SEQ_NO_CTRL_V(CHCR_SCMD_SEQ_NO_CTRL_64BIT) |
103 SCMD_CIPH_AUTH_SEQ_CTRL_F |
104 SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_TLS) |
105 SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
106 SCMD_AUTH_MODE_V(CHCR_SCMD_AUTH_MODE_GHASH) |
107 SCMD_IV_SIZE_V(TLS_CIPHER_AES_GCM_128_IV_SIZE >> 1) |
110 /* keys will be sent inline. */
111 tx_info->scmd0_ivgen_hdrlen = SCMD_KEY_CTX_INLINE_F;
113 /* The SCMD fields used when encrypting a partial TLS
114 * record (no trailer and possibly a truncated payload).
116 tx_info->scmd0_short_seqno_numivs =
117 SCMD_CIPH_AUTH_SEQ_CTRL_F |
118 SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) |
119 SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_CTR) |
120 SCMD_IV_SIZE_V(AES_BLOCK_LEN >> 1);
122 tx_info->scmd0_short_ivgen_hdrlen =
123 tx_info->scmd0_ivgen_hdrlen | SCMD_AADIVDROP_F;
128 pr_err("GCM: cipher type 0x%x not supported\n",
129 crypto_info->cipher_type);
134 key_ctx_size = CHCR_KTLS_KEY_CTX_LEN +
135 roundup(keylen, 16) + ghash_size;
136 /* Calculate the H = CIPH(K, 0 repeated 16 times).
137 * It will go in key context
140 ret = aes_expandkey(&aes_ctx, key, keylen);
144 memset(ghash_h, 0, ghash_size);
145 aes_encrypt(&aes_ctx, ghash_h, ghash_h);
146 memzero_explicit(&aes_ctx, sizeof(aes_ctx));
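/* ghash_h now holds the GCM hash subkey H = AES_K(0^128); it is copied
 * right after the raw key below so the hardware has it available for GHASH.
 */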
148 /* fill the Key context */
149 if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
150 kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
158 memcpy(kctx->salt, salt, tx_info->salt_size);
159 memcpy(kctx->key, key, keylen);
160 memcpy(kctx->key + keylen, ghash_h, ghash_size);
161 tx_info->key_ctx_len = key_ctx_size;
168 * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
170 * @tx_info - driver specific tls info.
171 * @atid - connection active tid.
172 * return - send success/failure.
174 static int chcr_ktls_act_open_req(struct sock *sk,
175 struct chcr_ktls_info *tx_info,
178 struct inet_sock *inet = inet_sk(sk);
179 struct cpl_t6_act_open_req *cpl6;
180 struct cpl_act_open_req *cpl;
187 skb = alloc_skb(len, GFP_KERNEL);
190 /* mark it a control pkt */
191 set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
193 cpl6 = __skb_put_zero(skb, len);
194 cpl = (struct cpl_act_open_req *)cpl6;
196 qid_atid = TID_QID_V(tx_info->rx_qid) |
198 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid));
199 cpl->local_port = inet->inet_sport;
200 cpl->peer_port = inet->inet_dport;
201 cpl->local_ip = inet->inet_rcv_saddr;
202 cpl->peer_ip = inet->inet_daddr;
204 /* fill first 64 bit option field. */
205 options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
206 SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
207 cpl->opt0 = cpu_to_be64(options);
209 /* next 64 bit option field. */
211 TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
212 cpl->opt2 = htonl(options);
214 return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
217 #if IS_ENABLED(CONFIG_IPV6)
219 * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
221 * @tx_info - driver specific tls info.
222 * @atid - connection active tid.
223 * return - send success/failure.
225 static int chcr_ktls_act_open_req6(struct sock *sk,
226 struct chcr_ktls_info *tx_info,
229 struct inet_sock *inet = inet_sk(sk);
230 struct cpl_t6_act_open_req6 *cpl6;
231 struct cpl_act_open_req6 *cpl;
238 skb = alloc_skb(len, GFP_KERNEL);
241 /* mark it a control pkt */
242 set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
244 cpl6 = __skb_put_zero(skb, len);
245 cpl = (struct cpl_act_open_req6 *)cpl6;
247 qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
248 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
249 cpl->local_port = inet->inet_sport;
250 cpl->peer_port = inet->inet_dport;
251 cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
252 cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
253 cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
254 cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];
256 /* first 64 bit option field. */
257 options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
258 SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
259 cpl->opt0 = cpu_to_be64(options);
260 /* next 64 bit option field. */
262 TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
263 cpl->opt2 = htonl(options);
265 return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
267 #endif /* #if IS_ENABLED(CONFIG_IPV6) */
270 * chcr_setup_connection: create a TCB entry so that TP will form tcp packets.
272 * @tx_info - driver specific tls info.
273 * return: NET_TX_OK/NET_XMIT_DROP
275 static int chcr_setup_connection(struct sock *sk,
276 struct chcr_ktls_info *tx_info)
278 struct tid_info *t = &tx_info->adap->tids;
281 atid = cxgb4_alloc_atid(t, tx_info);
285 tx_info->atid = atid;
287 if (tx_info->ip_family == AF_INET) {
288 ret = chcr_ktls_act_open_req(sk, tx_info, atid);
289 #if IS_ENABLED(CONFIG_IPV6)
291 ret = cxgb4_clip_get(tx_info->netdev, (const u32 *)
292 &sk->sk_v6_rcv_saddr,
296 ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
300 /* if the return type is NET_XMIT_CN, the msg will be sent but delayed; mark it
301 * as success. For any other return type, clear the atid and return that failure.
304 if (ret == NET_XMIT_CN) {
307 #if IS_ENABLED(CONFIG_IPV6)
308 /* clear clip entry */
309 if (tx_info->ip_family == AF_INET6)
310 cxgb4_clip_release(tx_info->netdev,
312 &sk->sk_v6_rcv_saddr,
315 cxgb4_free_atid(t, atid);
323 * chcr_set_tcb_field: update tcb fields.
324 * @tx_info - driver specific tls info.
326 * @mask - TCB word related mask.
327 * @val - TCB word related value.
328 * @no_reply - set 1 if not looking for TP response.
330 static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
331 u64 mask, u64 val, int no_reply)
333 struct cpl_set_tcb_field *req;
336 skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
340 req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
341 INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, tx_info->tid);
342 req->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
343 NO_REPLY_V(no_reply));
344 req->word_cookie = htons(TCB_WORD_V(word));
345 req->mask = cpu_to_be64(mask);
346 req->val = cpu_to_be64(val);
348 set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
349 return cxgb4_ofld_send(tx_info->netdev, skb);
353 * chcr_ktls_mark_tcb_close: mark tcb state as CLOSED
354 * @tx_info - driver specific tls info.
355 * return: NET_TX_OK/NET_XMIT_DROP.
357 static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
359 return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
360 TCB_T_STATE_V(TCB_T_STATE_M),
361 CHCR_TCB_STATE_CLOSED, 1);
365 * chcr_ktls_dev_del: callback for tls_dev_del.
366 * Remove the tid and l2t entry and close the connection.
367 * Called once per connection.
368 * @netdev - net device.
369 * @tls_ctx - tls context.
370 * @direction - TX/RX crypto direction
372 static void chcr_ktls_dev_del(struct net_device *netdev,
373 struct tls_context *tls_ctx,
374 enum tls_offload_ctx_dir direction)
376 struct chcr_ktls_ofld_ctx_tx *tx_ctx =
377 chcr_get_ktls_tx_context(tls_ctx);
378 struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
379 struct ch_ktls_port_stats_debug *port_stats;
384 /* clear l2t entry */
386 cxgb4_l2t_release(tx_info->l2te);
388 #if IS_ENABLED(CONFIG_IPV6)
389 /* clear clip entry */
390 if (tx_info->ip_family == AF_INET6)
391 cxgb4_clip_release(netdev, (const u32 *)
392 &tx_info->sk->sk_v6_rcv_saddr,
397 if (tx_info->tid != -1) {
398 /* clear tcb state and then release tid */
399 chcr_ktls_mark_tcb_close(tx_info);
400 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
401 tx_info->tid, tx_info->ip_family);
404 port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
405 atomic64_inc(&port_stats->ktls_tx_connection_close);
407 tx_ctx->chcr_info = NULL;
408 /* release module refcount */
409 module_put(THIS_MODULE);
413 * chcr_ktls_dev_add: callback for tls_dev_add.
414 * Create a tcb entry for TP, add an l2t entry for the connection, and
415 * generate the crypto keys and save them locally.
416 * @netdev - net device.
417 * @tls_ctx - tls context.
418 * @direction - TX/RX crypto direction
419 * return: SUCCESS/FAILURE.
421 static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
422 enum tls_offload_ctx_dir direction,
423 struct tls_crypto_info *crypto_info,
424 u32 start_offload_tcp_sn)
426 struct tls_context *tls_ctx = tls_get_ctx(sk);
427 struct ch_ktls_port_stats_debug *port_stats;
428 struct chcr_ktls_ofld_ctx_tx *tx_ctx;
429 struct chcr_ktls_info *tx_info;
430 struct dst_entry *dst;
431 struct adapter *adap;
432 struct port_info *pi;
437 tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
439 pi = netdev_priv(netdev);
441 port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
442 atomic64_inc(&port_stats->ktls_tx_connection_open);
444 if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
445 pr_err("not expecting for RX direction\n");
449 if (tx_ctx->chcr_info)
452 tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
457 spin_lock_init(&tx_info->lock);
458 /* initialize tid and atid to -1; 0 is also a valid id. */
462 tx_info->adap = adap;
463 tx_info->netdev = netdev;
464 tx_info->first_qset = pi->first_qset;
465 tx_info->tx_chan = pi->tx_chan;
466 tx_info->smt_idx = pi->smt_idx;
467 tx_info->port_id = pi->port_id;
468 tx_info->prev_ack = 0;
469 tx_info->prev_win = 0;
471 tx_info->rx_qid = chcr_get_first_rx_qid(adap);
472 if (unlikely(tx_info->rx_qid < 0))
475 tx_info->prev_seq = start_offload_tcp_sn;
476 tx_info->tcp_start_seq_number = start_offload_tcp_sn;
478 /* save crypto keys */
479 ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
484 if (sk->sk_family == AF_INET) {
485 memcpy(daaddr, &sk->sk_daddr, 4);
486 tx_info->ip_family = AF_INET;
487 #if IS_ENABLED(CONFIG_IPV6)
489 if (!sk->sk_ipv6only &&
490 ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
491 memcpy(daaddr, &sk->sk_daddr, 4);
492 tx_info->ip_family = AF_INET;
494 memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
495 tx_info->ip_family = AF_INET6;
500 /* get the l2t index */
501 dst = sk_dst_get(sk);
503 pr_err("DST entry not found\n");
506 n = dst_neigh_lookup(dst, daaddr);
508 pr_err("neighbour not found\n");
512 tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);
517 if (!tx_info->l2te) {
518 pr_err("l2t entry not found\n");
522 /* Driver shouldn't be removed while any connection exists */
523 if (!try_module_get(THIS_MODULE))
526 init_completion(&tx_info->completion);
527 /* create a filter and call cxgb4_l2t_send to send the packet out, which
528 * will take care of updating l2t entry in hw if not already done.
530 tx_info->open_state = CH_KTLS_OPEN_PENDING;
532 if (chcr_setup_connection(sk, tx_info))
536 wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
537 spin_lock_bh(&tx_info->lock);
538 if (tx_info->open_state) {
539 /* need to wait for hw response, can't free tx_info yet. */
540 if (tx_info->open_state == CH_KTLS_OPEN_PENDING)
541 tx_info->pending_close = true;
543 spin_unlock_bh(&tx_info->lock);
544 /* if in pending close, release the lock after the cleanup */
547 spin_unlock_bh(&tx_info->lock);
550 reinit_completion(&tx_info->completion);
551 /* mark it pending for hw response */
552 tx_info->open_state = CH_KTLS_OPEN_PENDING;
554 if (chcr_init_tcb_fields(tx_info))
558 wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
559 spin_lock_bh(&tx_info->lock);
560 if (tx_info->open_state) {
561 /* need to wait for hw response, can't free tx_info yet. */
562 tx_info->pending_close = true;
563 /* release the lock after cleanup */
566 spin_unlock_bh(&tx_info->lock);
568 if (!cxgb4_check_l2t_valid(tx_info->l2te))
571 atomic64_inc(&port_stats->ktls_tx_ctx);
572 tx_ctx->chcr_info = tx_info;
577 chcr_ktls_mark_tcb_close(tx_info);
578 #if IS_ENABLED(CONFIG_IPV6)
579 /* clear clip entry */
580 if (tx_info->ip_family == AF_INET6)
581 cxgb4_clip_release(netdev, (const u32 *)
582 &sk->sk_v6_rcv_saddr,
585 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
586 tx_info->tid, tx_info->ip_family);
589 /* release module refcount */
590 module_put(THIS_MODULE);
592 cxgb4_l2t_release(tx_info->l2te);
594 if (tx_info->pending_close)
595 spin_unlock_bh(&tx_info->lock);
599 atomic64_inc(&port_stats->ktls_tx_connection_fail);
604 * chcr_init_tcb_fields: Initialize tcb fields to handle TCP seq number
606 * @tx_info - driver specific tls info.
607 * return: NET_TX_OK/NET_XMIT_DROP
609 static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
613 /* set tcb in offload and bypass */
615 chcr_set_tcb_field(tx_info, TCB_T_FLAGS_W,
616 TCB_T_FLAGS_V(TF_CORE_BYPASS_F | TF_NON_OFFLOAD_F),
617 TCB_T_FLAGS_V(TF_CORE_BYPASS_F), 1);
620 /* reset snd_una and snd_next fields in tcb */
621 ret = chcr_set_tcb_field(tx_info, TCB_SND_UNA_RAW_W,
622 TCB_SND_NXT_RAW_V(TCB_SND_NXT_RAW_M) |
623 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
629 ret = chcr_set_tcb_field(tx_info, TCB_SND_MAX_RAW_W,
630 TCB_SND_MAX_RAW_V(TCB_SND_MAX_RAW_M),
635 /* update the l2t index and request a tp reply to confirm the tcb is
636 * initialised to handle tx traffic.
638 ret = chcr_set_tcb_field(tx_info, TCB_L2T_IX_W,
639 TCB_L2T_IX_V(TCB_L2T_IX_M),
640 TCB_L2T_IX_V(tx_info->l2te->idx), 0);
645 * chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
647 static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
648 unsigned char *input)
650 const struct cpl_act_open_rpl *p = (void *)input;
651 struct chcr_ktls_info *tx_info = NULL;
652 unsigned int atid, tid, status;
656 status = AOPEN_STATUS_G(ntohl(p->atid_status));
657 atid = TID_TID_G(AOPEN_ATID_G(ntohl(p->atid_status)));
660 tx_info = lookup_atid(t, atid);
662 if (!tx_info || tx_info->atid != atid) {
663 pr_err("%s: incorrect tx_info or atid\n", __func__);
667 cxgb4_free_atid(t, atid);
670 spin_lock(&tx_info->lock);
671 /* HW response is very close, finish pending cleanup */
672 if (tx_info->pending_close) {
673 spin_unlock(&tx_info->lock);
675 /* it's a late success, tcb status is established,
678 chcr_ktls_mark_tcb_close(tx_info);
679 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
680 tid, tx_info->ip_family);
688 cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
689 tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
691 tx_info->open_state = CH_KTLS_OPEN_FAILURE;
693 spin_unlock(&tx_info->lock);
695 complete(&tx_info->completion);
700 * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
702 static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
704 const struct cpl_set_tcb_rpl *p = (void *)input;
705 struct chcr_ktls_info *tx_info = NULL;
712 tx_info = lookup_tid(t, tid);
714 if (!tx_info || tx_info->tid != tid) {
715 pr_err("%s: incorrect tx_info or tid\n", __func__);
719 spin_lock(&tx_info->lock);
720 if (tx_info->pending_close) {
721 spin_unlock(&tx_info->lock);
725 tx_info->open_state = false;
726 spin_unlock(&tx_info->lock);
728 complete(&tx_info->completion);
732 static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
733 u32 tid, void *pos, u16 word,
734 struct sge_eth_txq *q, u64 mask,
737 struct cpl_set_tcb_field_core *cpl;
738 struct ulptx_idata *idata;
739 struct ulp_txpkt *txpkt;
743 txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
744 ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
745 ULP_TXPKT_FID_V(q->q.cntxt_id) |
747 txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
749 /* ULPTX_IDATA sub-command */
750 idata = (struct ulptx_idata *)(txpkt + 1);
751 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
752 idata->len = htonl(sizeof(*cpl));
756 /* CPL_SET_TCB_FIELD */
757 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
758 cpl->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
760 cpl->word_cookie = htons(TCB_WORD_V(word));
761 cpl->mask = cpu_to_be64(mask);
762 cpl->val = cpu_to_be64(val);
765 idata = (struct ulptx_idata *)(cpl + 1);
766 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
767 idata->len = htonl(0);
775 * chcr_write_cpl_set_tcb_ulp: update tcb values.
776 * TCB is responsible for creating tcp headers, so all the related values
777 * should be correctly updated.
778 * @tx_info - driver specific tls info.
779 * @q - tx queue on which packet is going out.
780 * @tid - TCB identifier.
781 * @pos - current index where we should start writing.
783 * @mask - TCB word related mask.
784 * @val - TCB word related value.
785 * @reply - set 1 if looking for TP response.
786 * return - next position to write.
788 static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
789 struct sge_eth_txq *q, u32 tid,
790 void *pos, u16 word, u64 mask,
793 int left = (void *)q->q.stat - pos;
795 if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
801 __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
804 return chcr_copy_to_txd(buf, &q->q, pos,
805 CHCR_SET_TCB_FIELD_LEN);
809 pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
812 /* check again if we are at the end of the queue */
813 if (left == CHCR_SET_TCB_FIELD_LEN)
820 * chcr_ktls_xmit_tcb_cpls: update tcb entry so that TP will create the header
821 * with updated values like tcp seq, ack, window etc.
822 * @tx_info - driver specific tls info.
827 * return: NETDEV_TX_BUSY/NET_TX_OK.
829 static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
830 struct sge_eth_txq *q, u64 tcp_seq,
831 u64 tcp_ack, u64 tcp_win, bool offset)
833 bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
834 struct ch_ktls_port_stats_debug *port_stats;
835 u32 len, cpl = 0, ndesc, wr_len, wr_mid = 0;
836 struct fw_ulptx_wr *wr;
840 wr_len = sizeof(*wr);
841 /* there can be max 4 cpls, check if we have enough credits */
842 len = wr_len + 4 * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
843 ndesc = DIV_ROUND_UP(len, 64);
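/* a flit is 8 bytes and an SGE Tx descriptor is 64 bytes (8 flits), hence
 * the divide by 64 to get the descriptor count.
 */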
845 credits = chcr_txq_avail(&q->q) - ndesc;
846 if (unlikely(credits < 0)) {
847 chcr_eth_txq_stop(q);
848 return NETDEV_TX_BUSY;
851 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
852 chcr_eth_txq_stop(q);
853 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
856 pos = &q->q.desc[q->q.pidx];
857 /* make space for WR, we'll fill it later when we know all the cpls
858 * being sent out and have complete length.
862 /* update tx_max if it's a re-transmit or the first wr */
863 if (first_wr || tcp_seq != tx_info->prev_seq) {
864 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
866 TCB_TX_MAX_V(TCB_TX_MAX_M),
867 TCB_TX_MAX_V(tcp_seq), 0);
870 /* reset snd una if it's a re-transmit pkt */
871 if (tcp_seq != tx_info->prev_seq || offset) {
874 &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
875 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
879 TCB_SND_UNA_RAW_V(0), 0);
880 if (tcp_seq != tx_info->prev_seq)
881 atomic64_inc(&port_stats->ktls_tx_ooo);
885 if (first_wr || tx_info->prev_ack != tcp_ack) {
886 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
888 TCB_RCV_NXT_V(TCB_RCV_NXT_M),
889 TCB_RCV_NXT_V(tcp_ack), 0);
890 tx_info->prev_ack = tcp_ack;
893 /* update receive window */
894 if (first_wr || tx_info->prev_win != tcp_win) {
895 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
897 TCB_RCV_WND_V(TCB_RCV_WND_M),
898 TCB_RCV_WND_V(tcp_win), 0);
899 tx_info->prev_win = tcp_win;
904 /* get the actual length */
905 len = wr_len + cpl * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
907 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
909 /* fill len in wr field */
910 wr->flowid_len16 = htonl(wr_mid |
911 FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
913 ndesc = DIV_ROUND_UP(len, 64);
914 chcr_txq_advance(&q->q, ndesc);
915 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
921 * chcr_ktls_get_tx_flits
922 * returns the number of flits to be sent out; this includes the key context length, WR
923 * size and skb fragments.
926 chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len)
928 return chcr_sgl_len(nr_frags) +
929 DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
933 * chcr_ktls_check_tcp_options: To check if there is any TCP option available
934 * other than timestamp.
935 * @skb - skb contains partial record.
939 chcr_ktls_check_tcp_options(struct tcphdr *tcp)
941 int cnt, opt, optlen;
944 cp = (u_char *)(tcp + 1);
945 cnt = (tcp->doff << 2) - sizeof(struct tcphdr);
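/* tcp->doff counts 32-bit words, so doff << 2 is the header length in
 * bytes; e.g. doff = 8 means a 32-byte header, i.e. 12 bytes of options
 * to scan below.
 */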
946 for (; cnt > 0; cnt -= optlen, cp += optlen) {
948 if (opt == TCPOPT_EOL)
950 if (opt == TCPOPT_NOP) {
956 if (optlen < 2 || optlen > cnt)
970 * chcr_ktls_write_tcp_options: TP can't send out all the options, we need to
971 * send them out separately.
972 * @tx_info - driver specific tls info.
973 * @skb - skb contains partial record.
975 * @tx_chan - channel number.
976 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
979 chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
980 struct sge_eth_txq *q, uint32_t tx_chan)
982 struct fw_eth_tx_pkt_wr *wr;
983 struct cpl_tx_pkt_core *cpl;
984 u32 ctrl, iplen, maclen;
996 iplen = skb_network_header_len(skb);
997 maclen = skb_mac_header_len(skb);
999 /* packet length = eth hdr len + ip hdr len + tcp hdr len
1000 * (including options).
1002 pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
1004 ctrl = sizeof(*cpl) + pktlen;
1005 len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
1006 /* check how many descriptors needed */
1007 ndesc = DIV_ROUND_UP(len16, 4);
1009 credits = chcr_txq_avail(&q->q) - ndesc;
1010 if (unlikely(credits < 0)) {
1011 chcr_eth_txq_stop(q);
1012 return NETDEV_TX_BUSY;
1015 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1016 chcr_eth_txq_stop(q);
1017 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1020 pos = &q->q.desc[q->q.pidx];
1023 /* Firmware work request header */
1024 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1025 FW_WR_IMMDLEN_V(ctrl));
1027 wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1030 cpl = (void *)(wr + 1);
1033 cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | TXPKT_INTF_V(tx_chan) |
1034 TXPKT_PF_V(tx_info->adap->pf));
1036 cpl->len = htons(pktlen);
1038 memcpy(buf, skb->data, pktlen);
1039 if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) {
1040 /* we need to correct ip header len */
1041 ip = (struct iphdr *)(buf + maclen);
1042 ip->tot_len = htons(pktlen - maclen);
1043 cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
1045 ip6 = (struct ipv6hdr *)(buf + maclen);
1046 ip6->payload_len = htons(pktlen - maclen - iplen);
1047 cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
1050 cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
1051 TXPKT_IPHDR_LEN_V(iplen);
1052 /* checksum offload */
1053 cpl->ctrl1 = cpu_to_be64(cntrl1);
1057 /* now take care of the tcp header; if fin is not set then clear the push
1058 * bit as well, and if fin is set, it will be sent last, so we
1059 * need to update the tcp sequence number as per the last packet.
1061 tcp = (struct tcphdr *)(buf + maclen + iplen);
1066 tcp->seq = htonl(tx_info->prev_seq);
1068 chcr_copy_to_txd(buf, &q->q, pos, pktlen);
1070 chcr_txq_advance(&q->q, ndesc);
1071 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
1076 * chcr_ktls_xmit_wr_complete: This sends out the complete record. If an skb
1077 * received has a partial end part of the record, send out the complete record so
1078 * that the crypto block will be able to generate the TAG/HASH.
1079 * @skb - segment which has complete or partial end part.
1080 * @tx_info - driver specific tls info.
1083 * @tcp_push - tcp push bit.
1084 * @mss - segment size.
1085 * return: NETDEV_TX_BUSY/NET_TX_OK.
1087 static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
1088 struct chcr_ktls_info *tx_info,
1089 struct sge_eth_txq *q, u32 tcp_seq,
1090 bool is_last_wr, u32 data_len,
1091 u32 skb_offset, u32 nfrags,
1092 bool tcp_push, u32 mss)
1094 u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
1095 struct adapter *adap = tx_info->adap;
1096 int credits, left, last_desc;
1097 struct tx_sw_desc *sgl_sdesc;
1098 struct cpl_tx_data *tx_data;
1099 struct cpl_tx_sec_pdu *cpl;
1100 struct ulptx_idata *idata;
1101 struct ulp_txpkt *ulptx;
1102 struct fw_ulptx_wr *wr;
1106 /* get the number of flits required */
1107 flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);
1108 /* number of descriptors */
1109 ndesc = chcr_flits_to_desc(flits);
1110 /* check if enough credits available */
1111 credits = chcr_txq_avail(&q->q) - ndesc;
1112 if (unlikely(credits < 0)) {
1113 chcr_eth_txq_stop(q);
1114 return NETDEV_TX_BUSY;
1117 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1118 /* Credits are below the threshold values, stop the queue after
1119 * injecting the Work Request for this packet.
1121 chcr_eth_txq_stop(q);
1122 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1125 last_desc = q->q.pidx + ndesc - 1;
1126 if (last_desc >= q->q.size)
1127 last_desc -= q->q.size;
1128 sgl_sdesc = &q->q.sdesc[last_desc];
1130 if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1131 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1133 return NETDEV_TX_BUSY;
1139 pos = &q->q.desc[q->q.pidx];
1140 end = (u64 *)pos + flits;
1143 /* WR will need len16 */
1144 len16 = DIV_ROUND_UP(flits, 2);
1145 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1146 wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1151 ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
1152 ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
1153 ULP_TXPKT_FID_V(q->q.cntxt_id) |
1155 ulptx->len = htonl(len16 - 1);
1156 /* ULPTX_IDATA sub-command */
1157 idata = (struct ulptx_idata *)(ulptx + 1);
1158 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
1159 /* idata length will include cpl_tx_sec_pdu + key context size +
1160 * cpl_tx_data header.
1162 idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
1165 cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
1166 cpl->op_ivinsrtofst =
1167 htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
1168 CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
1169 CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
1170 CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
1171 cpl->pldlen = htonl(data_len);
1173 /* encryption should start after tls header size + iv size */
1174 cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
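/* the offsets in CPL_TX_SEC_PDU appear to be 1-based: with a 5-byte TLS
 * record header and an 8-byte explicit IV (AES-GCM-128), cipher_start
 * works out to 14, i.e. encryption starts right after header + IV.
 */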
1176 cpl->aadstart_cipherstop_hi =
1177 htonl(CPL_TX_SEC_PDU_AADSTART_V(1) |
1178 CPL_TX_SEC_PDU_AADSTOP_V(TLS_HEADER_SIZE) |
1179 CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
1181 /* authentication will also start after tls header + iv size */
1182 cpl->cipherstop_lo_authinsert =
1183 htonl(CPL_TX_SEC_PDU_AUTHSTART_V(cipher_start) |
1184 CPL_TX_SEC_PDU_AUTHSTOP_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE) |
1185 CPL_TX_SEC_PDU_AUTHINSERT_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE));
1187 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1188 cpl->seqno_numivs = htonl(tx_info->scmd0_seqno_numivs);
1189 cpl->ivgen_hdrlen = htonl(tx_info->scmd0_ivgen_hdrlen);
1190 cpl->scmd1 = cpu_to_be64(tx_info->record_no);
1193 /* check if space left to fill the keys */
1194 left = (void *)q->q.stat - pos;
1196 left = (void *)end - (void *)q->q.stat;
1201 pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
1202 tx_info->key_ctx_len);
1203 left = (void *)q->q.stat - pos;
1206 left = (void *)end - (void *)q->q.stat;
1211 tx_data = (void *)pos;
1212 OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
1213 tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(data_len));
1215 tx_data->rsvd = htonl(tcp_seq);
1217 tx_data->flags = htonl(TX_BYPASS_F);
1219 tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
1221 /* check left again, it might go beyond queue limit */
1223 left = (void *)q->q.stat - pos;
1225 /* check the position again */
1227 left = (void *)end - (void *)q->q.stat;
1232 /* send the complete packet except the header */
1233 cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
1234 skb_offset, data_len);
1235 sgl_sdesc->skb = skb;
1237 chcr_txq_advance(&q->q, ndesc);
1238 cxgb4_ring_tx_db(adap, &q->q, ndesc);
1239 atomic64_inc(&adap->ch_ktls_stats.ktls_tx_send_records);
1245 * chcr_ktls_xmit_wr_short: This is to send out partial records. If it's
1246 * a middle part of a record, fetch the prior data to make it 16 byte aligned
1247 * and only then send it out.
1249 * @skb - skb contains partial record.
1250 * @tx_info - driver specific tls info.
1253 * @tcp_push - tcp push bit.
1254 * @mss - segment size.
1255 * @tls_rec_offset - offset from start of the tls record.
1256 * @prior_data - data before the current segment, required to make this record
1258 * @prior_data_len - prior_data length (less than 16)
1259 * return: NETDEV_TX_BUSY/NET_TX_OK.
1261 static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
1262 struct chcr_ktls_info *tx_info,
1263 struct sge_eth_txq *q,
1264 u32 tcp_seq, bool tcp_push, u32 mss,
1265 u32 tls_rec_offset, u8 *prior_data,
1266 u32 prior_data_len, u32 data_len,
1269 u32 len16, wr_mid = 0, cipher_start, nfrags;
1270 struct adapter *adap = tx_info->adap;
1271 unsigned int flits = 0, ndesc;
1272 int credits, left, last_desc;
1273 struct tx_sw_desc *sgl_sdesc;
1274 struct cpl_tx_data *tx_data;
1275 struct cpl_tx_sec_pdu *cpl;
1276 struct ulptx_idata *idata;
1277 struct ulp_txpkt *ulptx;
1278 struct fw_ulptx_wr *wr;
1283 nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
1284 /* get the number of flits required, it's a partial record so 2 flits
1285 * (AES_BLOCK_SIZE) will be added.
1287 flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2;
1288 /* get the correct 8 byte IV of this record */
1289 iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
1290 /* If it's a middle record and not 16 byte aligned to run AES CTR, need
1291 * to make it 16 byte aligned. So at least 2 extra flits of immediate
1292 * data will be added.
1296 /* number of descriptors */
1297 ndesc = chcr_flits_to_desc(flits);
1298 /* check if enough credits available */
1299 credits = chcr_txq_avail(&q->q) - ndesc;
1300 if (unlikely(credits < 0)) {
1301 chcr_eth_txq_stop(q);
1302 return NETDEV_TX_BUSY;
1305 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1306 chcr_eth_txq_stop(q);
1307 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1310 last_desc = q->q.pidx + ndesc - 1;
1311 if (last_desc >= q->q.size)
1312 last_desc -= q->q.size;
1313 sgl_sdesc = &q->q.sdesc[last_desc];
1315 if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1316 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1318 return NETDEV_TX_BUSY;
1321 pos = &q->q.desc[q->q.pidx];
1322 end = (u64 *)pos + flits;
1325 /* WR will need len16 */
1326 len16 = DIV_ROUND_UP(flits, 2);
1327 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1328 wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1333 ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
1334 ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
1335 ULP_TXPKT_FID_V(q->q.cntxt_id) |
1337 ulptx->len = htonl(len16 - 1);
1338 /* ULPTX_IDATA sub-command */
1339 idata = (struct ulptx_idata *)(ulptx + 1);
1340 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
1341 /* idata length will include cpl_tx_sec_pdu + key context size +
1342 * cpl_tx_data header.
1344 idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
1345 sizeof(*tx_data) + AES_BLOCK_LEN + prior_data_len);
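/* the extra AES_BLOCK_LEN accounts for the 16-byte AES-CTR IV block built
 * further below (salt + explicit IV + block counter), and prior_data_len
 * for the alignment bytes fetched from the preceding segment, if any.
 */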
1347 cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
1348 /* cipher start will have tls header + iv size extra if it's a header
1349 * part of the tls record, else only the 16 byte IV will be added.
1353 (!tls_rec_offset ? TLS_HEADER_SIZE + tx_info->iv_size : 0);
1355 cpl->op_ivinsrtofst =
1356 htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
1357 CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
1358 CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
1359 cpl->pldlen = htonl(data_len + AES_BLOCK_LEN + prior_data_len);
1360 cpl->aadstart_cipherstop_hi =
1361 htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
1362 cpl->cipherstop_lo_authinsert = 0;
1363 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1364 cpl->seqno_numivs = htonl(tx_info->scmd0_short_seqno_numivs);
1365 cpl->ivgen_hdrlen = htonl(tx_info->scmd0_short_ivgen_hdrlen);
1369 /* check if space left to fill the keys */
1370 left = (void *)q->q.stat - pos;
1372 left = (void *)end - (void *)q->q.stat;
1377 pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
1378 tx_info->key_ctx_len);
1379 left = (void *)q->q.stat - pos;
1382 left = (void *)end - (void *)q->q.stat;
1387 tx_data = (void *)pos;
1388 OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
1389 tx_data->len = htonl(TX_DATA_MSS_V(mss) |
1390 TX_LENGTH_V(data_len + prior_data_len));
1391 tx_data->rsvd = htonl(tcp_seq);
1392 tx_data->flags = htonl(TX_BYPASS_F);
1394 tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
1396 /* check left again, it might go beyond queue limit */
1398 left = (void *)q->q.stat - pos;
1400 /* check the position again */
1402 left = (void *)end - (void *)q->q.stat;
1406 /* copy the 16 byte IV for AES-CTR, which includes 4 bytes of salt, 8
1407 * bytes of actual IV and a 4 byte block counter.
1409 memcpy(pos, tx_info->key_ctx.salt, tx_info->salt_size);
1410 memcpy(pos + tx_info->salt_size, &iv_record, tx_info->iv_size);
1411 *(__be32 *)(pos + tx_info->salt_size + tx_info->iv_size) =
1412 htonl(2 + (tls_rec_offset ? ((tls_rec_offset -
1413 (TLS_HEADER_SIZE + tx_info->iv_size)) / AES_BLOCK_LEN) : 0));
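/* GCM uses counter value 2 for the first payload block (counter 1 is
 * reserved for the tag), hence the base of 2 above. Worked example with
 * assumed sizes (5-byte TLS header, 8-byte IV, 16-byte AES blocks): for
 * tls_rec_offset = 173 the payload offset is 173 - 13 = 160, i.e. 10 full
 * blocks already consumed, so the counter restarts at 2 + 10 = 12.
 */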
1416 /* prior_data_len will always be less than 16 bytes; fill the
1417 * prior_data after the AES CTR block and clear the remaining length
1421 pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
1422 /* send the complete packet except the header */
1423 cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
1424 skb_offset, data_len);
1425 sgl_sdesc->skb = skb;
1427 chcr_txq_advance(&q->q, ndesc);
1428 cxgb4_ring_tx_db(adap, &q->q, ndesc);
1434 * chcr_ktls_tx_plaintxt: This handler will take care of the records which have
1435 * only plain text (only tls header and iv)
1436 * @tx_info - driver specific tls info.
1437 * @skb - skb contains partial record.
1439 * @mss - segment size.
1440 * @tcp_push - tcp push bit.
1442 * @port_id : port number
1443 * @prior_data - data before the current segment, required to make this record
1445 * @prior_data_len - prior_data length (less than 16)
1446 * return: NETDEV_TX_BUSY/NET_TX_OK.
1448 static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
1449 struct sk_buff *skb, u32 tcp_seq, u32 mss,
1450 bool tcp_push, struct sge_eth_txq *q,
1451 u32 port_id, u8 *prior_data,
1452 u32 data_len, u32 skb_offset,
1455 int credits, left, len16, last_desc;
1456 unsigned int flits = 0, ndesc;
1457 struct tx_sw_desc *sgl_sdesc;
1458 struct cpl_tx_data *tx_data;
1459 struct ulptx_idata *idata;
1460 struct ulp_txpkt *ulptx;
1461 struct fw_ulptx_wr *wr;
1462 u32 wr_mid = 0, nfrags;
1466 flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
1467 nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
1468 flits += chcr_sgl_len(nfrags);
1472 /* WR will need len16 */
1473 len16 = DIV_ROUND_UP(flits, 2);
1474 /* check how many descriptors needed */
1475 ndesc = DIV_ROUND_UP(flits, 8);
1477 credits = chcr_txq_avail(&q->q) - ndesc;
1478 if (unlikely(credits < 0)) {
1479 chcr_eth_txq_stop(q);
1480 return NETDEV_TX_BUSY;
1483 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1484 chcr_eth_txq_stop(q);
1485 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1488 last_desc = q->q.pidx + ndesc - 1;
1489 if (last_desc >= q->q.size)
1490 last_desc -= q->q.size;
1491 sgl_sdesc = &q->q.sdesc[last_desc];
1493 if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
1494 sgl_sdesc->addr) < 0)) {
1495 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1497 return NETDEV_TX_BUSY;
1500 pos = &q->q.desc[q->q.pidx];
1501 end = (u64 *)pos + flits;
1504 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1505 wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1509 ulptx = (struct ulp_txpkt *)(wr + 1);
1510 ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
1511 ULP_TXPKT_DATAMODIFY_V(0) |
1512 ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
1513 ULP_TXPKT_DEST_V(0) |
1514 ULP_TXPKT_FID_V(q->q.cntxt_id) | ULP_TXPKT_RO_V(1));
1515 ulptx->len = htonl(len16 - 1);
1516 /* ULPTX_IDATA sub-command */
1517 idata = (struct ulptx_idata *)(ulptx + 1);
1518 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
1519 idata->len = htonl(sizeof(*tx_data) + prior_data_len);
1521 tx_data = (struct cpl_tx_data *)(idata + 1);
1522 OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
1523 tx_data->len = htonl(TX_DATA_MSS_V(mss) |
1524 TX_LENGTH_V(data_len + prior_data_len));
1525 /* set tcp seq number */
1526 tx_data->rsvd = htonl(tcp_seq);
1527 tx_data->flags = htonl(TX_BYPASS_F);
1529 tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
1532 /* apart from prior_data_len, we should set remaining part of 16 bytes
1536 pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
1538 /* check left again, it might go beyond queue limit */
1539 left = (void *)q->q.stat - pos;
1541 /* check the position again */
1543 left = (void *)end - (void *)q->q.stat;
1547 /* send the complete packet including the header */
1548 cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
1549 skb_offset, data_len);
1550 sgl_sdesc->skb = skb;
1552 chcr_txq_advance(&q->q, ndesc);
1553 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
1557 static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
1558 struct sk_buff *skb,
1559 struct sge_eth_txq *q)
1561 u32 ctrl, iplen, maclen, wr_mid = 0, len16;
1562 struct tx_sw_desc *sgl_sdesc;
1563 struct fw_eth_tx_pkt_wr *wr;
1564 struct cpl_tx_pkt_core *cpl;
1565 unsigned int flits, ndesc;
1566 int credits, last_desc;
1570 ctrl = sizeof(*cpl);
1571 flits = DIV_ROUND_UP(sizeof(*wr) + ctrl, 8);
1573 flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags + 1);
1574 len16 = DIV_ROUND_UP(flits, 2);
1575 /* check how many descriptors needed */
1576 ndesc = DIV_ROUND_UP(flits, 8);
1578 credits = chcr_txq_avail(&q->q) - ndesc;
1579 if (unlikely(credits < 0)) {
1580 chcr_eth_txq_stop(q);
1584 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1585 chcr_eth_txq_stop(q);
1586 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1589 last_desc = q->q.pidx + ndesc - 1;
1590 if (last_desc >= q->q.size)
1591 last_desc -= q->q.size;
1592 sgl_sdesc = &q->q.sdesc[last_desc];
1594 if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
1595 sgl_sdesc->addr) < 0)) {
1596 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1601 iplen = skb_network_header_len(skb);
1602 maclen = skb_mac_header_len(skb);
1604 pos = &q->q.desc[q->q.pidx];
1605 end = (u64 *)pos + flits;
1608 /* Firmware work request header */
1609 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1610 FW_WR_IMMDLEN_V(ctrl));
1612 wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1615 cpl = (void *)(wr + 1);
1618 cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) |
1619 TXPKT_INTF_V(tx_info->tx_chan) |
1620 TXPKT_PF_V(tx_info->adap->pf));
1622 cntrl1 = TXPKT_CSUM_TYPE_V(tx_info->ip_family == AF_INET ?
1623 TX_CSUM_TCPIP : TX_CSUM_TCPIP6);
1624 cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
1625 TXPKT_IPHDR_LEN_V(iplen);
1626 /* checksum offload */
1627 cpl->ctrl1 = cpu_to_be64(cntrl1);
1628 cpl->len = htons(skb->len);
1632 cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
1633 sgl_sdesc->skb = skb;
1634 chcr_txq_advance(&q->q, ndesc);
1635 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
1640 * chcr_ktls_copy_record_in_skb
1641 * @nskb - new skb to which the frags are to be added.
1642 * @skb - old skb, to copy socket and destructor details from.
1643 * @record - specific record which has the complete 16k payload in frags.
1645 static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
1646 struct sk_buff *skb,
1647 struct tls_record_info *record)
1651 for (i = 0; i < record->num_frags; i++) {
1652 skb_shinfo(nskb)->frags[i] = record->frags[i];
1653 /* increase the frag ref count */
1654 __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
1657 skb_shinfo(nskb)->nr_frags = record->num_frags;
1658 nskb->data_len = record->len;
1659 nskb->len += record->len;
1660 nskb->truesize += record->len;
1662 nskb->destructor = skb->destructor;
1663 refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
1667 * chcr_ktls_update_snd_una: Reset SND_UNA. It is done to avoid
1668 * sending the same segment again. It will discard the segment which is before
1669 * the current tx max.
1670 * @tx_info - driver specific tls info.
1672 * return: NET_TX_OK/NET_XMIT_DROP.
1674 static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
1675 struct sge_eth_txq *q)
1677 struct fw_ulptx_wr *wr;
1683 len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
1684 ndesc = DIV_ROUND_UP(len, 64);
1686 credits = chcr_txq_avail(&q->q) - ndesc;
1687 if (unlikely(credits < 0)) {
1688 chcr_eth_txq_stop(q);
1689 return NETDEV_TX_BUSY;
1692 pos = &q->q.desc[q->q.pidx];
1696 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1698 /* fill len in wr field */
1699 wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
1703 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
1705 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
1706 TCB_SND_UNA_RAW_V(0), 0);
1708 chcr_txq_advance(&q->q, ndesc);
1709 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
1715 * chcr_end_part_handler: This handler handles a record which
1716 * is complete or whose end part is received. The T6 adapter has an issue: it
1717 * can't send out a TAG with a partial record, so if it's an end part we have
1718 * to send the TAG as well, for which we need to fetch the complete record and
1719 * send it to the crypto module.
1720 * @tx_info - driver specific tls info.
1721 * @skb - skb contains partial record.
1722 * @record - complete record of 16K size.
1724 * @mss - segment size in which TP needs to chop a packet.
1725 * @tcp_push_no_fin - tcp push if fin is not set.
1727 * @tls_end_offset - offset from end of the record.
1728 * @last_wr - true if this is the last part of the skb going out.
1729 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
1731 static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
1732 struct sk_buff *skb,
1733 struct tls_record_info *record,
1734 u32 tcp_seq, int mss, bool tcp_push_no_fin,
1735 struct sge_eth_txq *q, u32 skb_offset,
1736 u32 tls_end_offset, bool last_wr)
1738 struct sk_buff *nskb = NULL;
1739 /* check if it is a complete record */
1740 if (tls_end_offset == record->len) {
1742 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
1744 nskb = alloc_skb(0, GFP_ATOMIC);
1746 dev_kfree_skb_any(skb);
1747 return NETDEV_TX_BUSY;
1750 /* copy complete record in skb */
1751 chcr_ktls_copy_record_in_skb(nskb, skb, record);
1752 /* packet is being sent from the beginning, update the tcp_seq
1755 tcp_seq = tls_record_start_seq(record);
1756 /* reset skb offset */
1760 dev_kfree_skb_any(skb);
1764 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
1767 if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
1768 last_wr, record->len, skb_offset,
1770 (last_wr && tcp_push_no_fin),
1774 tx_info->prev_seq = record->end_seq;
1777 dev_kfree_skb_any(nskb);
1778 return NETDEV_TX_BUSY;
1782 * chcr_short_record_handler: This handler takes care of records which
1783 * don't have an end part (the 1st part or the middle part(s) of a record). In such
1784 * cases, AES CTR is used in place of AES GCM to send out the partial packet.
1785 * This partial record might be the first part of the record, or a middle
1786 * part. In case of a middle record we should fetch the prior data to make it 16
1787 * byte aligned. If it has a partial tls header or iv then get to the start of the
1788 * tls header. And if it has a partial TAG, then remove the complete TAG and send
1790 * There is one more possibility: it gets only a partial header; send that
1791 * portion as plaintext.
1792 * @tx_info - driver specific tls info.
1793 * @skb - skb contains partial record.
1794 * @record - complete record of 16K size.
1796 * @mss - segment size in which TP needs to chop a packet.
1797 * @tcp_push_no_fin - tcp push if fin is not set.
1799 * @tls_end_offset - offset from end of the record.
1800 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
1802 static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
1803 struct sk_buff *skb,
1804 struct tls_record_info *record,
1805 u32 tcp_seq, int mss, bool tcp_push_no_fin,
1806 u32 data_len, u32 skb_offset,
1807 struct sge_eth_txq *q, u32 tls_end_offset)
1809 u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
1810 u8 prior_data[16] = {0};
1811 u32 prior_data_len = 0;
1813 /* check if the skb is ending in the middle of the tag/HASH; it's a big
1814 * trouble, send the packet before the HASH.
1816 int remaining_record = tls_end_offset - data_len;
1818 if (remaining_record > 0 &&
1819 remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
1820 int trimmed_len = 0;
1822 if (tls_end_offset > TLS_CIPHER_AES_GCM_128_TAG_SIZE)
1823 trimmed_len = data_len -
1824 (TLS_CIPHER_AES_GCM_128_TAG_SIZE -
1829 WARN_ON(trimmed_len > data_len);
1831 data_len = trimmed_len;
1832 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
1835 /* check if it is only the header part. */
1836 if (tls_rec_offset + data_len <= (TLS_HEADER_SIZE + tx_info->iv_size)) {
1837 if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
1839 tx_info->port_id, prior_data,
1840 data_len, skb_offset, prior_data_len))
1843 tx_info->prev_seq = tcp_seq + data_len;
1847 /* check if the middle record's start point is 16 byte aligned. CTR
1848 * needs 16 byte aligned start point to start encryption.
1850 if (tls_rec_offset) {
1851 /* there is an offset from the start, which means it's a middle record */
1854 if (tls_rec_offset < (TLS_HEADER_SIZE + tx_info->iv_size)) {
1855 prior_data_len = tls_rec_offset;
1861 (TLS_HEADER_SIZE + tx_info->iv_size))
1863 remaining = tls_rec_offset - prior_data_len;
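/* Example, assuming prior_data_len is the payload offset modulo
 * AES_BLOCK_LEN and a 13-byte header + IV prefix: for tls_rec_offset = 37
 * the payload offset is 24, so prior_data_len = 24 % 16 = 8 and the 8
 * bytes just before this segment are fetched below so that AES-CTR can
 * restart on a 16-byte boundary.
 */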
1866 /* if prior_data_len is not zero, it means we need to fetch prior
1867 * data to make this record 16 byte aligned, or we need to reach
1870 if (prior_data_len) {
1875 int frag_size = 0, frag_delta = 0;
1877 while (remaining > 0) {
1878 frag_size = skb_frag_size(&record->frags[i]);
1879 if (remaining < frag_size)
1882 remaining -= frag_size;
1885 f = &record->frags[i];
1886 vaddr = kmap_atomic(skb_frag_page(f));
1888 data = vaddr + skb_frag_off(f) + remaining;
1889 frag_delta = skb_frag_size(f) - remaining;
1891 if (frag_delta >= prior_data_len) {
1892 memcpy(prior_data, data, prior_data_len);
1893 kunmap_atomic(vaddr);
1895 memcpy(prior_data, data, frag_delta);
1896 kunmap_atomic(vaddr);
1897 /* get the next page */
1898 f = &record->frags[i + 1];
1899 vaddr = kmap_atomic(skb_frag_page(f));
1900 data = vaddr + skb_frag_off(f);
1901 memcpy(prior_data + frag_delta,
1902 data, (prior_data_len - frag_delta));
1903 kunmap_atomic(vaddr);
1905 /* adjust tcp_seq back by the prior_data len */
1906 tcp_seq -= prior_data_len;
1908 /* reset snd una, so the middle record won't send the already
1911 if (chcr_ktls_update_snd_una(tx_info, q))
1913 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
1915 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
1918 if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
1919 mss, tls_rec_offset, prior_data,
1920 prior_data_len, data_len, skb_offset)) {
1924 tx_info->prev_seq = tcp_seq + data_len + prior_data_len;
1927 dev_kfree_skb_any(skb);
1928 return NETDEV_TX_BUSY;
1931 static int chcr_ktls_sw_fallback(struct sk_buff *skb,
1932 struct chcr_ktls_info *tx_info,
1933 struct sge_eth_txq *q)
1935 u32 data_len, skb_offset;
1936 struct sk_buff *nskb;
1939 nskb = tls_encrypt_skb(skb);
1945 skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
1946 data_len = nskb->len - skb_offset;
1947 skb_tx_timestamp(nskb);
1949 if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
1952 tx_info->prev_seq = ntohl(th->seq) + data_len;
1953 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_fallback);
1956 dev_kfree_skb_any(nskb);
1959 /* nic tls TX handler */
1960 static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
1962 u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
1963 struct ch_ktls_port_stats_debug *port_stats;
1964 struct chcr_ktls_ofld_ctx_tx *tx_ctx;
1965 struct ch_ktls_stats_debug *stats;
1966 struct tcphdr *th = tcp_hdr(skb);
1967 int data_len, qidx, ret = 0, mss;
1968 struct tls_record_info *record;
1969 struct chcr_ktls_info *tx_info;
1970 struct tls_context *tls_ctx;
1971 struct sge_eth_txq *q;
1972 struct adapter *adap;
1973 unsigned long flags;
1975 tcp_seq = ntohl(th->seq);
1976 skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
1977 skb_data_len = skb->len - skb_offset;
1978 data_len = skb_data_len;
1980 mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
1982 tls_ctx = tls_get_ctx(skb->sk);
1983 if (unlikely(tls_ctx->netdev != dev))
1986 tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
1987 tx_info = tx_ctx->chcr_info;
1989 if (unlikely(!tx_info))
1992 adap = tx_info->adap;
1993 stats = &adap->ch_ktls_stats;
1994 port_stats = &stats->ktls_port[tx_info->port_id];
1996 qidx = skb->queue_mapping;
1997 q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
1998 cxgb4_reclaim_completed_tx(adap, &q->q, true);
1999 /* if tcp options are set but finish is not set, send the options first */
2000 if (!th->fin && chcr_ktls_check_tcp_options(th)) {
2001 ret = chcr_ktls_write_tcp_options(tx_info, skb, q,
2004 return NETDEV_TX_BUSY;
2007 /* TCP segments can be received either complete or partial.
2008 * chcr_end_part_handler will handle the cases where a complete record or the end
2009 * part of a record is received. In case of a partial end part of a record,
2010 * we will send the complete record again.
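/* For example, a single GSO skb can carry the tail of one 16 KB TLS record
 * followed by the head of the next; the do/while loop below splits the
 * payload at each record boundary and hands each piece to
 * chcr_end_part_handler() or chcr_short_record_handler().
 */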
2016 cxgb4_reclaim_completed_tx(adap, &q->q, true);
2018 spin_lock_irqsave(&tx_ctx->base.lock, flags);
2019 /* fetch the tls record */
2020 record = tls_get_record(&tx_ctx->base, tcp_seq,
2021 &tx_info->record_no);
2022 /* By the time the packet reached us, the ACK may already have been received
2023 * and the record freed; it won't be found in that case, handle it gracefully.
2025 if (unlikely(!record)) {
2026 spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
2027 atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
2031 tls_end_offset = record->end_seq - tcp_seq;
2033 pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
2034 tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
2035 /* update tcb for the skb */
2036 if (skb_data_len == data_len) {
2037 u32 tx_max = tcp_seq;
2039 if (!tls_record_is_start_marker(record) &&
2040 tls_end_offset < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
2041 tx_max = record->end_seq -
2042 TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2044 ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, tx_max,
2050 spin_unlock_irqrestore(&tx_ctx->base.lock,
2059 if (unlikely(tls_record_is_start_marker(record))) {
2060 atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
2061 /* If tls_end_offset < data_len, it means there is some
2062 * data after the start marker which needs encryption; send the
2063 * plaintext first and take an skb refcount, else send out the
2064 * complete pkt as plaintext.
2066 if (tls_end_offset < data_len)
2069 tls_end_offset = data_len;
2071 ret = chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
2072 (!th->fin && th->psh), q,
2073 tx_info->port_id, NULL,
2074 tls_end_offset, skb_offset,
2077 spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
2079 /* free the refcount taken earlier */
2080 if (tls_end_offset < data_len)
2081 dev_kfree_skb_any(skb);
2085 data_len -= tls_end_offset;
2086 tcp_seq = record->end_seq;
2087 skb_offset += tls_end_offset;
2091 /* increase the page reference count of the record, so that there
2092 * is no chance of a page being freed in the middle in case the stack
2093 * receives an ACK and tries to delete the record.
2095 for (i = 0; i < record->num_frags; i++)
2096 __skb_frag_ref(&record->frags[i]);
2098 spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
2101 /* if a tls record is finishing in this SKB */
2102 if (tls_end_offset <= data_len) {
2103 ret = chcr_end_part_handler(tx_info, skb, record,
2105 (!th->fin && th->psh), q,
2109 tls_end_offset == skb->len);
2111 data_len -= tls_end_offset;
2112 /* tcp_seq increment is required to handle next record.
2114 tcp_seq += tls_end_offset;
2115 skb_offset += tls_end_offset;
2117 ret = chcr_short_record_handler(tx_info, skb,
2118 record, tcp_seq, mss,
2119 (!th->fin && th->psh),
2120 data_len, skb_offset,
2125 /* drop the frag ref counts which we increased locally above */
2126 for (i = 0; i < record->num_frags; i++) {
2127 /* clear the frag ref count */
2128 __skb_frag_unref(&record->frags[i]);
2130 /* on any failure, break out of the loop. */
2133 dev_kfree_skb_any(skb);
2135 if (ret == FALLBACK)
2136 return chcr_ktls_sw_fallback(skb, tx_info, q);
2138 return NETDEV_TX_OK;
2141 /* length should never be less than 0 */
2142 WARN_ON(data_len < 0);
2144 } while (data_len > 0);
2146 atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
2147 atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
2149 /* tcp finish is set, send a separate tcp msg including all the options
2153 chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
2154 dev_kfree_skb_any(skb);
2157 return NETDEV_TX_OK;
2159 dev_kfree_skb_any(skb);
2160 return NETDEV_TX_OK;
2163 static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
2165 struct chcr_ktls_uld_ctx *u_ctx;
2167 pr_info_once("%s - version %s\n", CHCR_KTLS_DRV_DESC,
2168 CHCR_KTLS_DRV_VERSION);
2169 u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
2171 u_ctx = ERR_PTR(-ENOMEM);
2174 u_ctx->lldi = *lldi;
2179 static const struct tlsdev_ops chcr_ktls_ops = {
2180 .tls_dev_add = chcr_ktls_dev_add,
2181 .tls_dev_del = chcr_ktls_dev_del,
2184 static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
2185 [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
2186 [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
2189 static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
2190 const struct pkt_gl *pgl)
2192 const struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)rsp;
2193 struct chcr_ktls_uld_ctx *u_ctx = handle;
2194 u8 opcode = rpl->ot.opcode;
2195 struct adapter *adap;
2197 adap = pci_get_drvdata(u_ctx->lldi.pdev);
2199 if (!work_handlers[opcode]) {
2200 pr_err("Unsupported opcode %d received\n", opcode);
2204 work_handlers[opcode](adap, (unsigned char *)&rsp[1]);
2208 static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
2210 struct chcr_ktls_uld_ctx *u_ctx = handle;
2212 switch (new_state) {
2213 case CXGB4_STATE_UP:
2214 pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
2215 mutex_lock(&dev_mutex);
2216 list_add_tail(&u_ctx->entry, &uld_ctx_list);
2217 mutex_unlock(&dev_mutex);
2219 case CXGB4_STATE_START_RECOVERY:
2220 case CXGB4_STATE_DOWN:
2221 case CXGB4_STATE_DETACH:
2222 pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
2223 mutex_lock(&dev_mutex);
2224 list_del(&u_ctx->entry);
2225 mutex_unlock(&dev_mutex);
2234 static struct cxgb4_uld_info chcr_ktls_uld_info = {
2235 .name = CHCR_KTLS_DRV_MODULE_NAME,
2238 .add = chcr_ktls_uld_add,
2239 .tx_handler = chcr_ktls_xmit,
2240 .rx_handler = chcr_ktls_uld_rx_handler,
2241 .state_change = chcr_ktls_uld_state_change,
2242 .tlsdev_ops = &chcr_ktls_ops,
2245 static int __init chcr_ktls_init(void)
2247 cxgb4_register_uld(CXGB4_ULD_KTLS, &chcr_ktls_uld_info);
2251 static void __exit chcr_ktls_exit(void)
2253 struct chcr_ktls_uld_ctx *u_ctx, *tmp;
2254 struct adapter *adap;
2256 pr_info("driver unloaded\n");
2258 mutex_lock(&dev_mutex);
2259 list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
2260 adap = pci_get_drvdata(u_ctx->lldi.pdev);
2261 memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
2262 list_del(&u_ctx->entry);
2265 mutex_unlock(&dev_mutex);
2266 cxgb4_unregister_uld(CXGB4_ULD_KTLS);
2269 module_init(chcr_ktls_init);
2270 module_exit(chcr_ktls_exit);
2272 MODULE_DESCRIPTION("Chelsio NIC TLS ULD driver");
2273 MODULE_LICENSE("GPL");
2274 MODULE_AUTHOR("Chelsio Communications");
2275 MODULE_VERSION(CHCR_KTLS_DRV_VERSION);