1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/skbuff.h>
7 #include <linux/module.h>
8 #include <linux/highmem.h>
11 #include <linux/netdevice.h>
12 #include "chcr_ktls.h"
14 static LIST_HEAD(uld_ctx_list);
15 static DEFINE_MUTEX(dev_mutex);
17 static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
19 * chcr_ktls_save_keys: calculate and save crypto keys.
20 * @tx_info - driver specific tls info.
21 * @crypto_info - tls crypto information.
22 * @direction - TX/RX direction.
23 * return - SUCCESS/FAILURE.
25 static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
26 struct tls_crypto_info *crypto_info,
27 enum tls_offload_ctx_dir direction)
29 int ck_size, key_ctx_size, mac_key_size, keylen, ghash_size, ret;
30 unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
31 struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
32 struct ktls_key_ctx *kctx = &tx_info->key_ctx;
33 struct crypto_cipher *cipher;
34 unsigned char *key, *salt;
36 switch (crypto_info->cipher_type) {
37 case TLS_CIPHER_AES_GCM_128:
39 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
40 keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
41 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
42 tx_info->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
43 mac_key_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
44 tx_info->iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
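		/* keep the 8 byte IV in host byte order; per-record IVs are
		 * derived later by adding the record number to this base.
		 */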
45 tx_info->iv = be64_to_cpu(*(__be64 *)info_128_gcm->iv);
47 ghash_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
48 key = info_128_gcm->key;
49 salt = info_128_gcm->salt;
50 tx_info->record_no = *(u64 *)info_128_gcm->rec_seq;
		/* The SCMD fields used when encrypting a full TLS
		 * record. It's a one time calculation that holds for
		 * the lifetime of the connection.
		 */
56 tx_info->scmd0_seqno_numivs =
57 SCMD_SEQ_NO_CTRL_V(CHCR_SCMD_SEQ_NO_CTRL_64BIT) |
58 SCMD_CIPH_AUTH_SEQ_CTRL_F |
59 SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_TLS) |
60 SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
61 SCMD_AUTH_MODE_V(CHCR_SCMD_AUTH_MODE_GHASH) |
62 SCMD_IV_SIZE_V(TLS_CIPHER_AES_GCM_128_IV_SIZE >> 1) |
65 /* keys will be sent inline. */
66 tx_info->scmd0_ivgen_hdrlen = SCMD_KEY_CTX_INLINE_F;
68 /* The SCMD fields used when encrypting a partial TLS
69 * record (no trailer and possibly a truncated payload).
71 tx_info->scmd0_short_seqno_numivs =
72 SCMD_CIPH_AUTH_SEQ_CTRL_F |
73 SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) |
74 SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_CTR) |
75 SCMD_IV_SIZE_V(AES_BLOCK_LEN >> 1);
77 tx_info->scmd0_short_ivgen_hdrlen =
78 tx_info->scmd0_ivgen_hdrlen | SCMD_AADIVDROP_F;
83 pr_err("GCM: cipher type 0x%x not supported\n",
84 crypto_info->cipher_type);
89 key_ctx_size = CHCR_KTLS_KEY_CTX_LEN +
90 roundup(keylen, 16) + ghash_size;
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go into the key context.
	 */
94 cipher = crypto_alloc_cipher("aes", 0, 0);
100 ret = crypto_cipher_setkey(cipher, key, keylen);
104 memset(ghash_h, 0, ghash_size);
105 crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
107 /* fill the Key context */
108 if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
109 kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
117 memcpy(kctx->salt, salt, tx_info->salt_size);
118 memcpy(kctx->key, key, keylen);
119 memcpy(kctx->key + keylen, ghash_h, ghash_size);
120 tx_info->key_ctx_len = key_ctx_size;
123 crypto_free_cipher(cipher);
129 * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
131 * @tx_info - driver specific tls info.
132 * @atid - connection active tid.
133 * return - send success/failure.
135 static int chcr_ktls_act_open_req(struct sock *sk,
136 struct chcr_ktls_info *tx_info,
139 struct inet_sock *inet = inet_sk(sk);
140 struct cpl_t6_act_open_req *cpl6;
141 struct cpl_act_open_req *cpl;
148 skb = alloc_skb(len, GFP_KERNEL);
151 /* mark it a control pkt */
152 set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
154 cpl6 = __skb_put_zero(skb, len);
155 cpl = (struct cpl_act_open_req *)cpl6;
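	/* pack the rx queue id with the atid; the open reply (CPL_ACT_OPEN_RPL)
	 * is expected to be delivered on this rx queue.
	 */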
157 qid_atid = TID_QID_V(tx_info->rx_qid) |
159 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid));
160 cpl->local_port = inet->inet_sport;
161 cpl->peer_port = inet->inet_dport;
162 cpl->local_ip = inet->inet_rcv_saddr;
163 cpl->peer_ip = inet->inet_daddr;
165 /* fill first 64 bit option field. */
166 options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
167 SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
168 cpl->opt0 = cpu_to_be64(options);
170 /* next 64 bit option field. */
172 TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
173 cpl->opt2 = htonl(options);
175 return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
178 #if IS_ENABLED(CONFIG_IPV6)
180 * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
182 * @tx_info - driver specific tls info.
183 * @atid - connection active tid.
184 * return - send success/failure.
186 static int chcr_ktls_act_open_req6(struct sock *sk,
187 struct chcr_ktls_info *tx_info,
190 struct inet_sock *inet = inet_sk(sk);
191 struct cpl_t6_act_open_req6 *cpl6;
192 struct cpl_act_open_req6 *cpl;
199 skb = alloc_skb(len, GFP_KERNEL);
202 /* mark it a control pkt */
203 set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
205 cpl6 = __skb_put_zero(skb, len);
206 cpl = (struct cpl_act_open_req6 *)cpl6;
208 qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
209 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
210 cpl->local_port = inet->inet_sport;
211 cpl->peer_port = inet->inet_dport;
212 cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
213 cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
214 cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
215 cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];
217 /* first 64 bit option field. */
218 options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
219 SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
220 cpl->opt0 = cpu_to_be64(options);
221 /* next 64 bit option field. */
223 TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
224 cpl->opt2 = htonl(options);
226 return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
228 #endif /* #if IS_ENABLED(CONFIG_IPV6) */
231 * chcr_setup_connection: create a TCB entry so that TP will form tcp packets.
233 * @tx_info - driver specific tls info.
234 * return: NET_TX_OK/NET_XMIT_DROP
236 static int chcr_setup_connection(struct sock *sk,
237 struct chcr_ktls_info *tx_info)
239 struct tid_info *t = &tx_info->adap->tids;
242 atid = cxgb4_alloc_atid(t, tx_info);
246 tx_info->atid = atid;
248 if (tx_info->ip_family == AF_INET) {
249 ret = chcr_ktls_act_open_req(sk, tx_info, atid);
250 #if IS_ENABLED(CONFIG_IPV6)
252 ret = cxgb4_clip_get(tx_info->netdev, (const u32 *)
253 &sk->sk_v6_rcv_saddr,
257 ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
	/* if the return type is NET_XMIT_CN, the msg will be sent but delayed;
	 * mark it as success. For any other return type, clear the atid and
	 * return that failure.
	 */
265 if (ret == NET_XMIT_CN) {
268 #if IS_ENABLED(CONFIG_IPV6)
269 /* clear clip entry */
270 if (tx_info->ip_family == AF_INET6)
271 cxgb4_clip_release(tx_info->netdev,
273 &sk->sk_v6_rcv_saddr,
276 cxgb4_free_atid(t, atid);
284 * chcr_set_tcb_field: update tcb fields.
285 * @tx_info - driver specific tls info.
287 * @mask - TCB word related mask.
288 * @val - TCB word related value.
289 * @no_reply - set 1 if not looking for TP response.
291 static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
292 u64 mask, u64 val, int no_reply)
294 struct cpl_set_tcb_field *req;
297 skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
301 req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
302 INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, tx_info->tid);
303 req->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
304 NO_REPLY_V(no_reply));
305 req->word_cookie = htons(TCB_WORD_V(word));
306 req->mask = cpu_to_be64(mask);
307 req->val = cpu_to_be64(val);
309 set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
310 return cxgb4_ofld_send(tx_info->netdev, skb);
314 * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
315 * @tx_info - driver specific tls info.
316 * return: NET_TX_OK/NET_XMIT_DROP.
318 static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
320 return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
321 TCB_T_STATE_V(TCB_T_STATE_M),
322 CHCR_TCB_STATE_CLOSED, 1);
326 * chcr_ktls_dev_del: call back for tls_dev_del.
327 * Remove the tid and l2t entry and close the connection.
 * This is done on a per-connection basis.
 * @netdev - net device.
 * @tls_ctx - tls context.
331 * @direction - TX/RX crypto direction
333 static void chcr_ktls_dev_del(struct net_device *netdev,
334 struct tls_context *tls_ctx,
335 enum tls_offload_ctx_dir direction)
337 struct chcr_ktls_ofld_ctx_tx *tx_ctx =
338 chcr_get_ktls_tx_context(tls_ctx);
339 struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
340 struct ch_ktls_port_stats_debug *port_stats;
345 /* clear l2t entry */
347 cxgb4_l2t_release(tx_info->l2te);
349 #if IS_ENABLED(CONFIG_IPV6)
350 /* clear clip entry */
351 if (tx_info->ip_family == AF_INET6)
352 cxgb4_clip_release(netdev, (const u32 *)
353 &tx_info->sk->sk_v6_rcv_saddr,
358 if (tx_info->tid != -1) {
359 /* clear tcb state and then release tid */
360 chcr_ktls_mark_tcb_close(tx_info);
361 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
362 tx_info->tid, tx_info->ip_family);
365 port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
366 atomic64_inc(&port_stats->ktls_tx_connection_close);
368 tx_ctx->chcr_info = NULL;
369 /* release module refcount */
370 module_put(THIS_MODULE);
374 * chcr_ktls_dev_add: call back for tls_dev_add.
 * Create a tcb entry for TP and add an l2t entry for the connection. Also
 * generate the crypto keys and save them locally.
 * @netdev - net device.
 * @tls_ctx - tls context.
379 * @direction - TX/RX crypto direction
380 * return: SUCCESS/FAILURE.
382 static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
383 enum tls_offload_ctx_dir direction,
384 struct tls_crypto_info *crypto_info,
385 u32 start_offload_tcp_sn)
387 struct tls_context *tls_ctx = tls_get_ctx(sk);
388 struct ch_ktls_port_stats_debug *port_stats;
389 struct chcr_ktls_ofld_ctx_tx *tx_ctx;
390 struct chcr_ktls_info *tx_info;
391 struct dst_entry *dst;
392 struct adapter *adap;
393 struct port_info *pi;
398 tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
400 pi = netdev_priv(netdev);
402 port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
403 atomic64_inc(&port_stats->ktls_tx_connection_open);
405 if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
406 pr_err("not expecting for RX direction\n");
410 if (tx_ctx->chcr_info)
413 tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
418 spin_lock_init(&tx_info->lock);
	/* initialize tid and atid to -1; 0 is also a valid id. */
423 tx_info->adap = adap;
424 tx_info->netdev = netdev;
425 tx_info->first_qset = pi->first_qset;
426 tx_info->tx_chan = pi->tx_chan;
427 tx_info->smt_idx = pi->smt_idx;
428 tx_info->port_id = pi->port_id;
429 tx_info->prev_ack = 0;
430 tx_info->prev_win = 0;
432 tx_info->rx_qid = chcr_get_first_rx_qid(adap);
433 if (unlikely(tx_info->rx_qid < 0))
436 tx_info->prev_seq = start_offload_tcp_sn;
437 tx_info->tcp_start_seq_number = start_offload_tcp_sn;
439 /* save crypto keys */
440 ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
445 if (sk->sk_family == AF_INET) {
446 memcpy(daaddr, &sk->sk_daddr, 4);
447 tx_info->ip_family = AF_INET;
448 #if IS_ENABLED(CONFIG_IPV6)
450 if (!sk->sk_ipv6only &&
451 ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
452 memcpy(daaddr, &sk->sk_daddr, 4);
453 tx_info->ip_family = AF_INET;
455 memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
456 tx_info->ip_family = AF_INET6;
461 /* get the l2t index */
462 dst = sk_dst_get(sk);
464 pr_err("DST entry not found\n");
467 n = dst_neigh_lookup(dst, daaddr);
469 pr_err("neighbour not found\n");
473 tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);
478 if (!tx_info->l2te) {
479 pr_err("l2t entry not found\n");
	/* Driver shouldn't be removed while any single connection exists */
484 if (!try_module_get(THIS_MODULE))
487 init_completion(&tx_info->completion);
488 /* create a filter and call cxgb4_l2t_send to send the packet out, which
489 * will take care of updating l2t entry in hw if not already done.
491 tx_info->open_state = CH_KTLS_OPEN_PENDING;
493 if (chcr_setup_connection(sk, tx_info))
497 wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
498 spin_lock_bh(&tx_info->lock);
499 if (tx_info->open_state) {
500 /* need to wait for hw response, can't free tx_info yet. */
501 if (tx_info->open_state == CH_KTLS_OPEN_PENDING)
502 tx_info->pending_close = true;
		/* release the lock after the cleanup */
506 spin_unlock_bh(&tx_info->lock);
509 reinit_completion(&tx_info->completion);
510 /* mark it pending for hw response */
511 tx_info->open_state = CH_KTLS_OPEN_PENDING;
513 if (chcr_init_tcb_fields(tx_info))
517 wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
518 spin_lock_bh(&tx_info->lock);
519 if (tx_info->open_state) {
520 /* need to wait for hw response, can't free tx_info yet. */
521 tx_info->pending_close = true;
		/* release the lock after cleanup */
525 spin_unlock_bh(&tx_info->lock);
527 if (!cxgb4_check_l2t_valid(tx_info->l2te))
530 atomic64_inc(&port_stats->ktls_tx_ctx);
531 tx_ctx->chcr_info = tx_info;
536 chcr_ktls_mark_tcb_close(tx_info);
537 #if IS_ENABLED(CONFIG_IPV6)
538 /* clear clip entry */
539 if (tx_info->ip_family == AF_INET6)
540 cxgb4_clip_release(netdev, (const u32 *)
541 &sk->sk_v6_rcv_saddr,
544 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
545 tx_info->tid, tx_info->ip_family);
548 /* release module refcount */
549 module_put(THIS_MODULE);
551 cxgb4_l2t_release(tx_info->l2te);
553 if (tx_info->pending_close)
554 spin_unlock_bh(&tx_info->lock);
558 atomic64_inc(&port_stats->ktls_tx_connection_fail);
563 * chcr_init_tcb_fields: Initialize tcb fields to handle TCP seq number
565 * @tx_info - driver specific tls info.
566 * return: NET_TX_OK/NET_XMIT_DROP
568 static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
572 /* set tcb in offload and bypass */
574 chcr_set_tcb_field(tx_info, TCB_T_FLAGS_W,
575 TCB_T_FLAGS_V(TF_CORE_BYPASS_F | TF_NON_OFFLOAD_F),
576 TCB_T_FLAGS_V(TF_CORE_BYPASS_F), 1);
579 /* reset snd_una and snd_next fields in tcb */
580 ret = chcr_set_tcb_field(tx_info, TCB_SND_UNA_RAW_W,
581 TCB_SND_NXT_RAW_V(TCB_SND_NXT_RAW_M) |
582 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
588 ret = chcr_set_tcb_field(tx_info, TCB_SND_MAX_RAW_W,
589 TCB_SND_MAX_RAW_V(TCB_SND_MAX_RAW_M),
	/* update the l2t index and request a tp reply to confirm the tcb is
	 * initialised to handle tx traffic.
	 */
597 ret = chcr_set_tcb_field(tx_info, TCB_L2T_IX_W,
598 TCB_L2T_IX_V(TCB_L2T_IX_M),
599 TCB_L2T_IX_V(tx_info->l2te->idx), 0);
604 * chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
606 static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
607 unsigned char *input)
609 const struct cpl_act_open_rpl *p = (void *)input;
610 struct chcr_ktls_info *tx_info = NULL;
611 unsigned int atid, tid, status;
615 status = AOPEN_STATUS_G(ntohl(p->atid_status));
616 atid = TID_TID_G(AOPEN_ATID_G(ntohl(p->atid_status)));
619 tx_info = lookup_atid(t, atid);
621 if (!tx_info || tx_info->atid != atid) {
622 pr_err("%s: incorrect tx_info or atid\n", __func__);
626 cxgb4_free_atid(t, atid);
629 spin_lock(&tx_info->lock);
630 /* HW response is very close, finish pending cleanup */
631 if (tx_info->pending_close) {
632 spin_unlock(&tx_info->lock);
			/* it's a late success; the tcb state is established,
			 * so mark it closed.
			 */
637 chcr_ktls_mark_tcb_close(tx_info);
638 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
639 tid, tx_info->ip_family);
647 cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
648 tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
650 tx_info->open_state = CH_KTLS_OPEN_FAILURE;
652 spin_unlock(&tx_info->lock);
654 complete(&tx_info->completion);
659 * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
661 static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
663 const struct cpl_set_tcb_rpl *p = (void *)input;
664 struct chcr_ktls_info *tx_info = NULL;
671 tx_info = lookup_tid(t, tid);
673 if (!tx_info || tx_info->tid != tid) {
674 pr_err("%s: incorrect tx_info or tid\n", __func__);
678 spin_lock(&tx_info->lock);
679 if (tx_info->pending_close) {
680 spin_unlock(&tx_info->lock);
684 tx_info->open_state = false;
685 spin_unlock(&tx_info->lock);
687 complete(&tx_info->completion);
691 static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
692 u32 tid, void *pos, u16 word, u64 mask,
695 struct cpl_set_tcb_field_core *cpl;
696 struct ulptx_idata *idata;
697 struct ulp_txpkt *txpkt;
701 txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
702 txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
704 /* ULPTX_IDATA sub-command */
705 idata = (struct ulptx_idata *)(txpkt + 1);
706 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
707 idata->len = htonl(sizeof(*cpl));
711 /* CPL_SET_TCB_FIELD */
712 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
713 cpl->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
715 cpl->word_cookie = htons(TCB_WORD_V(word));
716 cpl->mask = cpu_to_be64(mask);
717 cpl->val = cpu_to_be64(val);
720 idata = (struct ulptx_idata *)(cpl + 1);
721 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
722 idata->len = htonl(0);
730 * chcr_write_cpl_set_tcb_ulp: update tcb values.
 * The TCB is responsible for creating the tcp headers, so all the related
 * values should be correctly updated.
733 * @tx_info - driver specific tls info.
734 * @q - tx queue on which packet is going out.
735 * @tid - TCB identifier.
 * @pos - current index where we should start writing.
738 * @mask - TCB word related mask.
739 * @val - TCB word related value.
740 * @reply - set 1 if looking for TP response.
741 * return - next position to write.
743 static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
744 struct sge_eth_txq *q, u32 tid,
745 void *pos, u16 word, u64 mask,
748 int left = (void *)q->q.stat - pos;
750 if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
756 __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
759 return chcr_copy_to_txd(buf, &q->q, pos,
760 CHCR_SET_TCB_FIELD_LEN);
764 pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
767 /* check again if we are at the end of the queue */
768 if (left == CHCR_SET_TCB_FIELD_LEN)
775 * chcr_ktls_xmit_tcb_cpls: update tcb entry so that TP will create the header
776 * with updated values like tcp seq, ack, window etc.
777 * @tx_info - driver specific tls info.
782 * return: NETDEV_TX_BUSY/NET_TX_OK.
784 static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
785 struct sge_eth_txq *q, u64 tcp_seq,
786 u64 tcp_ack, u64 tcp_win)
788 bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
789 struct ch_ktls_port_stats_debug *port_stats;
790 u32 len, cpl = 0, ndesc, wr_len;
791 struct fw_ulptx_wr *wr;
795 wr_len = sizeof(*wr);
796 /* there can be max 4 cpls, check if we have enough credits */
797 len = wr_len + 4 * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
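	/* worst-case length in bytes, converted to 64-byte hardware descriptors */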
798 ndesc = DIV_ROUND_UP(len, 64);
800 credits = chcr_txq_avail(&q->q) - ndesc;
801 if (unlikely(credits < 0)) {
802 chcr_eth_txq_stop(q);
803 return NETDEV_TX_BUSY;
806 pos = &q->q.desc[q->q.pidx];
807 /* make space for WR, we'll fill it later when we know all the cpls
808 * being sent out and have complete length.
812 /* update tx_max if its a re-transmit or the first wr */
813 if (first_wr || tcp_seq != tx_info->prev_seq) {
814 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
816 TCB_TX_MAX_V(TCB_TX_MAX_M),
817 TCB_TX_MAX_V(tcp_seq), 0);
820 /* reset snd una if it's a re-transmit pkt */
821 if (tcp_seq != tx_info->prev_seq) {
824 &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
825 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
829 TCB_SND_UNA_RAW_V(0), 0);
830 atomic64_inc(&port_stats->ktls_tx_ooo);
834 if (first_wr || tx_info->prev_ack != tcp_ack) {
835 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
837 TCB_RCV_NXT_V(TCB_RCV_NXT_M),
838 TCB_RCV_NXT_V(tcp_ack), 0);
839 tx_info->prev_ack = tcp_ack;
842 /* update receive window */
843 if (first_wr || tx_info->prev_win != tcp_win) {
844 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
846 TCB_RCV_WND_V(TCB_RCV_WND_M),
847 TCB_RCV_WND_V(tcp_win), 0);
848 tx_info->prev_win = tcp_win;
853 /* get the actual length */
854 len = wr_len + cpl * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
856 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
858 /* fill len in wr field */
859 wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
861 ndesc = DIV_ROUND_UP(len, 64);
862 chcr_txq_advance(&q->q, ndesc);
863 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
 * @nskb - new skb to which the frags are to be added.
871 * @skb - old skb from which frags will be copied.
873 static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
877 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
878 skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
879 __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
882 skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
883 nskb->len += skb->data_len;
884 nskb->data_len = skb->data_len;
885 nskb->truesize += skb->data_len;
889 * chcr_ktls_get_tx_flits
 * returns the number of flits to be sent out; it includes the key context
 * length, WR size and skb fragments.
894 chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
896 return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
897 DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
 * chcr_ktls_check_tcp_options: To check if there is any TCP option available
 * other than timestamp.
 * @tcp - tcp header of the skb (which contains a partial record).
907 chcr_ktls_check_tcp_options(struct tcphdr *tcp)
909 int cnt, opt, optlen;
912 cp = (u_char *)(tcp + 1);
913 cnt = (tcp->doff << 2) - sizeof(struct tcphdr);
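	/* walk the option list; anything other than EOL, NOP or timestamp means
	 * the header has to be sent out separately as a plain packet first.
	 */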
914 for (; cnt > 0; cnt -= optlen, cp += optlen) {
916 if (opt == TCPOPT_EOL)
918 if (opt == TCPOPT_NOP) {
924 if (optlen < 2 || optlen > cnt)
 * chcr_ktls_write_tcp_options : TP can't send out all the options, so we need
 * to send them out separately.
 * @tx_info - driver specific tls info.
 * @skb - skb containing a partial record.
943 * @tx_chan - channel number.
944 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
947 chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
948 struct sge_eth_txq *q, uint32_t tx_chan)
950 struct fw_eth_tx_pkt_wr *wr;
951 struct cpl_tx_pkt_core *cpl;
952 u32 ctrl, iplen, maclen;
953 #if IS_ENABLED(CONFIG_IPV6)
965 iplen = skb_network_header_len(skb);
966 maclen = skb_mac_header_len(skb);
968 /* packet length = eth hdr len + ip hdr len + tcp hdr len
969 * (including options).
971 pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
973 ctrl = sizeof(*cpl) + pktlen;
974 len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
975 /* check how many descriptors needed */
976 ndesc = DIV_ROUND_UP(len16, 4);
978 credits = chcr_txq_avail(&q->q) - ndesc;
979 if (unlikely(credits < 0)) {
980 chcr_eth_txq_stop(q);
981 return NETDEV_TX_BUSY;
984 pos = &q->q.desc[q->q.pidx];
987 /* Firmware work request header */
988 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
989 FW_WR_IMMDLEN_V(ctrl));
991 wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
994 cpl = (void *)(wr + 1);
997 cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | TXPKT_INTF_V(tx_chan) |
998 TXPKT_PF_V(tx_info->adap->pf));
1000 cpl->len = htons(pktlen);
1002 memcpy(buf, skb->data, pktlen);
1003 if (tx_info->ip_family == AF_INET) {
1004 /* we need to correct ip header len */
1005 ip = (struct iphdr *)(buf + maclen);
1006 ip->tot_len = htons(pktlen - maclen);
1007 cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
1008 #if IS_ENABLED(CONFIG_IPV6)
1010 ip6 = (struct ipv6hdr *)(buf + maclen);
1011 ip6->payload_len = htons(pktlen - maclen - iplen);
1012 cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
1016 cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
1017 TXPKT_IPHDR_LEN_V(iplen);
1018 /* checksum offload */
1019 cpl->ctrl1 = cpu_to_be64(cntrl1);
	/* now take care of the tcp header: if fin is not set then clear the push
	 * bit as well, and if fin is set, it will be sent last, so we need to
	 * update the tcp sequence number to match the last packet.
	 */
1027 tcp = (struct tcphdr *)(buf + maclen + iplen);
1032 tcp->seq = htonl(tx_info->prev_seq);
1034 chcr_copy_to_txd(buf, &q->q, pos, pktlen);
1036 chcr_txq_advance(&q->q, ndesc);
1037 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
/* chcr_ktls_skb_shift - Shifts the requested length of paged data from one skb
 * to another.
 * @tgt - buffer into which the tail data gets added
 * @skb - buffer from which the paged data comes
 * @shiftlen - shift up to this many bytes
1046 static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
1049 skb_frag_t *fragfrom, *fragto;
1052 WARN_ON(shiftlen > skb->data_len);
1057 fragfrom = &skb_shinfo(skb)->frags[from];
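	/* move whole frags into tgt while they fit; the last frag may be split,
	 * with tgt referencing only the leading 'todo' bytes of it.
	 */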
1059 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
1060 fragfrom = &skb_shinfo(skb)->frags[from];
1061 fragto = &skb_shinfo(tgt)->frags[to];
1063 if (todo >= skb_frag_size(fragfrom)) {
1064 *fragto = *fragfrom;
1065 todo -= skb_frag_size(fragfrom);
1070 __skb_frag_ref(fragfrom);
1071 skb_frag_page_copy(fragto, fragfrom);
1072 skb_frag_off_copy(fragto, fragfrom);
1073 skb_frag_size_set(fragto, todo);
1075 skb_frag_off_add(fragfrom, todo);
1076 skb_frag_size_sub(fragfrom, todo);
1084 /* Ready to "commit" this state change to tgt */
1085 skb_shinfo(tgt)->nr_frags = to;
1087 /* Reposition in the original skb */
1089 while (from < skb_shinfo(skb)->nr_frags)
1090 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
1092 skb_shinfo(skb)->nr_frags = to;
1094 WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
1096 skb->len -= shiftlen;
1097 skb->data_len -= shiftlen;
1098 skb->truesize -= shiftlen;
1099 tgt->len += shiftlen;
1100 tgt->data_len += shiftlen;
1101 tgt->truesize += shiftlen;
 * chcr_ktls_xmit_wr_complete: This sends out the complete record. If the skb
 * received carries only the end part of a record, send out the complete record
 * so that the crypto block will be able to generate the TAG/HASH.
1110 * @skb - segment which has complete or partial end part.
1111 * @tx_info - driver specific tls info.
1114 * @tcp_push - tcp push bit.
1115 * @mss - segment size.
1116 * return: NETDEV_TX_BUSY/NET_TX_OK.
1118 static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
1119 struct chcr_ktls_info *tx_info,
1120 struct sge_eth_txq *q, u32 tcp_seq,
1121 bool tcp_push, u32 mss)
1123 u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
1124 struct adapter *adap = tx_info->adap;
1125 int credits, left, last_desc;
1126 struct tx_sw_desc *sgl_sdesc;
1127 struct cpl_tx_data *tx_data;
1128 struct cpl_tx_sec_pdu *cpl;
1129 struct ulptx_idata *idata;
1130 struct ulp_txpkt *ulptx;
1131 struct fw_ulptx_wr *wr;
1135 /* get the number of flits required */
1136 flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
1137 /* number of descriptors */
1138 ndesc = chcr_flits_to_desc(flits);
1139 /* check if enough credits available */
1140 credits = chcr_txq_avail(&q->q) - ndesc;
1141 if (unlikely(credits < 0)) {
1142 chcr_eth_txq_stop(q);
1143 return NETDEV_TX_BUSY;
1146 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* Credits are below the threshold values, stop the queue after
1148 * injecting the Work Request for this packet.
1150 chcr_eth_txq_stop(q);
1151 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1154 last_desc = q->q.pidx + ndesc - 1;
1155 if (last_desc >= q->q.size)
1156 last_desc -= q->q.size;
1157 sgl_sdesc = &q->q.sdesc[last_desc];
1159 if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1160 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1162 return NETDEV_TX_BUSY;
1165 pos = &q->q.desc[q->q.pidx];
1166 end = (u64 *)pos + flits;
1169 /* WR will need len16 */
1170 len16 = DIV_ROUND_UP(flits, 2);
1171 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1172 wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1177 ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
1178 ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
1179 ULP_TXPKT_FID_V(q->q.cntxt_id) |
1181 ulptx->len = htonl(len16 - 1);
1182 /* ULPTX_IDATA sub-command */
1183 idata = (struct ulptx_idata *)(ulptx + 1);
1184 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
1185 /* idata length will include cpl_tx_sec_pdu + key context size +
1186 * cpl_tx_data header.
1188 idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
1191 cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
1192 cpl->op_ivinsrtofst =
1193 htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
1194 CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
1195 CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
1196 CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
1197 cpl->pldlen = htonl(skb->data_len);
1199 /* encryption should start after tls header size + iv size */
1200 cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
1202 cpl->aadstart_cipherstop_hi =
1203 htonl(CPL_TX_SEC_PDU_AADSTART_V(1) |
1204 CPL_TX_SEC_PDU_AADSTOP_V(TLS_HEADER_SIZE) |
1205 CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
1207 /* authentication will also start after tls header + iv size */
1208 cpl->cipherstop_lo_authinsert =
1209 htonl(CPL_TX_SEC_PDU_AUTHSTART_V(cipher_start) |
1210 CPL_TX_SEC_PDU_AUTHSTOP_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE) |
1211 CPL_TX_SEC_PDU_AUTHINSERT_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE));
1213 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1214 cpl->seqno_numivs = htonl(tx_info->scmd0_seqno_numivs);
1215 cpl->ivgen_hdrlen = htonl(tx_info->scmd0_ivgen_hdrlen);
1216 cpl->scmd1 = cpu_to_be64(tx_info->record_no);
1219 /* check if space left to fill the keys */
1220 left = (void *)q->q.stat - pos;
1222 left = (void *)end - (void *)q->q.stat;
1227 pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
1228 tx_info->key_ctx_len);
1229 left = (void *)q->q.stat - pos;
1232 left = (void *)end - (void *)q->q.stat;
1237 tx_data = (void *)pos;
1238 OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
1239 tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len));
1241 tx_data->rsvd = htonl(tcp_seq);
1243 tx_data->flags = htonl(TX_BYPASS_F);
1245 tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
1247 /* check left again, it might go beyond queue limit */
1249 left = (void *)q->q.stat - pos;
1251 /* check the position again */
1253 left = (void *)end - (void *)q->q.stat;
1258 /* send the complete packet except the header */
1259 cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
1261 sgl_sdesc->skb = skb;
1263 chcr_txq_advance(&q->q, ndesc);
1264 cxgb4_ring_tx_db(adap, &q->q, ndesc);
1265 atomic64_inc(&adap->ch_ktls_stats.ktls_tx_send_records);
 * chcr_ktls_xmit_wr_short: This is to send out partial records. If it's
 * a middle part of a record, fetch the prior data to make it 16 byte aligned
 * and only then send it out.
 * @skb - skb containing a partial record.
1276 * @tx_info - driver specific tls info.
1279 * @tcp_push - tcp push bit.
1280 * @mss - segment size.
1281 * @tls_rec_offset - offset from start of the tls record.
 * @prior_data - data before the current segment, required to make this record
1284 * @prior_data_len - prior_data length (less than 16)
1285 * return: NETDEV_TX_BUSY/NET_TX_OK.
1287 static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
1288 struct chcr_ktls_info *tx_info,
1289 struct sge_eth_txq *q,
1290 u32 tcp_seq, bool tcp_push, u32 mss,
1291 u32 tls_rec_offset, u8 *prior_data,
1294 struct adapter *adap = tx_info->adap;
1295 u32 len16, wr_mid = 0, cipher_start;
1296 unsigned int flits = 0, ndesc;
1297 int credits, left, last_desc;
1298 struct tx_sw_desc *sgl_sdesc;
1299 struct cpl_tx_data *tx_data;
1300 struct cpl_tx_sec_pdu *cpl;
1301 struct ulptx_idata *idata;
1302 struct ulp_txpkt *ulptx;
1303 struct fw_ulptx_wr *wr;
1308 /* get the number of flits required, it's a partial record so 2 flits
1309 * (AES_BLOCK_SIZE) will be added.
1311 flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
1312 /* get the correct 8 byte IV of this record */
1313 iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
	/* If it's a middle record and not 16 byte aligned to run AES CTR, we need
	 * to make it 16 byte aligned. So at least 2 extra flits of immediate
1316 * data will be added.
1320 /* number of descriptors */
1321 ndesc = chcr_flits_to_desc(flits);
1322 /* check if enough credits available */
1323 credits = chcr_txq_avail(&q->q) - ndesc;
1324 if (unlikely(credits < 0)) {
1325 chcr_eth_txq_stop(q);
1326 return NETDEV_TX_BUSY;
1329 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1330 chcr_eth_txq_stop(q);
1331 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1334 last_desc = q->q.pidx + ndesc - 1;
1335 if (last_desc >= q->q.size)
1336 last_desc -= q->q.size;
1337 sgl_sdesc = &q->q.sdesc[last_desc];
1339 if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1340 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1342 return NETDEV_TX_BUSY;
1345 pos = &q->q.desc[q->q.pidx];
1346 end = (u64 *)pos + flits;
1349 /* WR will need len16 */
1350 len16 = DIV_ROUND_UP(flits, 2);
1351 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1352 wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1357 ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
1358 ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
1359 ULP_TXPKT_FID_V(q->q.cntxt_id) |
1361 ulptx->len = htonl(len16 - 1);
1362 /* ULPTX_IDATA sub-command */
1363 idata = (struct ulptx_idata *)(ulptx + 1);
1364 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
1365 /* idata length will include cpl_tx_sec_pdu + key context size +
1366 * cpl_tx_data header.
1368 idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
1369 sizeof(*tx_data) + AES_BLOCK_LEN + prior_data_len);
1371 cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
	/* cipher start will have tls header + iv size extra if it's the header
	 * part of a tls record, else only the 16 byte IV will be added.
	 */
1377 (!tls_rec_offset ? TLS_HEADER_SIZE + tx_info->iv_size : 0);
1379 cpl->op_ivinsrtofst =
1380 htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
1381 CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
1382 CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
1383 cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len);
1384 cpl->aadstart_cipherstop_hi =
1385 htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
1386 cpl->cipherstop_lo_authinsert = 0;
1387 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1388 cpl->seqno_numivs = htonl(tx_info->scmd0_short_seqno_numivs);
1389 cpl->ivgen_hdrlen = htonl(tx_info->scmd0_short_ivgen_hdrlen);
1393 /* check if space left to fill the keys */
1394 left = (void *)q->q.stat - pos;
1396 left = (void *)end - (void *)q->q.stat;
1401 pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
1402 tx_info->key_ctx_len);
1403 left = (void *)q->q.stat - pos;
1406 left = (void *)end - (void *)q->q.stat;
1411 tx_data = (void *)pos;
1412 OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
1413 tx_data->len = htonl(TX_DATA_MSS_V(mss) |
1414 TX_LENGTH_V(skb->data_len + prior_data_len));
1415 tx_data->rsvd = htonl(tcp_seq);
1416 tx_data->flags = htonl(TX_BYPASS_F);
1418 tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
1420 /* check left again, it might go beyond queue limit */
1422 left = (void *)q->q.stat - pos;
1424 /* check the position again */
1426 left = (void *)end - (void *)q->q.stat;
	/* copy the 16 byte IV for AES-CTR, which includes 4 bytes of salt, 8
	 * bytes of actual IV and a 4 byte block counter.
	 */
1433 memcpy(pos, tx_info->key_ctx.salt, tx_info->salt_size);
1434 memcpy(pos + tx_info->salt_size, &iv_record, tx_info->iv_size);
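	/* the trailing 4 bytes form the block counter: GCM bulk data starts at
	 * counter 2, advanced by the number of 16 byte blocks of this record
	 * already sent.
	 */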
1435 *(__be32 *)(pos + tx_info->salt_size + tx_info->iv_size) =
1436 htonl(2 + (tls_rec_offset ? ((tls_rec_offset -
1437 (TLS_HEADER_SIZE + tx_info->iv_size)) / AES_BLOCK_LEN) : 0));
	/* prior_data_len will always be less than 16 bytes; fill prior_data in
	 * right after the AES CTR block and clear the remaining length
1445 pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
1446 /* send the complete packet except the header */
1447 cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
1449 sgl_sdesc->skb = skb;
1451 chcr_txq_advance(&q->q, ndesc);
1452 cxgb4_ring_tx_db(adap, &q->q, ndesc);
 * chcr_ktls_tx_plaintxt: This handler will take care of the records which
 * have only plain text (only the tls header and iv).
 * @tx_info - driver specific tls info.
 * @skb - skb containing a partial record.
1463 * @mss - segment size.
1464 * @tcp_push - tcp push bit.
 * @port_id - port number.
 * @prior_data - data before the current segment, required to make this record
1469 * @prior_data_len - prior_data length (less than 16)
1470 * return: NETDEV_TX_BUSY/NET_TX_OK.
1472 static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
1473 struct sk_buff *skb, u32 tcp_seq, u32 mss,
1474 bool tcp_push, struct sge_eth_txq *q,
1475 u32 port_id, u8 *prior_data,
1478 int credits, left, len16, last_desc;
1479 unsigned int flits = 0, ndesc;
1480 struct tx_sw_desc *sgl_sdesc;
1481 struct cpl_tx_data *tx_data;
1482 struct ulptx_idata *idata;
1483 struct ulp_txpkt *ulptx;
1484 struct fw_ulptx_wr *wr;
1489 flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
1490 flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
1493 /* WR will need len16 */
1494 len16 = DIV_ROUND_UP(flits, 2);
1495 /* check how many descriptors needed */
1496 ndesc = DIV_ROUND_UP(flits, 8);
1498 credits = chcr_txq_avail(&q->q) - ndesc;
1499 if (unlikely(credits < 0)) {
1500 chcr_eth_txq_stop(q);
1501 return NETDEV_TX_BUSY;
1504 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1505 chcr_eth_txq_stop(q);
1506 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1509 last_desc = q->q.pidx + ndesc - 1;
1510 if (last_desc >= q->q.size)
1511 last_desc -= q->q.size;
1512 sgl_sdesc = &q->q.sdesc[last_desc];
1514 if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
1515 sgl_sdesc->addr) < 0)) {
1516 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1518 return NETDEV_TX_BUSY;
1521 pos = &q->q.desc[q->q.pidx];
1522 end = (u64 *)pos + flits;
1525 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1526 wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1530 ulptx = (struct ulp_txpkt *)(wr + 1);
1531 ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
1532 ULP_TXPKT_DATAMODIFY_V(0) |
1533 ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
1534 ULP_TXPKT_DEST_V(0) |
1535 ULP_TXPKT_FID_V(q->q.cntxt_id) | ULP_TXPKT_RO_V(1));
1536 ulptx->len = htonl(len16 - 1);
1537 /* ULPTX_IDATA sub-command */
1538 idata = (struct ulptx_idata *)(ulptx + 1);
1539 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
1540 idata->len = htonl(sizeof(*tx_data) + prior_data_len);
1542 tx_data = (struct cpl_tx_data *)(idata + 1);
1543 OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
1544 tx_data->len = htonl(TX_DATA_MSS_V(mss) |
1545 TX_LENGTH_V(skb->data_len + prior_data_len));
1546 /* set tcp seq number */
1547 tx_data->rsvd = htonl(tcp_seq);
1548 tx_data->flags = htonl(TX_BYPASS_F);
1550 tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
1553 /* apart from prior_data_len, we should set remaining part of 16 bytes
1557 pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
1559 /* check left again, it might go beyond queue limit */
1560 left = (void *)q->q.stat - pos;
1562 /* check the position again */
1564 left = (void *)end - (void *)q->q.stat;
1568 /* send the complete packet including the header */
1569 cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
1571 sgl_sdesc->skb = skb;
1573 chcr_txq_advance(&q->q, ndesc);
1574 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
1579 * chcr_ktls_copy_record_in_skb
 * @nskb - new skb to which the frags are to be added.
 * @record - record whose complete 16k data is held in frags.
1583 static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
1584 struct tls_record_info *record)
1588 for (i = 0; i < record->num_frags; i++) {
1589 skb_shinfo(nskb)->frags[i] = record->frags[i];
1590 /* increase the frag ref count */
1591 __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
1594 skb_shinfo(nskb)->nr_frags = record->num_frags;
1595 nskb->data_len = record->len;
1596 nskb->len += record->len;
1597 nskb->truesize += record->len;
 * chcr_ktls_update_snd_una: Reset the SND_UNA. This is done to avoid sending
 * the same segment again; it discards the segment which is before the current
 * tx max.
1604 * @tx_info - driver specific tls info.
1606 * return: NET_TX_OK/NET_XMIT_DROP.
1608 static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
1609 struct sge_eth_txq *q)
1611 struct fw_ulptx_wr *wr;
1617 len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
1618 ndesc = DIV_ROUND_UP(len, 64);
1620 credits = chcr_txq_avail(&q->q) - ndesc;
1621 if (unlikely(credits < 0)) {
1622 chcr_eth_txq_stop(q);
1623 return NETDEV_TX_BUSY;
1626 pos = &q->q.desc[q->q.pidx];
1630 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1632 /* fill len in wr field */
1633 wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
1637 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
1639 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
1640 TCB_SND_UNA_RAW_V(0), 0);
1642 chcr_txq_advance(&q->q, ndesc);
1643 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
 * chcr_end_part_handler: This handler handles a record which is complete, or
 * whose end part has been received. The T6 adapter has an issue in that it
 * can't send out a TAG with a partial record, so if this is an end part we
 * have to send the TAG as well, for which we need to fetch the complete record
 * and send it to the crypto module.
1654 * @tx_info - driver specific tls info.
1655 * @skb - skb contains partial record.
1656 * @record - complete record of 16K size.
1658 * @mss - segment size in which TP needs to chop a packet.
1659 * @tcp_push_no_fin - tcp push if fin is not set.
1661 * @tls_end_offset - offset from end of the record.
 * @last_wr - true if this is the last part of the skb going out.
1663 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
1665 static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
1666 struct sk_buff *skb,
1667 struct tls_record_info *record,
1668 u32 tcp_seq, int mss, bool tcp_push_no_fin,
1669 struct sge_eth_txq *q,
1670 u32 tls_end_offset, bool last_wr)
1672 struct sk_buff *nskb = NULL;
1673 /* check if it is a complete record */
1674 if (tls_end_offset == record->len) {
1676 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
1678 dev_kfree_skb_any(skb);
1680 nskb = alloc_skb(0, GFP_KERNEL);
1682 return NETDEV_TX_BUSY;
1683 /* copy complete record in skb */
1684 chcr_ktls_copy_record_in_skb(nskb, record);
1685 /* packet is being sent from the beginning, update the tcp_seq
1688 tcp_seq = tls_record_start_seq(record);
1689 /* reset snd una, so the middle record won't send the already
1692 if (chcr_ktls_update_snd_una(tx_info, q))
1694 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
1697 if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
1698 (last_wr && tcp_push_no_fin),
1704 dev_kfree_skb_any(nskb);
1705 return NETDEV_TX_BUSY;
 * chcr_short_record_handler: This handler will take care of the records which
 * don't have an end part (the 1st part or the middle part(s) of a record). In
 * such cases, AES CTR is used in place of AES GCM to send out the partial
 * packet. This partial record might be the first part of the record, or a
 * middle part. In case of a middle record we should fetch the prior data to
 * make it 16 byte aligned. If it has a partial tls header or iv then get to
 * the start of the tls header. And if it has a partial TAG, then remove the
 * complete TAG and send only the data before it.
 * There is one more possibility, that it gets a partial header; send that
 * portion out as plaintext.
1719 * @tx_info - driver specific tls info.
 * @skb - skb containing a partial record.
1721 * @record - complete record of 16K size.
1723 * @mss - segment size in which TP needs to chop a packet.
1724 * @tcp_push_no_fin - tcp push if fin is not set.
1726 * @tls_end_offset - offset from end of the record.
1727 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
1729 static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
1730 struct sk_buff *skb,
1731 struct tls_record_info *record,
1732 u32 tcp_seq, int mss, bool tcp_push_no_fin,
1733 struct sge_eth_txq *q, u32 tls_end_offset)
1735 u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
1736 u8 prior_data[16] = {0};
1737 u32 prior_data_len = 0;
	/* check if the skb is ending in the middle of the tag/HASH; that's a big
	 * trouble, so send the packet before the HASH.
	 */
1743 int remaining_record = tls_end_offset - skb->data_len;
1745 if (remaining_record > 0 &&
1746 remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
1747 int trimmed_len = skb->data_len -
1748 (TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
1749 struct sk_buff *tmp_skb = NULL;
1750 /* don't process the pkt if it is only a partial tag */
1751 if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
1754 WARN_ON(trimmed_len > skb->data_len);
1756 /* shift to those many bytes */
1757 tmp_skb = alloc_skb(0, GFP_KERNEL);
1758 if (unlikely(!tmp_skb))
1761 chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
1762 /* free the last trimmed portion */
1763 dev_kfree_skb_any(skb);
1765 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
1767 data_len = skb->data_len;
1768 /* check if the middle record's start point is 16 byte aligned. CTR
1769 * needs 16 byte aligned start point to start encryption.
1771 if (tls_rec_offset) {
		/* there is an offset from the start, which means it's a middle record */
1775 if (tls_rec_offset < (TLS_HEADER_SIZE + tx_info->iv_size)) {
1776 prior_data_len = tls_rec_offset;
1782 (TLS_HEADER_SIZE + tx_info->iv_size))
1784 remaining = tls_rec_offset - prior_data_len;
1787 /* if prior_data_len is not zero, means we need to fetch prior
1788 * data to make this record 16 byte aligned, or we need to reach
1791 if (prior_data_len) {
1796 int frag_size = 0, frag_delta = 0;
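			/* walk the record frags to find the frag (and offset) that
			 * holds the bytes immediately preceding this segment.
			 */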
1798 while (remaining > 0) {
1799 frag_size = skb_frag_size(&record->frags[i]);
1800 if (remaining < frag_size)
1803 remaining -= frag_size;
1806 f = &record->frags[i];
1807 vaddr = kmap_atomic(skb_frag_page(f));
1809 data = vaddr + skb_frag_off(f) + remaining;
1810 frag_delta = skb_frag_size(f) - remaining;
1812 if (frag_delta >= prior_data_len) {
1813 memcpy(prior_data, data, prior_data_len);
1814 kunmap_atomic(vaddr);
1816 memcpy(prior_data, data, frag_delta);
1817 kunmap_atomic(vaddr);
1818 /* get the next page */
1819 f = &record->frags[i + 1];
1820 vaddr = kmap_atomic(skb_frag_page(f));
1821 data = vaddr + skb_frag_off(f);
1822 memcpy(prior_data + frag_delta,
1823 data, (prior_data_len - frag_delta));
1824 kunmap_atomic(vaddr);
		/* reset tcp_seq as per the prior_data length */
1827 tcp_seq -= prior_data_len;
		/* include prior_data_len for further calculation.
1830 data_len += prior_data_len;
1832 /* reset snd una, so the middle record won't send the already
1835 if (chcr_ktls_update_snd_una(tx_info, q))
1837 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
		/* Else it means it's a partial first part of the record. Check if
		 * it's only the header; no need to send it for encryption then.
		 */
1842 if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
1843 if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
1852 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
1855 if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
1856 mss, tls_rec_offset, prior_data,
1863 dev_kfree_skb_any(skb);
1864 return NETDEV_TX_BUSY;
1867 /* nic tls TX handler */
1868 static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
1870 u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
1871 struct ch_ktls_port_stats_debug *port_stats;
1872 struct chcr_ktls_ofld_ctx_tx *tx_ctx;
1873 struct ch_ktls_stats_debug *stats;
1874 struct tcphdr *th = tcp_hdr(skb);
1875 int data_len, qidx, ret = 0, mss;
1876 struct tls_record_info *record;
1877 struct chcr_ktls_info *tx_info;
1878 struct tls_context *tls_ctx;
1879 struct sk_buff *local_skb;
1880 struct sge_eth_txq *q;
1881 struct adapter *adap;
1882 unsigned long flags;
1884 tcp_seq = ntohl(th->seq);
1885 skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
1886 skb_data_len = skb->len - skb_offset;
1887 data_len = skb_data_len;
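	/* for non-GSO skbs the whole payload is treated as a single segment */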
1889 mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
1891 tls_ctx = tls_get_ctx(skb->sk);
1892 if (unlikely(tls_ctx->netdev != dev))
1895 tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
1896 tx_info = tx_ctx->chcr_info;
1898 if (unlikely(!tx_info))
	/* don't touch the original skb, make a new skb to extract each record
	 * and send them separately.
	 */
1904 local_skb = alloc_skb(0, GFP_KERNEL);
1906 if (unlikely(!local_skb))
1907 return NETDEV_TX_BUSY;
1909 adap = tx_info->adap;
1910 stats = &adap->ch_ktls_stats;
1911 port_stats = &stats->ktls_port[tx_info->port_id];
1913 qidx = skb->queue_mapping;
1914 q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
1915 cxgb4_reclaim_completed_tx(adap, &q->q, true);
	/* if tcp options are set but fin is not, send the options out first */
1917 if (!th->fin && chcr_ktls_check_tcp_options(th)) {
1918 ret = chcr_ktls_write_tcp_options(tx_info, skb, q,
1921 return NETDEV_TX_BUSY;
1924 ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
1928 dev_kfree_skb_any(local_skb);
1929 return NETDEV_TX_BUSY;
1932 /* copy skb contents into local skb */
1933 chcr_ktls_skb_copy(skb, local_skb);
	/* TCP segments can be received either complete or partial.
	 * chcr_end_part_handler will handle the cases where a complete record or
	 * the end part of a record is received. In case of a partial end part of
	 * a record, we will send the complete record again.
	 */
1944 cxgb4_reclaim_completed_tx(adap, &q->q, true);
1946 spin_lock_irqsave(&tx_ctx->base.lock, flags);
1947 /* fetch the tls record */
1948 record = tls_get_record(&tx_ctx->base, tcp_seq,
1949 &tx_info->record_no);
		/* By the time the packet reached us, the ACK may already have been
		 * received and the record won't be found in that case; handle it
		 * gracefully.
		 */
1953 if (unlikely(!record)) {
1954 spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
1955 atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
1959 if (unlikely(tls_record_is_start_marker(record))) {
1960 spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
1961 atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
		/* increase the page reference count of the record, so that there
		 * is no chance of a page being freed in the middle if the stack
		 * receives an ACK and tries to delete the record.
		 */
1969 for (i = 0; i < record->num_frags; i++)
1970 __skb_frag_ref(&record->frags[i]);
1972 spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
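		/* number of payload bytes from tcp_seq to the end of this TLS record */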
1974 tls_end_offset = record->end_seq - tcp_seq;
1976 pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
1977 tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
1978 /* if a tls record is finishing in this SKB */
1979 if (tls_end_offset <= data_len) {
1980 struct sk_buff *nskb = NULL;
1982 if (tls_end_offset < data_len) {
1983 nskb = alloc_skb(0, GFP_KERNEL);
1984 if (unlikely(!nskb)) {
1989 chcr_ktls_skb_shift(nskb, local_skb,
			/* it's the only record in this skb, directly
1997 ret = chcr_end_part_handler(tx_info, nskb, record,
1999 (!th->fin && th->psh), q,
2001 (nskb == local_skb));
2003 if (ret && nskb != local_skb)
2004 dev_kfree_skb_any(local_skb);
2006 data_len -= tls_end_offset;
2007 /* tcp_seq increment is required to handle next record.
2009 tcp_seq += tls_end_offset;
2011 ret = chcr_short_record_handler(tx_info, local_skb,
2012 record, tcp_seq, mss,
2013 (!th->fin && th->psh),
		/* clear the frag ref count which was increased locally before */
2019 for (i = 0; i < record->num_frags; i++) {
2020 /* clear the frag ref count */
2021 __skb_frag_unref(&record->frags[i]);
		/* on any failure, break out of the loop. */
2026 /* length should never be less than 0 */
2027 WARN_ON(data_len < 0);
2029 } while (data_len > 0);
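	/* remember where this skb's payload ends; a later skb whose sequence
	 * number differs from this is handled as a retransmit in
	 * chcr_ktls_xmit_tcb_cpls().
	 */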
2031 tx_info->prev_seq = ntohl(th->seq) + skb_data_len;
2032 atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
2033 atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
	/* tcp fin is set, send a separate tcp msg including all the options
2039 chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
2042 dev_kfree_skb_any(skb);
2043 return NETDEV_TX_OK;
2046 static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
2048 struct chcr_ktls_uld_ctx *u_ctx;
2050 pr_info_once("%s - version %s\n", CHCR_KTLS_DRV_DESC,
2051 CHCR_KTLS_DRV_VERSION);
2052 u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
2054 u_ctx = ERR_PTR(-ENOMEM);
2057 u_ctx->lldi = *lldi;
2062 static const struct tlsdev_ops chcr_ktls_ops = {
2063 .tls_dev_add = chcr_ktls_dev_add,
2064 .tls_dev_del = chcr_ktls_dev_del,
2067 static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
2068 [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
2069 [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
2072 static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
2073 const struct pkt_gl *pgl)
2075 const struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)rsp;
2076 struct chcr_ktls_uld_ctx *u_ctx = handle;
2077 u8 opcode = rpl->ot.opcode;
2078 struct adapter *adap;
2080 adap = pci_get_drvdata(u_ctx->lldi.pdev);
2082 if (!work_handlers[opcode]) {
2083 pr_err("Unsupported opcode %d received\n", opcode);
2087 work_handlers[opcode](adap, (unsigned char *)&rsp[1]);
2091 static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
2093 struct chcr_ktls_uld_ctx *u_ctx = handle;
2095 switch (new_state) {
2096 case CXGB4_STATE_UP:
2097 pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
2098 mutex_lock(&dev_mutex);
2099 list_add_tail(&u_ctx->entry, &uld_ctx_list);
2100 mutex_unlock(&dev_mutex);
2102 case CXGB4_STATE_START_RECOVERY:
2103 case CXGB4_STATE_DOWN:
2104 case CXGB4_STATE_DETACH:
2105 pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
2106 mutex_lock(&dev_mutex);
2107 list_del(&u_ctx->entry);
2108 mutex_unlock(&dev_mutex);
2117 static struct cxgb4_uld_info chcr_ktls_uld_info = {
2118 .name = CHCR_KTLS_DRV_MODULE_NAME,
2121 .add = chcr_ktls_uld_add,
2122 .tx_handler = chcr_ktls_xmit,
2123 .rx_handler = chcr_ktls_uld_rx_handler,
2124 .state_change = chcr_ktls_uld_state_change,
2125 .tlsdev_ops = &chcr_ktls_ops,
2128 static int __init chcr_ktls_init(void)
2130 cxgb4_register_uld(CXGB4_ULD_KTLS, &chcr_ktls_uld_info);
2134 static void __exit chcr_ktls_exit(void)
2136 struct chcr_ktls_uld_ctx *u_ctx, *tmp;
2137 struct adapter *adap;
2139 pr_info("driver unloaded\n");
2141 mutex_lock(&dev_mutex);
2142 list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
2143 adap = pci_get_drvdata(u_ctx->lldi.pdev);
2144 memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
2145 list_del(&u_ctx->entry);
2148 mutex_unlock(&dev_mutex);
2149 cxgb4_unregister_uld(CXGB4_ULD_KTLS);
2152 module_init(chcr_ktls_init);
2153 module_exit(chcr_ktls_exit);
2155 MODULE_DESCRIPTION("Chelsio NIC TLS ULD driver");
2156 MODULE_LICENSE("GPL");
2157 MODULE_AUTHOR("Chelsio Communications");
2158 MODULE_VERSION(CHCR_KTLS_DRV_VERSION);