1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/skbuff.h>
7 #include <linux/module.h>
8 #include <linux/highmem.h>
11 #include <linux/netdevice.h>
12 #include "chcr_ktls.h"
14 static LIST_HEAD(uld_ctx_list);
15 static DEFINE_MUTEX(dev_mutex);
17 /* chcr_get_nfrags_to_send: get the remaining nfrags after start offset
19 * @start: start offset.
20 * @len: how much data to send after @start
22 static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
24 struct skb_shared_info *si = skb_shinfo(skb);
25 u32 frag_size, skb_linear_data_len = skb_headlen(skb);
26 u8 nfrags = 0, frag_idx = 0;
29 /* if it's a linear skb then return 1 */
30 if (!skb_is_nonlinear(skb))
33 if (unlikely(start < skb_linear_data_len)) {
34 frag_size = min(len, skb_linear_data_len - start);
37 start -= skb_linear_data_len;
39 frag = &si->frags[frag_idx];
40 frag_size = skb_frag_size(frag);
41 while (start >= frag_size) {
44 frag = &si->frags[frag_idx];
45 frag_size = skb_frag_size(frag);
47 frag_size = min(len, skb_frag_size(frag) - start);
53 frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
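/* Worked example (a sketch; the elided loop body is assumed to bump
 * nfrags once per chunk consumed): an skb with 100 bytes of linear
 * data and two 1500-byte frags, with start = 50 and len = 2000, counts
 * the 50-byte linear tail, all of frag 0 and 450 bytes of frag 1,
 * i.e. 3 SGL entries.
 */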
61 static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
63 * chcr_ktls_save_keys: calculate and save crypto keys.
64 * @tx_info - driver specific tls info.
65 * @crypto_info - tls crypto information.
66 * @direction - TX/RX direction.
67 * return - SUCCESS/FAILURE.
69 static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
70 struct tls_crypto_info *crypto_info,
71 enum tls_offload_ctx_dir direction)
73 int ck_size, key_ctx_size, mac_key_size, keylen, ghash_size, ret;
74 unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
75 struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
76 struct ktls_key_ctx *kctx = &tx_info->key_ctx;
77 struct crypto_cipher *cipher;
78 unsigned char *key, *salt;
80 switch (crypto_info->cipher_type) {
81 case TLS_CIPHER_AES_GCM_128:
83 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
84 keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
85 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
86 tx_info->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
87 mac_key_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
88 tx_info->iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
89 tx_info->iv = be64_to_cpu(*(__be64 *)info_128_gcm->iv);
91 ghash_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
92 key = info_128_gcm->key;
93 salt = info_128_gcm->salt;
94 tx_info->record_no = *(u64 *)info_128_gcm->rec_seq;
96 /* The SCMD fields used when encrypting a full TLS
97 * record. It's a one-time calculation till the
100 tx_info->scmd0_seqno_numivs =
101 SCMD_SEQ_NO_CTRL_V(CHCR_SCMD_SEQ_NO_CTRL_64BIT) |
102 SCMD_CIPH_AUTH_SEQ_CTRL_F |
103 SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_TLS) |
104 SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
105 SCMD_AUTH_MODE_V(CHCR_SCMD_AUTH_MODE_GHASH) |
106 SCMD_IV_SIZE_V(TLS_CIPHER_AES_GCM_128_IV_SIZE >> 1) |
109 /* keys will be sent inline. */
110 tx_info->scmd0_ivgen_hdrlen = SCMD_KEY_CTX_INLINE_F;
112 /* The SCMD fields used when encrypting a partial TLS
113 * record (no trailer and possibly a truncated payload).
115 tx_info->scmd0_short_seqno_numivs =
116 SCMD_CIPH_AUTH_SEQ_CTRL_F |
117 SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) |
118 SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_CTR) |
119 SCMD_IV_SIZE_V(AES_BLOCK_LEN >> 1);
121 tx_info->scmd0_short_ivgen_hdrlen =
122 tx_info->scmd0_ivgen_hdrlen | SCMD_AADIVDROP_F;
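/* A note on the two SCMD templates above: scmd0_* drives full-record
 * AES-GCM processing, while scmd0_short_* switches the engine to plain
 * AES-CTR for partial records, where GCM cannot resume mid-record and
 * no TAG is emitted (see the partial-record handlers below).
 */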
127 pr_err("GCM: cipher type 0x%x not supported\n",
128 crypto_info->cipher_type);
133 key_ctx_size = CHCR_KTLS_KEY_CTX_LEN +
134 roundup(keylen, 16) + ghash_size;
135 /* Calculate the H = CIPH(K, 0 repeated 16 times).
136 * It will go in key context
138 cipher = crypto_alloc_cipher("aes", 0, 0);
139 if (IS_ERR(cipher)) {
144 ret = crypto_cipher_setkey(cipher, key, keylen);
148 memset(ghash_h, 0, ghash_size);
149 crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
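/* ghash_h now holds H = AES_K(0^128), the GHASH hash subkey that GCM
 * derives by encrypting an all-zero block; the hw expects it appended
 * after the raw key in the key context filled in below.
 */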
151 /* fill the Key context */
152 if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
153 kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
161 memcpy(kctx->salt, salt, tx_info->salt_size);
162 memcpy(kctx->key, key, keylen);
163 memcpy(kctx->key + keylen, ghash_h, ghash_size);
164 tx_info->key_ctx_len = key_ctx_size;
167 crypto_free_cipher(cipher);
173 * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
175 * @tx_info - driver specific tls info.
176 * @atid - connection active tid.
177 * return - send success/failure.
179 static int chcr_ktls_act_open_req(struct sock *sk,
180 struct chcr_ktls_info *tx_info,
183 struct inet_sock *inet = inet_sk(sk);
184 struct cpl_t6_act_open_req *cpl6;
185 struct cpl_act_open_req *cpl;
192 skb = alloc_skb(len, GFP_KERNEL);
195 /* mark it a control pkt */
196 set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
198 cpl6 = __skb_put_zero(skb, len);
199 cpl = (struct cpl_act_open_req *)cpl6;
201 qid_atid = TID_QID_V(tx_info->rx_qid) |
203 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid));
204 cpl->local_port = inet->inet_sport;
205 cpl->peer_port = inet->inet_dport;
206 cpl->local_ip = inet->inet_rcv_saddr;
207 cpl->peer_ip = inet->inet_daddr;
209 /* fill first 64 bit option field. */
210 options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
211 SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
212 cpl->opt0 = cpu_to_be64(options);
214 /* next 64 bit option field. */
216 TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
217 cpl->opt2 = htonl(options);
219 return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
222 #if IS_ENABLED(CONFIG_IPV6)
224 * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
226 * @tx_info - driver specific tls info.
227 * @atid - connection active tid.
228 * return - send success/failure.
230 static int chcr_ktls_act_open_req6(struct sock *sk,
231 struct chcr_ktls_info *tx_info,
234 struct inet_sock *inet = inet_sk(sk);
235 struct cpl_t6_act_open_req6 *cpl6;
236 struct cpl_act_open_req6 *cpl;
243 skb = alloc_skb(len, GFP_KERNEL);
246 /* mark it a control pkt */
247 set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
249 cpl6 = __skb_put_zero(skb, len);
250 cpl = (struct cpl_act_open_req6 *)cpl6;
252 qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
253 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
254 cpl->local_port = inet->inet_sport;
255 cpl->peer_port = inet->inet_dport;
256 cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
257 cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
258 cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
259 cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];
261 /* first 64 bit option field. */
262 options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
263 SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
264 cpl->opt0 = cpu_to_be64(options);
265 /* next 64 bit option field. */
267 TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
268 cpl->opt2 = htonl(options);
270 return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
272 #endif /* #if IS_ENABLED(CONFIG_IPV6) */
275 * chcr_setup_connection: create a TCB entry so that TP will form tcp packets.
277 * @tx_info - driver specific tls info.
278 * return: NET_TX_OK/NET_XMIT_DROP
280 static int chcr_setup_connection(struct sock *sk,
281 struct chcr_ktls_info *tx_info)
283 struct tid_info *t = &tx_info->adap->tids;
286 atid = cxgb4_alloc_atid(t, tx_info);
290 tx_info->atid = atid;
292 if (tx_info->ip_family == AF_INET) {
293 ret = chcr_ktls_act_open_req(sk, tx_info, atid);
294 #if IS_ENABLED(CONFIG_IPV6)
296 ret = cxgb4_clip_get(tx_info->netdev, (const u32 *)
297 &sk->sk_v6_rcv_saddr,
301 ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
305 /* if the return type is NET_XMIT_CN, the msg will be sent but delayed; mark
306 * ret as success. For any other return type, clear the atid and return that failure.
309 if (ret == NET_XMIT_CN) {
312 #if IS_ENABLED(CONFIG_IPV6)
313 /* clear clip entry */
314 if (tx_info->ip_family == AF_INET6)
315 cxgb4_clip_release(tx_info->netdev,
317 &sk->sk_v6_rcv_saddr,
320 cxgb4_free_atid(t, atid);
328 * chcr_set_tcb_field: update tcb fields.
329 * @tx_info - driver specific tls info.
331 * @mask - TCB word related mask.
332 * @val - TCB word related value.
333 * @no_reply - set 1 if not looking for TP response.
335 static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
336 u64 mask, u64 val, int no_reply)
338 struct cpl_set_tcb_field *req;
341 skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
345 req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
346 INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, tx_info->tid);
347 req->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
348 NO_REPLY_V(no_reply));
349 req->word_cookie = htons(TCB_WORD_V(word));
350 req->mask = cpu_to_be64(mask);
351 req->val = cpu_to_be64(val);
353 set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
354 return cxgb4_ofld_send(tx_info->netdev, skb);
358 * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
359 * @tx_info - driver specific tls info.
360 * return: NET_TX_OK/NET_XMIT_DROP.
362 static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
364 return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
365 TCB_T_STATE_V(TCB_T_STATE_M),
366 CHCR_TCB_STATE_CLOSED, 1);
370 * chcr_ktls_dev_del: call back for tls_dev_del.
371 * Remove the tid and l2t entry and close the connection;
372 * done on a per-connection basis.
373 * @netdev - net device.
374 * @tls_ctx - tls context.
375 * @direction - TX/RX crypto direction
377 static void chcr_ktls_dev_del(struct net_device *netdev,
378 struct tls_context *tls_ctx,
379 enum tls_offload_ctx_dir direction)
381 struct chcr_ktls_ofld_ctx_tx *tx_ctx =
382 chcr_get_ktls_tx_context(tls_ctx);
383 struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
384 struct ch_ktls_port_stats_debug *port_stats;
389 /* clear l2t entry */
391 cxgb4_l2t_release(tx_info->l2te);
393 #if IS_ENABLED(CONFIG_IPV6)
394 /* clear clip entry */
395 if (tx_info->ip_family == AF_INET6)
396 cxgb4_clip_release(netdev, (const u32 *)
397 &tx_info->sk->sk_v6_rcv_saddr,
402 if (tx_info->tid != -1) {
403 /* clear tcb state and then release tid */
404 chcr_ktls_mark_tcb_close(tx_info);
405 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
406 tx_info->tid, tx_info->ip_family);
409 port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
410 atomic64_inc(&port_stats->ktls_tx_connection_close);
412 tx_ctx->chcr_info = NULL;
413 /* release module refcount */
414 module_put(THIS_MODULE);
418 * chcr_ktls_dev_add: call back for tls_dev_add.
419 * Create a tcb entry for TP. Also add an l2t entry for the connection, and
420 * generate keys & save those keys locally.
421 * @netdev - net device.
422 * @tls_ctx - tls context.
423 * @direction - TX/RX crypto direction
424 * return: SUCCESS/FAILURE.
426 static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
427 enum tls_offload_ctx_dir direction,
428 struct tls_crypto_info *crypto_info,
429 u32 start_offload_tcp_sn)
431 struct tls_context *tls_ctx = tls_get_ctx(sk);
432 struct ch_ktls_port_stats_debug *port_stats;
433 struct chcr_ktls_ofld_ctx_tx *tx_ctx;
434 struct chcr_ktls_info *tx_info;
435 struct dst_entry *dst;
436 struct adapter *adap;
437 struct port_info *pi;
442 tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
444 pi = netdev_priv(netdev);
446 port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
447 atomic64_inc(&port_stats->ktls_tx_connection_open);
449 if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
450 pr_err("not expecting for RX direction\n");
454 if (tx_ctx->chcr_info)
457 tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
462 spin_lock_init(&tx_info->lock);
463 /* initialize tid and atid to -1; 0 is also a valid id. */
467 tx_info->adap = adap;
468 tx_info->netdev = netdev;
469 tx_info->first_qset = pi->first_qset;
470 tx_info->tx_chan = pi->tx_chan;
471 tx_info->smt_idx = pi->smt_idx;
472 tx_info->port_id = pi->port_id;
473 tx_info->prev_ack = 0;
474 tx_info->prev_win = 0;
476 tx_info->rx_qid = chcr_get_first_rx_qid(adap);
477 if (unlikely(tx_info->rx_qid < 0))
480 tx_info->prev_seq = start_offload_tcp_sn;
481 tx_info->tcp_start_seq_number = start_offload_tcp_sn;
483 /* save crypto keys */
484 ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
489 if (sk->sk_family == AF_INET) {
490 memcpy(daaddr, &sk->sk_daddr, 4);
491 tx_info->ip_family = AF_INET;
492 #if IS_ENABLED(CONFIG_IPV6)
494 if (!sk->sk_ipv6only &&
495 ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
496 memcpy(daaddr, &sk->sk_daddr, 4);
497 tx_info->ip_family = AF_INET;
499 memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
500 tx_info->ip_family = AF_INET6;
505 /* get the l2t index */
506 dst = sk_dst_get(sk);
508 pr_err("DST entry not found\n");
511 n = dst_neigh_lookup(dst, daaddr);
513 pr_err("neighbour not found\n");
517 tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);
522 if (!tx_info->l2te) {
523 pr_err("l2t entry not found\n");
527 /* Driver shouldn't be removed while any connection exists */
528 if (!try_module_get(THIS_MODULE))
531 init_completion(&tx_info->completion);
532 /* create a filter and call cxgb4_l2t_send to send the packet out, which
533 * will take care of updating the l2t entry in hw if not already done.
535 tx_info->open_state = CH_KTLS_OPEN_PENDING;
537 if (chcr_setup_connection(sk, tx_info))
541 wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
542 spin_lock_bh(&tx_info->lock);
543 if (tx_info->open_state) {
544 /* need to wait for hw response, can't free tx_info yet. */
545 if (tx_info->open_state == CH_KTLS_OPEN_PENDING)
546 tx_info->pending_close = true;
547 /* release the lock after the cleanup */
550 spin_unlock_bh(&tx_info->lock);
553 reinit_completion(&tx_info->completion);
554 /* mark it pending for hw response */
555 tx_info->open_state = CH_KTLS_OPEN_PENDING;
557 if (chcr_init_tcb_fields(tx_info))
561 wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
562 spin_lock_bh(&tx_info->lock);
563 if (tx_info->open_state) {
564 /* need to wait for hw response, can't free tx_info yet. */
565 tx_info->pending_close = true;
566 /* release the lock after cleanup */
569 spin_unlock_bh(&tx_info->lock);
571 if (!cxgb4_check_l2t_valid(tx_info->l2te))
574 atomic64_inc(&port_stats->ktls_tx_ctx);
575 tx_ctx->chcr_info = tx_info;
580 chcr_ktls_mark_tcb_close(tx_info);
581 #if IS_ENABLED(CONFIG_IPV6)
582 /* clear clip entry */
583 if (tx_info->ip_family == AF_INET6)
584 cxgb4_clip_release(netdev, (const u32 *)
585 &sk->sk_v6_rcv_saddr,
588 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
589 tx_info->tid, tx_info->ip_family);
592 /* release module refcount */
593 module_put(THIS_MODULE);
595 cxgb4_l2t_release(tx_info->l2te);
597 if (tx_info->pending_close)
598 spin_unlock_bh(&tx_info->lock);
602 atomic64_inc(&port_stats->ktls_tx_connection_fail);
607 * chcr_init_tcb_fields: Initialize tcb fields to handle TCP seq number
609 * @tx_info - driver specific tls info.
610 * return: NET_TX_OK/NET_XMIT_DROP
612 static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
616 /* set tcb in offload and bypass */
618 chcr_set_tcb_field(tx_info, TCB_T_FLAGS_W,
619 TCB_T_FLAGS_V(TF_CORE_BYPASS_F | TF_NON_OFFLOAD_F),
620 TCB_T_FLAGS_V(TF_CORE_BYPASS_F), 1);
623 /* reset snd_una and snd_next fields in tcb */
624 ret = chcr_set_tcb_field(tx_info, TCB_SND_UNA_RAW_W,
625 TCB_SND_NXT_RAW_V(TCB_SND_NXT_RAW_M) |
626 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
632 ret = chcr_set_tcb_field(tx_info, TCB_SND_MAX_RAW_W,
633 TCB_SND_MAX_RAW_V(TCB_SND_MAX_RAW_M),
638 /* update the l2t index and request a tp reply to confirm the tcb is
639 * initialised to handle tx traffic.
641 ret = chcr_set_tcb_field(tx_info, TCB_L2T_IX_W,
642 TCB_L2T_IX_V(TCB_L2T_IX_M),
643 TCB_L2T_IX_V(tx_info->l2te->idx), 0);
648 * chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
650 static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
651 unsigned char *input)
653 const struct cpl_act_open_rpl *p = (void *)input;
654 struct chcr_ktls_info *tx_info = NULL;
655 unsigned int atid, tid, status;
659 status = AOPEN_STATUS_G(ntohl(p->atid_status));
660 atid = TID_TID_G(AOPEN_ATID_G(ntohl(p->atid_status)));
663 tx_info = lookup_atid(t, atid);
665 if (!tx_info || tx_info->atid != atid) {
666 pr_err("%s: incorrect tx_info or atid\n", __func__);
670 cxgb4_free_atid(t, atid);
673 spin_lock(&tx_info->lock);
674 /* HW response is very close, finish pending cleanup */
675 if (tx_info->pending_close) {
676 spin_unlock(&tx_info->lock);
678 /* it's a late success, tcb status is established,
681 chcr_ktls_mark_tcb_close(tx_info);
682 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
683 tid, tx_info->ip_family);
691 cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
692 tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
694 tx_info->open_state = CH_KTLS_OPEN_FAILURE;
696 spin_unlock(&tx_info->lock);
698 complete(&tx_info->completion);
703 * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
705 static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
707 const struct cpl_set_tcb_rpl *p = (void *)input;
708 struct chcr_ktls_info *tx_info = NULL;
715 tx_info = lookup_tid(t, tid);
717 if (!tx_info || tx_info->tid != tid) {
718 pr_err("%s: incorrect tx_info or tid\n", __func__);
722 spin_lock(&tx_info->lock);
723 if (tx_info->pending_close) {
724 spin_unlock(&tx_info->lock);
728 tx_info->open_state = false;
729 spin_unlock(&tx_info->lock);
731 complete(&tx_info->completion);
735 static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
736 u32 tid, void *pos, u16 word, u64 mask,
739 struct cpl_set_tcb_field_core *cpl;
740 struct ulptx_idata *idata;
741 struct ulp_txpkt *txpkt;
745 txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
746 txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
748 /* ULPTX_IDATA sub-command */
749 idata = (struct ulptx_idata *)(txpkt + 1);
750 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
751 idata->len = htonl(sizeof(*cpl));
755 /* CPL_SET_TCB_FIELD */
756 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
757 cpl->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
759 cpl->word_cookie = htons(TCB_WORD_V(word));
760 cpl->mask = cpu_to_be64(mask);
761 cpl->val = cpu_to_be64(val);
764 idata = (struct ulptx_idata *)(cpl + 1);
765 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
766 idata->len = htonl(0);
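/* the trailing ULP_TX_SC_NOOP pads the sub-command list out to the
 * 16-byte boundary the hw expects (an assumption, mirroring other
 * cxgb4 ULP_TX users).
 */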
774 * chcr_write_cpl_set_tcb_ulp: update tcb values.
775 * The TCB is responsible for creating tcp headers, so all the related values
776 * should be correctly updated.
777 * @tx_info - driver specific tls info.
778 * @q - tx queue on which packet is going out.
779 * @tid - TCB identifier.
780 * @pos - current index where we should start writing.
782 * @mask - TCB word related mask.
783 * @val - TCB word related value.
784 * @reply - set 1 if looking for TP response.
785 * return - next position to write.
787 static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
788 struct sge_eth_txq *q, u32 tid,
789 void *pos, u16 word, u64 mask,
792 int left = (void *)q->q.stat - pos;
794 if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
800 __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
803 return chcr_copy_to_txd(buf, &q->q, pos,
804 CHCR_SET_TCB_FIELD_LEN);
808 pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
811 /* check again if we are at the end of the queue */
812 if (left == CHCR_SET_TCB_FIELD_LEN)
819 * chcr_ktls_xmit_tcb_cpls: update tcb entry so that TP will create the header
820 * with updated values like tcp seq, ack, window etc.
821 * @tx_info - driver specific tls info.
826 * return: NETDEV_TX_BUSY/NET_TX_OK.
828 static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
829 struct sge_eth_txq *q, u64 tcp_seq,
830 u64 tcp_ack, u64 tcp_win)
832 bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
833 struct ch_ktls_port_stats_debug *port_stats;
834 u32 len, cpl = 0, ndesc, wr_len;
835 struct fw_ulptx_wr *wr;
839 wr_len = sizeof(*wr);
840 /* there can be at most 4 cpls; check if we have enough credits */
841 len = wr_len + 4 * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
842 ndesc = DIV_ROUND_UP(len, 64);
844 credits = chcr_txq_avail(&q->q) - ndesc;
845 if (unlikely(credits < 0)) {
846 chcr_eth_txq_stop(q);
847 return NETDEV_TX_BUSY;
850 pos = &q->q.desc[q->q.pidx];
851 /* make space for WR, we'll fill it later when we know all the cpls
852 * being sent out and have the complete length.
856 /* update tx_max if it's a re-transmit or the first wr */
857 if (first_wr || tcp_seq != tx_info->prev_seq) {
858 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
860 TCB_TX_MAX_V(TCB_TX_MAX_M),
861 TCB_TX_MAX_V(tcp_seq), 0);
864 /* reset snd una if it's a re-transmit pkt */
865 if (tcp_seq != tx_info->prev_seq) {
868 &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
869 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
873 TCB_SND_UNA_RAW_V(0), 0);
874 atomic64_inc(&port_stats->ktls_tx_ooo);
878 if (first_wr || tx_info->prev_ack != tcp_ack) {
879 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
881 TCB_RCV_NXT_V(TCB_RCV_NXT_M),
882 TCB_RCV_NXT_V(tcp_ack), 0);
883 tx_info->prev_ack = tcp_ack;
886 /* update receive window */
887 if (first_wr || tx_info->prev_win != tcp_win) {
888 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
890 TCB_RCV_WND_V(TCB_RCV_WND_M),
891 TCB_RCV_WND_V(tcp_win), 0);
892 tx_info->prev_win = tcp_win;
897 /* get the actual length */
898 len = wr_len + cpl * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
900 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
902 /* fill len in wr field */
903 wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
905 ndesc = DIV_ROUND_UP(len, 64);
906 chcr_txq_advance(&q->q, ndesc);
907 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
913 * chcr_ktls_get_tx_flits
914 * returns the number of flits to be sent out; it includes the key context
915 * length, WR size and skb fragments.
918 chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len)
920 return chcr_sgl_len(nr_frags) +
921 DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
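/* a flit is 8 bytes on this hw: the WR plus key context is rounded up
 * to whole flits, and chcr_sgl_len() adds the per-frag SGL cost.
 */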
925 * chcr_ktls_check_tcp_options: To check if there is any TCP option available
926 * other than timestamp.
927 * @skb - skb contains partial record.
931 chcr_ktls_check_tcp_options(struct tcphdr *tcp)
933 int cnt, opt, optlen;
936 cp = (u_char *)(tcp + 1);
937 cnt = (tcp->doff << 2) - sizeof(struct tcphdr);
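/* tcp->doff counts 32-bit words, so doff << 2 is the full header
 * length in bytes; subtracting sizeof(struct tcphdr) leaves just the
 * options area, which the loop below walks option by option.
 */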
938 for (; cnt > 0; cnt -= optlen, cp += optlen) {
940 if (opt == TCPOPT_EOL)
942 if (opt == TCPOPT_NOP) {
948 if (optlen < 2 || optlen > cnt)
962 * chcr_ktls_write_tcp_options: TP can't send out all the options, so we need
963 * to send them out separately.
964 * @tx_info - driver specific tls info.
965 * @skb - skb contains partial record.
967 * @tx_chan - channel number.
968 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
971 chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
972 struct sge_eth_txq *q, uint32_t tx_chan)
974 struct fw_eth_tx_pkt_wr *wr;
975 struct cpl_tx_pkt_core *cpl;
976 u32 ctrl, iplen, maclen;
977 #if IS_ENABLED(CONFIG_IPV6)
989 iplen = skb_network_header_len(skb);
990 maclen = skb_mac_header_len(skb);
992 /* packet length = eth hdr len + ip hdr len + tcp hdr len
993 * (including options).
995 pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
997 ctrl = sizeof(*cpl) + pktlen;
998 len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
999 /* check how many descriptors needed */
1000 ndesc = DIV_ROUND_UP(len16, 4);
1002 credits = chcr_txq_avail(&q->q) - ndesc;
1003 if (unlikely(credits < 0)) {
1004 chcr_eth_txq_stop(q);
1005 return NETDEV_TX_BUSY;
1008 pos = &q->q.desc[q->q.pidx];
1011 /* Firmware work request header */
1012 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1013 FW_WR_IMMDLEN_V(ctrl));
1015 wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
1018 cpl = (void *)(wr + 1);
1021 cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | TXPKT_INTF_V(tx_chan) |
1022 TXPKT_PF_V(tx_info->adap->pf));
1024 cpl->len = htons(pktlen);
1026 memcpy(buf, skb->data, pktlen);
1027 if (tx_info->ip_family == AF_INET) {
1028 /* we need to correct ip header len */
1029 ip = (struct iphdr *)(buf + maclen);
1030 ip->tot_len = htons(pktlen - maclen);
1031 cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
1032 #if IS_ENABLED(CONFIG_IPV6)
1034 ip6 = (struct ipv6hdr *)(buf + maclen);
1035 ip6->payload_len = htons(pktlen - maclen - iplen);
1036 cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
1040 cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
1041 TXPKT_IPHDR_LEN_V(iplen);
1042 /* checksum offload */
1043 cpl->ctrl1 = cpu_to_be64(cntrl1);
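/* the headers were copied from an skb that still carries the tls
 * payload, so the tot_len/payload_len fixups above keep this
 * header-only control packet self-consistent on the wire.
 */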
1047 /* now take care of the tcp header: if fin is not set then clear the push
1048 * bit as well, and if fin is set, it will be sent last, so we
1049 * need to update the tcp sequence number to match the last packet.
1051 tcp = (struct tcphdr *)(buf + maclen + iplen);
1056 tcp->seq = htonl(tx_info->prev_seq);
1058 chcr_copy_to_txd(buf, &q->q, pos, pktlen);
1060 chcr_txq_advance(&q->q, ndesc);
1061 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
1066 * chcr_ktls_xmit_wr_complete: This sends out the complete record. If an skb
1067 * received has a partial end part of the record, send out the complete record, so
1068 * that the crypto block will be able to generate the TAG/HASH.
1069 * @skb - segment which has complete or partial end part.
1070 * @tx_info - driver specific tls info.
1073 * @tcp_push - tcp push bit.
1074 * @mss - segment size.
1075 * return: NETDEV_TX_BUSY/NET_TX_OK.
1077 static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
1078 struct chcr_ktls_info *tx_info,
1079 struct sge_eth_txq *q, u32 tcp_seq,
1080 bool is_last_wr, u32 data_len,
1081 u32 skb_offset, u32 nfrags,
1082 bool tcp_push, u32 mss)
1084 u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
1085 struct adapter *adap = tx_info->adap;
1086 int credits, left, last_desc;
1087 struct tx_sw_desc *sgl_sdesc;
1088 struct cpl_tx_data *tx_data;
1089 struct cpl_tx_sec_pdu *cpl;
1090 struct ulptx_idata *idata;
1091 struct ulp_txpkt *ulptx;
1092 struct fw_ulptx_wr *wr;
1096 /* get the number of flits required */
1097 flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);
1098 /* number of descriptors */
1099 ndesc = chcr_flits_to_desc(flits);
1100 /* check if enough credits available */
1101 credits = chcr_txq_avail(&q->q) - ndesc;
1102 if (unlikely(credits < 0)) {
1103 chcr_eth_txq_stop(q);
1104 return NETDEV_TX_BUSY;
1107 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1108 /* Credits are below the threshold values, stop the queue after
1109 * injecting the Work Request for this packet.
1111 chcr_eth_txq_stop(q);
1112 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1115 last_desc = q->q.pidx + ndesc - 1;
1116 if (last_desc >= q->q.size)
1117 last_desc -= q->q.size;
1118 sgl_sdesc = &q->q.sdesc[last_desc];
1120 if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1121 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1123 return NETDEV_TX_BUSY;
1129 pos = &q->q.desc[q->q.pidx];
1130 end = (u64 *)pos + flits;
1133 /* WR will need len16 */
1134 len16 = DIV_ROUND_UP(flits, 2);
1135 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1136 wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1141 ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
1142 ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
1143 ULP_TXPKT_FID_V(q->q.cntxt_id) |
1145 ulptx->len = htonl(len16 - 1);
1146 /* ULPTX_IDATA sub-command */
1147 idata = (struct ulptx_idata *)(ulptx + 1);
1148 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
1149 /* idata length will include cpl_tx_sec_pdu + key context size +
1150 * cpl_tx_data header.
1152 idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
1155 cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
1156 cpl->op_ivinsrtofst =
1157 htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
1158 CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
1159 CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
1160 CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
1161 cpl->pldlen = htonl(data_len);
1163 /* encryption should start after tls header size + iv size */
1164 cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
1166 cpl->aadstart_cipherstop_hi =
1167 htonl(CPL_TX_SEC_PDU_AADSTART_V(1) |
1168 CPL_TX_SEC_PDU_AADSTOP_V(TLS_HEADER_SIZE) |
1169 CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
1171 /* authentication will also start after tls header + iv size */
1172 cpl->cipherstop_lo_authinsert =
1173 htonl(CPL_TX_SEC_PDU_AUTHSTART_V(cipher_start) |
1174 CPL_TX_SEC_PDU_AUTHSTOP_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE) |
1175 CPL_TX_SEC_PDU_AUTHINSERT_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE));
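/* our reading of the field usage (not a documented contract):
 * authstop/authinsert are relative to the payload end, so
 * authentication stops 16 bytes early and the computed GCM TAG is
 * written over those final TLS_CIPHER_AES_GCM_128_TAG_SIZE bytes.
 */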
1177 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1178 cpl->seqno_numivs = htonl(tx_info->scmd0_seqno_numivs);
1179 cpl->ivgen_hdrlen = htonl(tx_info->scmd0_ivgen_hdrlen);
1180 cpl->scmd1 = cpu_to_be64(tx_info->record_no);
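/* scmd1 carries the 64-bit record sequence number; per our
 * understanding of the SCMD layout, the hw combines it with the
 * 4-byte salt to build the per-record GCM nonce (TLS 1.2 style).
 */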
1183 /* check if space left to fill the keys */
1184 left = (void *)q->q.stat - pos;
1186 left = (void *)end - (void *)q->q.stat;
1191 pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
1192 tx_info->key_ctx_len);
1193 left = (void *)q->q.stat - pos;
1196 left = (void *)end - (void *)q->q.stat;
1201 tx_data = (void *)pos;
1202 OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
1203 tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(data_len));
1205 tx_data->rsvd = htonl(tcp_seq);
1207 tx_data->flags = htonl(TX_BYPASS_F);
1209 tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
1211 /* check left again, it might go beyond queue limit */
1213 left = (void *)q->q.stat - pos;
1215 /* check the position again */
1217 left = (void *)end - (void *)q->q.stat;
1222 /* send the complete packet except the header */
1223 cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
1224 skb_offset, data_len);
1225 sgl_sdesc->skb = skb;
1227 chcr_txq_advance(&q->q, ndesc);
1228 cxgb4_ring_tx_db(adap, &q->q, ndesc);
1229 atomic64_inc(&adap->ch_ktls_stats.ktls_tx_send_records);
1235 * chcr_ktls_xmit_wr_short: This is to send out partial records. If it's
1236 * a middle part of a record, fetch the prior data to make it 16 byte aligned
1237 * and only then send it out.
1239 * @skb - skb contains partial record.
1240 * @tx_info - driver specific tls info.
1243 * @tcp_push - tcp push bit.
1244 * @mss - segment size.
1245 * @tls_rec_offset - offset from start of the tls record.
1246 * @prior_data - data before the current segment, required to make this record
1248 * @prior_data_len - prior_data length (less than 16)
1249 * return: NETDEV_TX_BUSY/NET_TX_OK.
1251 static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
1252 struct chcr_ktls_info *tx_info,
1253 struct sge_eth_txq *q,
1254 u32 tcp_seq, bool tcp_push, u32 mss,
1255 u32 tls_rec_offset, u8 *prior_data,
1256 u32 prior_data_len, u32 data_len,
1259 u32 len16, wr_mid = 0, cipher_start, nfrags;
1260 struct adapter *adap = tx_info->adap;
1261 unsigned int flits = 0, ndesc;
1262 int credits, left, last_desc;
1263 struct tx_sw_desc *sgl_sdesc;
1264 struct cpl_tx_data *tx_data;
1265 struct cpl_tx_sec_pdu *cpl;
1266 struct ulptx_idata *idata;
1267 struct ulp_txpkt *ulptx;
1268 struct fw_ulptx_wr *wr;
1273 nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
1274 /* get the number of flits required, it's a partial record so 2 flits
1275 * (AES_BLOCK_SIZE) will be added.
1277 flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2;
1278 /* get the correct 8 byte IV of this record */
1279 iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
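/* per-record 8-byte explicit IV: the base IV captured at setup time
 * plus the current record number, stored big-endian for the wire.
 */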
1280 /* If it's a middle record and not 16 byte aligned to run AES CTR, need
1281 * to make it 16 byte aligned. So at least 2 extra flits of immediate
1282 * data will be added.
1286 /* number of descriptors */
1287 ndesc = chcr_flits_to_desc(flits);
1288 /* check if enough credits available */
1289 credits = chcr_txq_avail(&q->q) - ndesc;
1290 if (unlikely(credits < 0)) {
1291 chcr_eth_txq_stop(q);
1292 return NETDEV_TX_BUSY;
1295 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1296 chcr_eth_txq_stop(q);
1297 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1300 last_desc = q->q.pidx + ndesc - 1;
1301 if (last_desc >= q->q.size)
1302 last_desc -= q->q.size;
1303 sgl_sdesc = &q->q.sdesc[last_desc];
1305 if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1306 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1308 return NETDEV_TX_BUSY;
1311 pos = &q->q.desc[q->q.pidx];
1312 end = (u64 *)pos + flits;
1315 /* WR will need len16 */
1316 len16 = DIV_ROUND_UP(flits, 2);
1317 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1318 wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1323 ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
1324 ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
1325 ULP_TXPKT_FID_V(q->q.cntxt_id) |
1327 ulptx->len = htonl(len16 - 1);
1328 /* ULPTX_IDATA sub-command */
1329 idata = (struct ulptx_idata *)(ulptx + 1);
1330 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
1331 /* idata length will include cpl_tx_sec_pdu + key context size +
1332 * cpl_tx_data header.
1334 idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
1335 sizeof(*tx_data) + AES_BLOCK_LEN + prior_data_len);
1337 cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
1338 /* cipher start will have tls header + iv size extra if it's the header
1339 * part of a tls record; else only the 16 byte IV will be added.
1343 (!tls_rec_offset ? TLS_HEADER_SIZE + tx_info->iv_size : 0);
1345 cpl->op_ivinsrtofst =
1346 htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
1347 CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
1348 CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
1349 cpl->pldlen = htonl(data_len + AES_BLOCK_LEN + prior_data_len);
1350 cpl->aadstart_cipherstop_hi =
1351 htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
1352 cpl->cipherstop_lo_authinsert = 0;
1353 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1354 cpl->seqno_numivs = htonl(tx_info->scmd0_short_seqno_numivs);
1355 cpl->ivgen_hdrlen = htonl(tx_info->scmd0_short_ivgen_hdrlen);
1359 /* check if space left to fill the keys */
1360 left = (void *)q->q.stat - pos;
1362 left = (void *)end - (void *)q->q.stat;
1367 pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
1368 tx_info->key_ctx_len);
1369 left = (void *)q->q.stat - pos;
1372 left = (void *)end - (void *)q->q.stat;
1377 tx_data = (void *)pos;
1378 OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
1379 tx_data->len = htonl(TX_DATA_MSS_V(mss) |
1380 TX_LENGTH_V(data_len + prior_data_len));
1381 tx_data->rsvd = htonl(tcp_seq);
1382 tx_data->flags = htonl(TX_BYPASS_F);
1384 tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
1386 /* check left again, it might go beyond queue limit */
1388 left = (void *)q->q.stat - pos;
1390 /* check the position again */
1392 left = (void *)end - (void *)q->q.stat;
1396 /* copy the 16 byte IV for AES-CTR, which includes 4 bytes of salt, 8
1397 * bytes of actual IV and a 4 byte block counter.
1399 memcpy(pos, tx_info->key_ctx.salt, tx_info->salt_size);
1400 memcpy(pos + tx_info->salt_size, &iv_record, tx_info->iv_size);
1401 *(__be32 *)(pos + tx_info->salt_size + tx_info->iv_size) =
1402 htonl(2 + (tls_rec_offset ? ((tls_rec_offset -
1403 (TLS_HEADER_SIZE + tx_info->iv_size)) / AES_BLOCK_LEN) : 0));
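/* the counter starts at 2 because GCM reserves counter block 1 for
 * the TAG; a middle chunk then skips one count per 16-byte block
 * already encrypted. E.g., assuming the 5-byte tls record header and
 * an 8-byte explicit IV, tls_rec_offset = 13 + 160 resumes at
 * 2 + 160/16 = 12.
 */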
1406 /* prior_data_len will always be less than 16 bytes; fill the
1407 * prior_data after the AES CTR block and clear the remaining length
1411 pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
1412 /* send the complete packet except the header */
1413 cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
1414 skb_offset, data_len);
1415 sgl_sdesc->skb = skb;
1417 chcr_txq_advance(&q->q, ndesc);
1418 cxgb4_ring_tx_db(adap, &q->q, ndesc);
1424 * chcr_ktls_tx_plaintxt: This handler will take care of the records which have
1425 * only plain text (only tls header and iv)
1426 * @tx_info - driver specific tls info.
1427 * @skb - skb contains partial record.
1429 * @mss - segment size.
1430 * @tcp_push - tcp push bit.
1432 * @port_id : port number
1433 * @prior_data - data before the current segment, required to make this record
1435 * @prior_data_len - prior_data length (less than 16)
1436 * return: NETDEV_TX_BUSY/NET_TX_OK.
1438 static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
1439 struct sk_buff *skb, u32 tcp_seq, u32 mss,
1440 bool tcp_push, struct sge_eth_txq *q,
1441 u32 port_id, u8 *prior_data,
1442 u32 data_len, u32 skb_offset,
1445 int credits, left, len16, last_desc;
1446 unsigned int flits = 0, ndesc;
1447 struct tx_sw_desc *sgl_sdesc;
1448 struct cpl_tx_data *tx_data;
1449 struct ulptx_idata *idata;
1450 struct ulp_txpkt *ulptx;
1451 struct fw_ulptx_wr *wr;
1452 u32 wr_mid = 0, nfrags;
1456 flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
1457 nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
1458 flits += chcr_sgl_len(nfrags);
1462 /* WR will need len16 */
1463 len16 = DIV_ROUND_UP(flits, 2);
1464 /* check how many descriptors needed */
1465 ndesc = DIV_ROUND_UP(flits, 8);
1467 credits = chcr_txq_avail(&q->q) - ndesc;
1468 if (unlikely(credits < 0)) {
1469 chcr_eth_txq_stop(q);
1470 return NETDEV_TX_BUSY;
1473 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1474 chcr_eth_txq_stop(q);
1475 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1478 last_desc = q->q.pidx + ndesc - 1;
1479 if (last_desc >= q->q.size)
1480 last_desc -= q->q.size;
1481 sgl_sdesc = &q->q.sdesc[last_desc];
1483 if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
1484 sgl_sdesc->addr) < 0)) {
1485 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1487 return NETDEV_TX_BUSY;
1490 pos = &q->q.desc[q->q.pidx];
1491 end = (u64 *)pos + flits;
1494 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1495 wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1499 ulptx = (struct ulp_txpkt *)(wr + 1);
1500 ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
1501 ULP_TXPKT_DATAMODIFY_V(0) |
1502 ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
1503 ULP_TXPKT_DEST_V(0) |
1504 ULP_TXPKT_FID_V(q->q.cntxt_id) | ULP_TXPKT_RO_V(1));
1505 ulptx->len = htonl(len16 - 1);
1506 /* ULPTX_IDATA sub-command */
1507 idata = (struct ulptx_idata *)(ulptx + 1);
1508 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
1509 idata->len = htonl(sizeof(*tx_data) + prior_data_len);
1511 tx_data = (struct cpl_tx_data *)(idata + 1);
1512 OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
1513 tx_data->len = htonl(TX_DATA_MSS_V(mss) |
1514 TX_LENGTH_V(data_len + prior_data_len));
1515 /* set tcp seq number */
1516 tx_data->rsvd = htonl(tcp_seq);
1517 tx_data->flags = htonl(TX_BYPASS_F);
1519 tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
1522 /* apart from prior_data_len, we should set the remaining part of the 16 bytes
1526 pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
1528 /* check left again, it might go beyond queue limit */
1529 left = (void *)q->q.stat - pos;
1531 /* check the position again */
1533 left = (void *)end - (void *)q->q.stat;
1537 /* send the complete packet including the header */
1538 cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
1539 skb_offset, data_len);
1540 sgl_sdesc->skb = skb;
1542 chcr_txq_advance(&q->q, ndesc);
1543 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
1548 * chcr_ktls_copy_record_in_skb
1549 * @nskb - new skb where the frags are to be added.
1550 * @skb - old skb, to copy socket and destructor details.
1551 * @record - specific record which has complete 16k record in frags.
1553 static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
1554 struct sk_buff *skb,
1555 struct tls_record_info *record)
1559 for (i = 0; i < record->num_frags; i++) {
1560 skb_shinfo(nskb)->frags[i] = record->frags[i];
1561 /* increase the frag ref count */
1562 __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
1565 skb_shinfo(nskb)->nr_frags = record->num_frags;
1566 nskb->data_len = record->len;
1567 nskb->len += record->len;
1568 nskb->truesize += record->len;
1570 nskb->destructor = skb->destructor;
1571 refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
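/* nskb now owns a reference on every frag page and its truesize is
 * charged to the original socket's write budget, so the copied
 * destructor still balances sk_wmem_alloc once the hw completes it.
 */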
1575 * chcr_ktls_update_snd_una: Reset SND_UNA. It is done to avoid
1576 * sending the same segment again. It will discard the segment which is before
1577 * the current tx max.
1578 * @tx_info - driver specific tls info.
1580 * return: NET_TX_OK/NET_XMIT_DROP.
1582 static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
1583 struct sge_eth_txq *q)
1585 struct fw_ulptx_wr *wr;
1591 len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
1592 ndesc = DIV_ROUND_UP(len, 64);
1594 credits = chcr_txq_avail(&q->q) - ndesc;
1595 if (unlikely(credits < 0)) {
1596 chcr_eth_txq_stop(q);
1597 return NETDEV_TX_BUSY;
1600 pos = &q->q.desc[q->q.pidx];
1604 wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1606 /* fill len in wr field */
1607 wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
1611 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
1613 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
1614 TCB_SND_UNA_RAW_V(0), 0);
1616 chcr_txq_advance(&q->q, ndesc);
1617 cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
1623 * chcr_end_part_handler: This handler will handle the record which
1624 * is complete or whose end part is received. The T6 adapter has an issue in that
1625 * it can't send out the TAG with a partial record, so if it's an end part then we
1626 * have to send the TAG as well, for which we need to fetch the complete record
1627 * and send it to the crypto module.
1628 * @tx_info - driver specific tls info.
1629 * @skb - skb contains partial record.
1630 * @record - complete record of 16K size.
1632 * @mss - segment size in which TP needs to chop a packet.
1633 * @tcp_push_no_fin - tcp push if fin is not set.
1635 * @tls_end_offset - offset from end of the record.
1636 * @last_wr - check if this is the last part of the skb going out.
1637 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
1639 static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
1640 struct sk_buff *skb,
1641 struct tls_record_info *record,
1642 u32 tcp_seq, int mss, bool tcp_push_no_fin,
1643 struct sge_eth_txq *q, u32 skb_offset,
1644 u32 tls_end_offset, bool last_wr)
1646 struct sk_buff *nskb = NULL;
1647 /* check if it is a complete record */
1648 if (tls_end_offset == record->len) {
1650 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
1652 nskb = alloc_skb(0, GFP_ATOMIC);
1654 dev_kfree_skb_any(skb);
1655 return NETDEV_TX_BUSY;
1658 /* copy complete record in skb */
1659 chcr_ktls_copy_record_in_skb(nskb, skb, record);
1660 /* packet is being sent from the beginning, update the tcp_seq
1663 tcp_seq = tls_record_start_seq(record);
1664 /* reset snd una, so the middle record won't send the already
1667 if (chcr_ktls_update_snd_una(tx_info, q))
1669 /* reset skb offset */
1673 dev_kfree_skb_any(skb);
1677 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
1680 if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
1681 last_wr, record->len, skb_offset,
1683 (last_wr && tcp_push_no_fin),
1689 dev_kfree_skb_any(nskb);
1690 return NETDEV_TX_BUSY;
1694 * chcr_short_record_handler: This handler will take care of the records which
1695 * don't have an end part (the 1st part or the middle part(s) of a record). In such
1696 * cases, AES CTR will be used in place of AES GCM to send out a partial packet.
1697 * This partial record might be the first part of the record, or the middle
1698 * part. In case of a middle record we should fetch the prior data to make it 16
1699 * byte aligned. If it has a partial tls header or iv then get to the start of the
1700 * tls header. And if it has a partial TAG, then remove the complete TAG and send
1702 * There is one more possibility: it gets a partial header; send that
1703 * portion as plaintext.
1704 * @tx_info - driver specific tls info.
1705 * @skb - skb contains partial record.
1706 * @record - complete record of 16K size.
1708 * @mss - segment size in which TP needs to chop a packet.
1709 * @tcp_push_no_fin - tcp push if fin is not set.
1711 * @tls_end_offset - offset from end of the record.
1712 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
1714 static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
1715 struct sk_buff *skb,
1716 struct tls_record_info *record,
1717 u32 tcp_seq, int mss, bool tcp_push_no_fin,
1718 u32 data_len, u32 skb_offset,
1719 struct sge_eth_txq *q, u32 tls_end_offset)
1721 u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
1722 u8 prior_data[16] = {0};
1723 u32 prior_data_len = 0;
1725 /* check if the skb is ending in the middle of the tag/HASH; it's big
1726 * trouble, send the packet up to just before the HASH.
1728 int remaining_record = tls_end_offset - data_len;
1730 if (remaining_record > 0 &&
1731 remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
1732 int trimmed_len = 0;
1734 if (tls_end_offset > TLS_CIPHER_AES_GCM_128_TAG_SIZE)
1735 trimmed_len = data_len -
1736 (TLS_CIPHER_AES_GCM_128_TAG_SIZE -
1741 WARN_ON(trimmed_len > data_len);
1743 data_len = trimmed_len;
1744 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
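/* e.g. if 6 TAG bytes are still to arrive in a later segment
 * (remaining_record == 6), the last 16 - 6 = 10 bytes of this segment
 * are the leading TAG bytes and are trimmed off here; the full TAG is
 * regenerated when the record's end part is finally handled.
 */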
1747 /* check if it is only the header part. */
1748 if (tls_rec_offset + data_len <= (TLS_HEADER_SIZE + tx_info->iv_size)) {
1749 if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
1751 tx_info->port_id, prior_data,
1752 data_len, skb_offset, prior_data_len))
1758 /* check if the middle record's start point is 16 byte aligned. CTR
1759 * needs a 16 byte aligned start point to start encryption.
1761 if (tls_rec_offset) {
1762 /* there is an offset from start, which means it's a middle record */
1765 if (tls_rec_offset < (TLS_HEADER_SIZE + tx_info->iv_size)) {
1766 prior_data_len = tls_rec_offset;
1772 (TLS_HEADER_SIZE + tx_info->iv_size))
1774 remaining = tls_rec_offset - prior_data_len;
1777 /* if prior_data_len is not zero, it means we need to fetch prior
1778 * data to make this record 16 byte aligned, or we need to reach
1781 if (prior_data_len) {
1786 int frag_size = 0, frag_delta = 0;
1788 while (remaining > 0) {
1789 frag_size = skb_frag_size(&record->frags[i]);
1790 if (remaining < frag_size)
1793 remaining -= frag_size;
1796 f = &record->frags[i];
1797 vaddr = kmap_atomic(skb_frag_page(f));
1799 data = vaddr + skb_frag_off(f) + remaining;
1800 frag_delta = skb_frag_size(f) - remaining;
1802 if (frag_delta >= prior_data_len) {
1803 memcpy(prior_data, data, prior_data_len);
1804 kunmap_atomic(vaddr);
1806 memcpy(prior_data, data, frag_delta);
1807 kunmap_atomic(vaddr);
1808 /* get the next page */
1809 f = &record->frags[i + 1];
1810 vaddr = kmap_atomic(skb_frag_page(f));
1811 data = vaddr + skb_frag_off(f);
1812 memcpy(prior_data + frag_delta,
1813 data, (prior_data_len - frag_delta));
1814 kunmap_atomic(vaddr);
1816 /* reset tcp_seq as per the required prior_data len */
1817 tcp_seq -= prior_data_len;
1819 /* reset snd una, so the middle record won't send the already
1822 if (chcr_ktls_update_snd_una(tx_info, q))
1824 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
1826 atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
1829 if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
1830 mss, tls_rec_offset, prior_data,
1831 prior_data_len, data_len, skb_offset)) {
1837 dev_kfree_skb_any(skb);
1838 return NETDEV_TX_BUSY;
1841 /* nic tls TX handler */
1842 static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
1844 u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
1845 struct ch_ktls_port_stats_debug *port_stats;
1846 struct chcr_ktls_ofld_ctx_tx *tx_ctx;
1847 struct ch_ktls_stats_debug *stats;
1848 struct tcphdr *th = tcp_hdr(skb);
1849 int data_len, qidx, ret = 0, mss;
1850 struct tls_record_info *record;
1851 struct chcr_ktls_info *tx_info;
1852 struct tls_context *tls_ctx;
1853 struct sge_eth_txq *q;
1854 struct adapter *adap;
1855 unsigned long flags;
1857 tcp_seq = ntohl(th->seq);
1858 skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
1859 skb_data_len = skb->len - skb_offset;
1860 data_len = skb_data_len;
1862 mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
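/* a non-GSO skb goes out as a single segment, so the payload length
 * itself serves as the mss later programmed into CPL_TX_DATA.
 */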
1864 tls_ctx = tls_get_ctx(skb->sk);
1865 if (unlikely(tls_ctx->netdev != dev))
1868 tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
1869 tx_info = tx_ctx->chcr_info;
1871 if (unlikely(!tx_info))
1874 adap = tx_info->adap;
1875 stats = &adap->ch_ktls_stats;
1876 port_stats = &stats->ktls_port[tx_info->port_id];
1878 qidx = skb->queue_mapping;
1879 q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
1880 cxgb4_reclaim_completed_tx(adap, &q->q, true);
1881 /* if tcp options are set but finish is not, send the options first */
1882 if (!th->fin && chcr_ktls_check_tcp_options(th)) {
1883 ret = chcr_ktls_write_tcp_options(tx_info, skb, q,
1886 return NETDEV_TX_BUSY;
1889 ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
1893 return NETDEV_TX_BUSY;
1896 /* TCP segments can be received either complete or partial.
1897 * chcr_end_part_handler will handle the cases where a complete record or the end
1898 * part of a record is received. In case of a partial end part of a record,
1899 * we will send the complete record again.
1905 cxgb4_reclaim_completed_tx(adap, &q->q, true);
1907 spin_lock_irqsave(&tx_ctx->base.lock, flags);
1908 /* fetch the tls record */
1909 record = tls_get_record(&tx_ctx->base, tcp_seq,
1910 &tx_info->record_no);
1911 /* By the time the packet reached us, the ACK may have been received
1912 * and the record won't be found in that case; handle it gracefully.
1914 if (unlikely(!record)) {
1915 spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
1916 atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
1920 if (unlikely(tls_record_is_start_marker(record))) {
1921 spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
1922 atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
1925 /* increase the page reference count of the record's frags, so that
1926 * there is no chance of a page being freed mid-way if the stack
1927 * receives an ACK and tries to delete the record.
1929 for (i = 0; i < record->num_frags; i++)
1930 __skb_frag_ref(&record->frags[i]);
1932 spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
1934 tls_end_offset = record->end_seq - tcp_seq;
1936 pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
1937 tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
1938 /* if a tls record is finishing in this SKB */
1939 if (tls_end_offset <= data_len) {
1940 ret = chcr_end_part_handler(tx_info, skb, record,
1942 (!th->fin && th->psh), q,
1946 tls_end_offset == skb->len);
1948 data_len -= tls_end_offset;
1949 /* tcp_seq increment is required to handle the next record.
1951 tcp_seq += tls_end_offset;
1952 skb_offset += tls_end_offset;
1954 ret = chcr_short_record_handler(tx_info, skb,
1955 record, tcp_seq, mss,
1956 (!th->fin && th->psh),
1957 data_len, skb_offset,
1962 /* drop the frag references taken locally above */
1963 for (i = 0; i < record->num_frags; i++) {
1965 __skb_frag_unref(&record->frags[i]);
1967 /* on any failure, come out of the loop. */
1969 return NETDEV_TX_OK;
1971 /* length should never be less than 0 */
1972 WARN_ON(data_len < 0);
1974 } while (data_len > 0);
1976 tx_info->prev_seq = ntohl(th->seq) + skb_data_len;
1977 atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
1978 atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
1980 /* tcp finish is set, send a separate tcp msg including all the options
1984 chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
1986 return NETDEV_TX_OK;
1988 dev_kfree_skb_any(skb);
1989 return NETDEV_TX_OK;
1992 static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
1994 struct chcr_ktls_uld_ctx *u_ctx;
1996 pr_info_once("%s - version %s\n", CHCR_KTLS_DRV_DESC,
1997 CHCR_KTLS_DRV_VERSION);
1998 u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
2000 u_ctx = ERR_PTR(-ENOMEM);
2003 u_ctx->lldi = *lldi;
2008 static const struct tlsdev_ops chcr_ktls_ops = {
2009 .tls_dev_add = chcr_ktls_dev_add,
2010 .tls_dev_del = chcr_ktls_dev_del,
2013 static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
2014 [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
2015 [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
2018 static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
2019 const struct pkt_gl *pgl)
2021 const struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)rsp;
2022 struct chcr_ktls_uld_ctx *u_ctx = handle;
2023 u8 opcode = rpl->ot.opcode;
2024 struct adapter *adap;
2026 adap = pci_get_drvdata(u_ctx->lldi.pdev);
2028 if (!work_handlers[opcode]) {
2029 pr_err("Unsupported opcode %d received\n", opcode);
2033 work_handlers[opcode](adap, (unsigned char *)&rsp[1]);
2037 static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
2039 struct chcr_ktls_uld_ctx *u_ctx = handle;
2041 switch (new_state) {
2042 case CXGB4_STATE_UP:
2043 pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
2044 mutex_lock(&dev_mutex);
2045 list_add_tail(&u_ctx->entry, &uld_ctx_list);
2046 mutex_unlock(&dev_mutex);
2048 case CXGB4_STATE_START_RECOVERY:
2049 case CXGB4_STATE_DOWN:
2050 case CXGB4_STATE_DETACH:
2051 pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
2052 mutex_lock(&dev_mutex);
2053 list_del(&u_ctx->entry);
2054 mutex_unlock(&dev_mutex);
2063 static struct cxgb4_uld_info chcr_ktls_uld_info = {
2064 .name = CHCR_KTLS_DRV_MODULE_NAME,
2067 .add = chcr_ktls_uld_add,
2068 .tx_handler = chcr_ktls_xmit,
2069 .rx_handler = chcr_ktls_uld_rx_handler,
2070 .state_change = chcr_ktls_uld_state_change,
2071 .tlsdev_ops = &chcr_ktls_ops,
2074 static int __init chcr_ktls_init(void)
2076 cxgb4_register_uld(CXGB4_ULD_KTLS, &chcr_ktls_uld_info);
2080 static void __exit chcr_ktls_exit(void)
2082 struct chcr_ktls_uld_ctx *u_ctx, *tmp;
2083 struct adapter *adap;
2085 pr_info("driver unloaded\n");
2087 mutex_lock(&dev_mutex);
2088 list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
2089 adap = pci_get_drvdata(u_ctx->lldi.pdev);
2090 memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
2091 list_del(&u_ctx->entry);
2094 mutex_unlock(&dev_mutex);
2095 cxgb4_unregister_uld(CXGB4_ULD_KTLS);
2098 module_init(chcr_ktls_init);
2099 module_exit(chcr_ktls_exit);
2101 MODULE_DESCRIPTION("Chelsio NIC TLS ULD driver");
2102 MODULE_LICENSE("GPL");
2103 MODULE_AUTHOR("Chelsio Communications");
2104 MODULE_VERSION(CHCR_KTLS_DRV_VERSION);