// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
#include <net/addrconf.h>
#include <net/secure_seq.h>
/*
 * State transitions and actions for close. Note that if we are in SYN_SENT
 * we remain in that state as we cannot control a connection while it's in
 * SYN_SENT; such connections are allowed to establish and are then aborted.
 */
static unsigned char new_state[16] = {
	/* current state:     new state:       action: */
	/* (Invalid)       */ TCP_CLOSE,
	/* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_SYN_SENT    */ TCP_SYN_SENT,
	/* TCP_SYN_RECV    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_FIN_WAIT1   */ TCP_FIN_WAIT1,
	/* TCP_FIN_WAIT2   */ TCP_FIN_WAIT2,
	/* TCP_TIME_WAIT   */ TCP_CLOSE,
	/* TCP_CLOSE       */ TCP_CLOSE,
	/* TCP_CLOSE_WAIT  */ TCP_LAST_ACK | TCP_ACTION_FIN,
	/* TCP_LAST_ACK    */ TCP_LAST_ACK,
	/* TCP_LISTEN      */ TCP_CLOSE,
	/* TCP_CLOSING     */ TCP_CLOSING,
};
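
/* Allocate and initialize the per-connection offload state (struct chtls_sock)
 * for this device: the cached control skb, TX queue, work-request list and the
 * TLS hardware key/MSS bookkeeping.
 */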
57 static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
59 struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
64 csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
65 if (!csk->txdata_skb_cache) {
70 kref_init(&csk->kref);
72 skb_queue_head_init(&csk->txq);
73 csk->wr_skb_head = NULL;
74 csk->wr_skb_tail = NULL;
77 csk->tlshws.txkey = -1;
78 csk->tlshws.rxkey = -1;
79 csk->tlshws.mfs = TLS_MFS;
80 skb_queue_head_init(&csk->tlshws.sk_recv_queue);
84 static void chtls_sock_release(struct kref *ref)
86 struct chtls_sock *csk =
87 container_of(ref, struct chtls_sock, kref);
92 static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
95 struct net_device *ndev = cdev->ports[0];
96 #if IS_ENABLED(CONFIG_IPV6)
97 struct net_device *temp;
101 switch (sk->sk_family) {
103 if (likely(!inet_sk(sk)->inet_rcv_saddr))
105 ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
107 #if IS_ENABLED(CONFIG_IPV6)
109 addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
110 if (likely(addr_type == IPV6_ADDR_ANY))
113 for_each_netdev_rcu(&init_net, temp) {
114 if (ipv6_chk_addr(&init_net, (struct in6_addr *)
115 &sk->sk_v6_rcv_saddr, temp, 1)) {
129 if (is_vlan_dev(ndev))
130 return vlan_dev_real_dev(ndev);
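
/* Decode the TCP options word returned by the hardware for this connection and
 * mirror it into the in-kernel tcp_sock: MSS clamp, timestamps and window
 * scaling.
 */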
134 static void assign_rxopt(struct sock *sk, unsigned int opt)
136 const struct chtls_dev *cdev;
137 struct chtls_sock *csk;
140 csk = rcu_dereference_sk_user_data(sk);
144 tp->tcp_header_len = sizeof(struct tcphdr);
145 tp->rx_opt.mss_clamp = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
146 tp->mss_cache = tp->rx_opt.mss_clamp;
147 tp->rx_opt.tstamp_ok = TCPOPT_TSTAMP_G(opt);
148 tp->rx_opt.snd_wscale = TCPOPT_SACK_G(opt);
149 tp->rx_opt.wscale_ok = TCPOPT_WSCALE_OK_G(opt);
150 SND_WSCALE(tp) = TCPOPT_SND_WSCALE_G(opt);
151 if (!tp->rx_opt.wscale_ok)
152 tp->rx_opt.rcv_wscale = 0;
153 if (tp->rx_opt.tstamp_ok) {
154 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
155 tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
156 } else if (csk->opt2 & TSTAMPS_EN_F) {
157 csk->opt2 &= ~TSTAMPS_EN_F;
158 csk->mtu_idx = TCPOPT_MSS_G(opt);
162 static void chtls_purge_receive_queue(struct sock *sk)
166 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
167 skb_dst_set(skb, (void *)NULL);
172 static void chtls_purge_write_queue(struct sock *sk)
174 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
177 while ((skb = __skb_dequeue(&csk->txq))) {
178 sk->sk_wmem_queued -= skb->truesize;
183 static void chtls_purge_recv_queue(struct sock *sk)
185 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
186 struct chtls_hws *tlsk = &csk->tlshws;
189 while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
190 skb_dst_set(skb, NULL);
195 static void abort_arp_failure(void *handle, struct sk_buff *skb)
197 struct cpl_abort_req *req = cplhdr(skb);
198 struct chtls_dev *cdev;
200 cdev = (struct chtls_dev *)handle;
201 req->cmd = CPL_ABORT_NO_RST;
202 cxgb4_ofld_send(cdev->lldi->ports[0], skb);
205 static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
207 if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
209 refcount_add(2, &skb->users);
211 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
216 static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
218 struct cpl_abort_req *req;
219 struct chtls_sock *csk;
222 csk = rcu_dereference_sk_user_data(sk);
226 skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));
228 req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
229 INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
230 skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
231 req->rsvd0 = htonl(tp->snd_nxt);
232 req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
234 t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
235 send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
238 static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
240 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
242 if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
244 if (sk->sk_state == TCP_SYN_RECV)
245 csk_set_flag(csk, CSK_RST_ABORTED);
249 if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
250 struct tcp_sock *tp = tcp_sk(sk);
252 if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
253 WARN_ONCE(1, "send tx flowc error");
254 csk_set_flag(csk, CSK_TX_DATA_SENT);
257 csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
258 chtls_purge_write_queue(sk);
260 csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
261 if (sk->sk_state != TCP_SYN_RECV)
262 chtls_send_abort(sk, mode, skb);
271 static void release_tcp_port(struct sock *sk)
273 if (inet_csk(sk)->icsk_bind_hash)
277 static void tcp_uncork(struct sock *sk)
279 struct tcp_sock *tp = tcp_sk(sk);
281 if (tp->nonagle & TCP_NAGLE_CORK) {
282 tp->nonagle &= ~TCP_NAGLE_CORK;
283 chtls_tcp_push(sk, 0);
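
/* Queue a CPL_CLOSE_CON_REQ work request so the hardware sends a FIN on this
 * connection, and push it out unless the socket is still in SYN_SENT.
 */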
287 static void chtls_close_conn(struct sock *sk)
289 struct cpl_close_con_req *req;
290 struct chtls_sock *csk;
295 len = roundup(sizeof(struct cpl_close_con_req), 16);
296 csk = rcu_dereference_sk_user_data(sk);
299 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
300 req = (struct cpl_close_con_req *)__skb_put(skb, len);
302 req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
303 FW_WR_IMMDLEN_V(sizeof(*req) -
305 req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
306 FW_WR_FLOWID_V(tid));
308 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
311 skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
312 if (sk->sk_state != TCP_SYN_SENT)
313 chtls_push_frames(csk, 1);
/*
 * Perform a state transition during close and return the actions indicated
 * for the transition. Do not make this function inline, the main reason
 * it exists at all is to avoid multiple inlining of tcp_set_state.
 */
321 static int make_close_transition(struct sock *sk)
323 int next = (int)new_state[sk->sk_state];
325 tcp_set_state(sk, next & TCP_STATE_MASK);
326 return next & TCP_ACTION_FIN;
329 void chtls_close(struct sock *sk, long timeout)
331 int data_lost, prev_state;
332 struct chtls_sock *csk;
334 csk = rcu_dereference_sk_user_data(sk);
337 sk->sk_shutdown |= SHUTDOWN_MASK;
339 data_lost = skb_queue_len(&sk->sk_receive_queue);
340 data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
341 chtls_purge_recv_queue(sk);
342 chtls_purge_receive_queue(sk);
344 if (sk->sk_state == TCP_CLOSE) {
346 } else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
347 chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
348 release_tcp_port(sk);
350 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
351 sk->sk_prot->disconnect(sk, 0);
352 } else if (make_close_transition(sk)) {
353 chtls_close_conn(sk);
357 sk_stream_wait_close(sk, timeout);
360 prev_state = sk->sk_state;
369 if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
372 if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
373 !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
376 skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
378 chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
381 if (sk->sk_state == TCP_CLOSE)
382 inet_csk_destroy_sock(sk);
/*
 * Wait until a socket enters one of the given states.
 */
393 static int wait_for_states(struct sock *sk, unsigned int states)
395 DECLARE_WAITQUEUE(wait, current);
396 struct socket_wq _sk_wq;
403 * We want this to work even when there's no associated struct socket.
404 * In that case we provide a temporary wait_queue_head_t.
407 init_waitqueue_head(&_sk_wq.wait);
408 _sk_wq.fasync_list = NULL;
409 init_rcu_head_on_stack(&_sk_wq.rcu);
410 RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
413 add_wait_queue(sk_sleep(sk), &wait);
414 while (!sk_in_state(sk, states)) {
415 if (!current_timeo) {
419 if (signal_pending(current)) {
420 err = sock_intr_errno(current_timeo);
423 set_current_state(TASK_UNINTERRUPTIBLE);
425 if (!sk_in_state(sk, states))
426 current_timeo = schedule_timeout(current_timeo);
427 __set_current_state(TASK_RUNNING);
430 remove_wait_queue(sk_sleep(sk), &wait);
432 if (rcu_dereference(sk->sk_wq) == &_sk_wq)
437 int chtls_disconnect(struct sock *sk, int flags)
443 chtls_purge_recv_queue(sk);
444 chtls_purge_receive_queue(sk);
445 chtls_purge_write_queue(sk);
447 if (sk->sk_state != TCP_CLOSE) {
448 sk->sk_err = ECONNRESET;
449 chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
450 err = wait_for_states(sk, TCPF_CLOSE);
454 chtls_purge_recv_queue(sk);
455 chtls_purge_receive_queue(sk);
456 tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
457 return tcp_disconnect(sk, flags);
460 #define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
461 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
462 void chtls_shutdown(struct sock *sk, int how)
464 if ((how & SEND_SHUTDOWN) &&
465 sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
466 make_close_transition(sk))
467 chtls_close_conn(sk);
470 void chtls_destroy_sock(struct sock *sk)
472 struct chtls_sock *csk;
474 csk = rcu_dereference_sk_user_data(sk);
475 chtls_purge_recv_queue(sk);
476 csk->ulp_mode = ULP_MODE_NONE;
477 chtls_purge_write_queue(sk);
479 kref_put(&csk->kref, chtls_sock_release);
481 if (sk->sk_family == AF_INET)
482 sk->sk_prot = &tcp_prot;
483 #if IS_ENABLED(CONFIG_IPV6)
485 sk->sk_prot = &tcpv6_prot;
487 sk->sk_prot->destroy(sk);
490 static void reset_listen_child(struct sock *child)
492 struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
495 skb = alloc_ctrl_skb(csk->txdata_skb_cache,
496 sizeof(struct cpl_abort_req));
498 chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
500 INC_ORPHAN_COUNT(child);
501 if (child->sk_state == TCP_CLOSE)
502 inet_csk_destroy_sock(child);
505 static void chtls_disconnect_acceptq(struct sock *listen_sk)
507 struct request_sock **pprev;
509 pprev = ACCEPT_QUEUE(listen_sk);
511 struct request_sock *req = *pprev;
513 if (req->rsk_ops == &chtls_rsk_ops ||
514 req->rsk_ops == &chtls_rsk_opsv6) {
515 struct sock *child = req->sk;
517 *pprev = req->dl_next;
518 sk_acceptq_removed(listen_sk);
523 release_tcp_port(child);
524 reset_listen_child(child);
525 bh_unlock_sock(child);
529 pprev = &req->dl_next;
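
/* Hash a listening socket into a bucket index of the driver's listen_hash_tab. */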
534 static int listen_hashfn(const struct sock *sk)
536 return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
539 static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
543 struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);
546 int key = listen_hashfn(sk);
550 spin_lock(&cdev->listen_lock);
551 p->next = cdev->listen_hash_tab[key];
552 cdev->listen_hash_tab[key] = p;
553 spin_unlock(&cdev->listen_lock);
558 static int listen_hash_find(struct chtls_dev *cdev,
561 struct listen_info *p;
565 key = listen_hashfn(sk);
567 spin_lock(&cdev->listen_lock);
568 for (p = cdev->listen_hash_tab[key]; p; p = p->next)
573 spin_unlock(&cdev->listen_lock);
577 static int listen_hash_del(struct chtls_dev *cdev,
580 struct listen_info *p, **prev;
584 key = listen_hashfn(sk);
585 prev = &cdev->listen_hash_tab[key];
587 spin_lock(&cdev->listen_lock);
588 for (p = *prev; p; prev = &p->next, p = p->next)
595 spin_unlock(&cdev->listen_lock);
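
/* Undo the bookkeeping done when a SYN was offloaded: drop the request from
 * the parent's accept-queue accounting, unlink the child from the driver's
 * SYN queue and free the request_sock.
 */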
599 static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
601 struct request_sock *req;
602 struct chtls_sock *csk;
604 csk = rcu_dereference_sk_user_data(child);
605 req = csk->passive_reap_next;
607 reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
608 __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
609 chtls_reqsk_free(req);
610 csk->passive_reap_next = NULL;
613 static void chtls_reset_synq(struct listen_ctx *listen_ctx)
615 struct sock *listen_sk = listen_ctx->lsk;
617 while (!skb_queue_empty(&listen_ctx->synq)) {
618 struct chtls_sock *csk =
619 container_of((struct synq *)__skb_dequeue
620 (&listen_ctx->synq), struct chtls_sock, synq);
621 struct sock *child = csk->sk;
623 cleanup_syn_rcv_conn(child, listen_sk);
627 release_tcp_port(child);
628 reset_listen_child(child);
629 bh_unlock_sock(child);
635 int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
637 struct net_device *ndev;
638 #if IS_ENABLED(CONFIG_IPV6)
639 bool clip_valid = false;
641 struct listen_ctx *ctx;
642 struct adapter *adap;
643 struct port_info *pi;
648 ndev = chtls_find_netdev(cdev, sk);
653 pi = netdev_priv(ndev);
655 if (!(adap->flags & CXGB4_FULL_INIT_DONE))
658 if (listen_hash_find(cdev, sk) >= 0) /* already have it */
661 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
665 __module_get(THIS_MODULE);
668 ctx->state = T4_LISTEN_START_PENDING;
669 skb_queue_head_init(&ctx->synq);
671 stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
676 if (!listen_hash_add(cdev, sk, stid))
679 if (sk->sk_family == PF_INET) {
680 ret = cxgb4_create_server(ndev, stid,
681 inet_sk(sk)->inet_rcv_saddr,
682 inet_sk(sk)->inet_sport, 0,
683 cdev->lldi->rxq_ids[0]);
684 #if IS_ENABLED(CONFIG_IPV6)
688 addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
689 if (addr_type != IPV6_ADDR_ANY) {
690 ret = cxgb4_clip_get(ndev, (const u32 *)
691 &sk->sk_v6_rcv_saddr, 1);
696 ret = cxgb4_create_server6(ndev, stid,
697 &sk->sk_v6_rcv_saddr,
698 inet_sk(sk)->inet_sport,
699 cdev->lldi->rxq_ids[0]);
703 ret = net_xmit_errno(ret);
708 #if IS_ENABLED(CONFIG_IPV6)
710 cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1);
712 listen_hash_del(cdev, sk);
714 cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
718 module_put(THIS_MODULE);
722 void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
724 struct listen_ctx *listen_ctx;
727 stid = listen_hash_del(cdev, sk);
731 listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
732 chtls_reset_synq(listen_ctx);
734 cxgb4_remove_server(cdev->lldi->ports[0], stid,
735 cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6);
737 #if IS_ENABLED(CONFIG_IPV6)
738 if (sk->sk_family == PF_INET6) {
739 struct chtls_sock *csk;
742 csk = rcu_dereference_sk_user_data(sk);
743 addr_type = ipv6_addr_type((const struct in6_addr *)
744 &sk->sk_v6_rcv_saddr);
745 if (addr_type != IPV6_ADDR_ANY)
746 cxgb4_clip_release(csk->egress_dev, (const u32 *)
747 &sk->sk_v6_rcv_saddr, 1);
750 chtls_disconnect_acceptq(sk);
753 static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
755 struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
756 unsigned int stid = GET_TID(rpl);
757 struct listen_ctx *listen_ctx;
759 listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
761 return CPL_RET_BUF_DONE;
763 if (listen_ctx->state == T4_LISTEN_START_PENDING) {
764 listen_ctx->state = T4_LISTEN_STARTED;
765 return CPL_RET_BUF_DONE;
768 if (rpl->status != CPL_ERR_NONE) {
769 pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
771 return CPL_RET_BUF_DONE;
773 cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
774 sock_put(listen_ctx->lsk);
776 module_put(THIS_MODULE);
781 static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
783 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
784 struct listen_ctx *listen_ctx;
789 data = lookup_stid(cdev->tids, stid);
790 listen_ctx = (struct listen_ctx *)data;
792 if (rpl->status != CPL_ERR_NONE) {
793 pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
795 return CPL_RET_BUF_DONE;
798 cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
799 sock_put(listen_ctx->lsk);
801 module_put(THIS_MODULE);
806 static void chtls_purge_wr_queue(struct sock *sk)
810 while ((skb = dequeue_wr(sk)) != NULL)
814 static void chtls_release_resources(struct sock *sk)
816 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
817 struct chtls_dev *cdev = csk->cdev;
818 unsigned int tid = csk->tid;
819 struct tid_info *tids;
825 kfree_skb(csk->txdata_skb_cache);
826 csk->txdata_skb_cache = NULL;
828 if (csk->wr_credits != csk->wr_max_credits) {
829 chtls_purge_wr_queue(sk);
830 chtls_reset_wr_list(csk);
833 if (csk->l2t_entry) {
834 cxgb4_l2t_release(csk->l2t_entry);
835 csk->l2t_entry = NULL;
838 if (sk->sk_state != TCP_SYN_SENT) {
839 cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
844 static void chtls_conn_done(struct sock *sk)
846 if (sock_flag(sk, SOCK_DEAD))
847 chtls_purge_receive_queue(sk);
848 sk_wakeup_sleepers(sk, 0);
852 static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
/*
 * If the server is still open we clean up the child connection,
 * otherwise the server already did the clean up as it was purging
 * its SYN queue and the skb was just sitting in its backlog.
 */
859 if (likely(parent->sk_state == TCP_LISTEN)) {
860 cleanup_syn_rcv_conn(child, parent);
/* Without the below call to sock_orphan,
 * we leak the socket resource with syn_flood test
 * as inet_csk_destroy_sock will not be called
 * in tcp_done since SOCK_DEAD flag is not set.
 * Kernel handles this differently where new socket is
 * created only after 3 way handshake is done.
 */
sock_orphan(child);
869 percpu_counter_inc((child)->sk_prot->orphan_count);
870 chtls_release_resources(child);
871 chtls_conn_done(child);
873 if (csk_flag(child, CSK_RST_ABORTED)) {
874 chtls_release_resources(child);
875 chtls_conn_done(child);
880 static void pass_open_abort(struct sock *child, struct sock *parent,
883 do_abort_syn_rcv(child, parent);
887 static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
889 pass_open_abort(skb->sk, lsk, skb);
892 static void chtls_pass_open_arp_failure(struct sock *sk,
895 const struct request_sock *oreq;
896 struct chtls_sock *csk;
897 struct chtls_dev *cdev;
901 csk = rcu_dereference_sk_user_data(sk);
/*
 * If the connection is being aborted due to the parent listening
 * socket going away there's nothing to do, the ABORT_REQ will close
 * the connection.
 */
909 if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
914 oreq = csk->passive_reap_next;
915 data = lookup_stid(cdev->tids, oreq->ts_recent);
916 parent = ((struct listen_ctx *)data)->lsk;
918 bh_lock_sock(parent);
919 if (!sock_owned_by_user(parent)) {
920 pass_open_abort(sk, parent, skb);
922 BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
923 __sk_add_backlog(parent, skb);
925 bh_unlock_sock(parent);
928 static void chtls_accept_rpl_arp_failure(void *handle,
931 struct sock *sk = (struct sock *)handle;
934 process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
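
/* Pick the hardware MTU index and the MSS advertised to the peer, bounded by
 * the peer's MSS option, the path MTU and any user-configured MSS.
 */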
938 static unsigned int chtls_select_mss(const struct chtls_sock *csk,
940 struct cpl_pass_accept_req *req)
942 struct chtls_dev *cdev;
943 struct dst_entry *dst;
944 unsigned int tcpoptsz;
945 unsigned int iphdrsz;
946 unsigned int mtu_idx;
951 mss = ntohs(req->tcpopt.mss);
953 dst = __sk_dst_get(sk);
958 #if IS_ENABLED(CONFIG_IPV6)
959 if (sk->sk_family == AF_INET6)
960 iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
963 iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
964 if (req->tcpopt.tstamp)
965 tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
967 tp->advmss = dst_metric_advmss(dst);
968 if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
969 tp->advmss = USER_MSS(tp);
970 if (tp->advmss > pmtu - iphdrsz)
971 tp->advmss = pmtu - iphdrsz;
972 if (mss && tp->advmss > mss)
975 tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
977 tp->advmss - tcpoptsz,
979 tp->advmss -= iphdrsz;
981 inet_csk(sk)->icsk_pmtu_cookie = pmtu;
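
/* Choose the smallest receive window scale factor (at most 14) that can cover
 * the available receive buffer space.
 */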
985 static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
989 if (space > MAX_RCV_WND)
991 if (win_clamp && win_clamp < space)
995 while (wscale < 14 && (65535 << wscale) < space)
1001 static void chtls_pass_accept_rpl(struct sk_buff *skb,
1002 struct cpl_pass_accept_req *req,
1006 struct cpl_t5_pass_accept_rpl *rpl5;
1007 struct cxgb4_lld_info *lldi;
1008 const struct tcphdr *tcph;
1009 const struct tcp_sock *tp;
1010 struct chtls_sock *csk;
1018 csk = sk->sk_user_data;
1020 lldi = csk->cdev->lldi;
1021 len = roundup(sizeof(*rpl5), 16);
1023 rpl5 = __skb_put_zero(skb, len);
1024 INIT_TP_WR(rpl5, tid);
1026 OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1028 csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
1030 opt0 = TCAM_BYPASS_F |
1031 WND_SCALE_V(RCV_WSCALE(tp)) |
1032 MSS_IDX_V(csk->mtu_idx) |
1033 L2T_IDX_V(csk->l2t_entry->idx) |
1034 NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
1035 TX_CHAN_V(csk->tx_chan) |
1036 SMAC_SEL_V(csk->smac_idx) |
1037 DSCP_V(csk->tos >> 2) |
1038 ULP_MODE_V(ULP_MODE_TLS) |
1039 RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));
1041 opt2 = RX_CHANNEL_V(0) |
1042 RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
1044 if (!is_t5(lldi->adapter_type))
1045 opt2 |= RX_FC_DISABLE_F;
1046 if (req->tcpopt.tstamp)
1047 opt2 |= TSTAMPS_EN_F;
1048 if (req->tcpopt.sack)
1050 hlen = ntohl(req->hdr_len);
1052 tcph = (struct tcphdr *)((u8 *)(req + 1) +
1053 T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
1054 if (tcph->ece && tcph->cwr)
1055 opt2 |= CCTRL_ECN_V(1);
1056 opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
1058 opt2 |= T5_OPT_2_VALID_F;
1059 opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
1060 rpl5->opt0 = cpu_to_be64(opt0);
1061 rpl5->opt2 = cpu_to_be32(opt2);
1062 rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
1063 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
1064 t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
1065 cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
1068 static void inet_inherit_port(struct inet_hashinfo *hash_info,
1069 struct sock *lsk, struct sock *newsk)
1072 __inet_inherit_port(lsk, newsk);
1076 static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1078 if (skb->protocol) {
1082 BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
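
/* Size the connection's send and receive windows: 256KB at 10Gb/s, scaled up
 * with the egress port's link speed.
 */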
1086 static void chtls_set_tcp_window(struct chtls_sock *csk)
1088 struct net_device *ndev = csk->egress_dev;
1089 struct port_info *pi = netdev_priv(ndev);
1090 unsigned int linkspeed;
1093 linkspeed = pi->link_cfg.speed;
1094 scale = linkspeed / SPEED_10000;
1095 #define CHTLS_10G_RCVWIN (256 * 1024)
1096 csk->rcv_win = CHTLS_10G_RCVWIN;
1098 csk->rcv_win *= scale;
1099 #define CHTLS_10G_SNDWIN (256 * 1024)
1100 csk->snd_win = CHTLS_10G_SNDWIN;
1102 csk->snd_win *= scale;
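
/* Create and initialize the child socket for an offloaded passive open:
 * resolve the route and L2T entry towards the peer, copy the addresses from
 * the incoming SYN, and bind the new connection to the egress port and the
 * adapter's RX/TX queues.
 */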
1105 static struct sock *chtls_recv_sock(struct sock *lsk,
1106 struct request_sock *oreq,
1108 const struct cpl_pass_accept_req *req,
1109 struct chtls_dev *cdev)
1111 struct neighbour *n = NULL;
1112 struct inet_sock *newinet;
1113 const struct iphdr *iph;
1114 struct tls_context *ctx;
1115 struct net_device *ndev;
1116 struct chtls_sock *csk;
1117 struct dst_entry *dst;
1118 struct tcp_sock *tp;
1124 iph = (const struct iphdr *)network_hdr;
1125 newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
1129 if (lsk->sk_family == AF_INET) {
1130 dst = inet_csk_route_child_sock(lsk, newsk, oreq);
1134 n = dst_neigh_lookup(dst, &iph->saddr);
1135 #if IS_ENABLED(CONFIG_IPV6)
1137 const struct ipv6hdr *ip6h;
1140 ip6h = (const struct ipv6hdr *)network_hdr;
1141 memset(&fl6, 0, sizeof(fl6));
1142 fl6.flowi6_proto = IPPROTO_TCP;
1143 fl6.saddr = ip6h->daddr;
1144 fl6.daddr = ip6h->saddr;
1145 fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
1146 fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
1147 security_req_classify_flow(oreq, flowi6_to_flowi(&fl6));
1148 dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
1151 n = dst_neigh_lookup(dst, &ip6h->saddr);
1160 port_id = cxgb4_port_idx(ndev);
1162 csk = chtls_sock_create(cdev);
1166 csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
1167 if (!csk->l2t_entry)
1170 newsk->sk_user_data = csk;
1171 newsk->sk_backlog_rcv = chtls_backlog_rcv;
1174 newinet = inet_sk(newsk);
1176 if (iph->version == 0x4) {
1177 newinet->inet_daddr = iph->saddr;
1178 newinet->inet_rcv_saddr = iph->daddr;
1179 newinet->inet_saddr = iph->daddr;
1180 #if IS_ENABLED(CONFIG_IPV6)
1182 struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk;
1183 struct inet_request_sock *treq = inet_rsk(oreq);
1184 struct ipv6_pinfo *newnp = inet6_sk(newsk);
1185 struct ipv6_pinfo *np = inet6_sk(lsk);
1187 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1188 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1189 newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
1190 newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
1191 inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
1192 newnp->ipv6_fl_list = NULL;
1193 newnp->pktoptions = NULL;
1194 newsk->sk_bound_dev_if = treq->ir_iif;
1195 newinet->inet_opt = NULL;
1196 newinet->inet_daddr = LOOPBACK4_IPV6;
1197 newinet->inet_saddr = LOOPBACK4_IPV6;
1201 oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1202 sk_setup_caps(newsk, dst);
1203 ctx = tls_get_ctx(lsk);
1204 newsk->sk_destruct = ctx->sk_destruct;
1206 csk->passive_reap_next = oreq;
1207 csk->tx_chan = cxgb4_port_chan(ndev);
1208 csk->port_id = port_id;
1209 csk->egress_dev = ndev;
1210 csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
1211 chtls_set_tcp_window(csk);
1212 tp->rcv_wnd = csk->rcv_win;
1213 csk->sndbuf = csk->snd_win;
1214 csk->ulp_mode = ULP_MODE_TLS;
1215 step = cdev->lldi->nrxq / cdev->lldi->nchan;
1216 csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
1217 rxq_idx = port_id * step;
1218 csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
1220 csk->sndbuf = newsk->sk_sndbuf;
1221 csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
1222 RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
1224 ipv4.sysctl_tcp_window_scaling,
1227 inet_inherit_port(&tcp_hashinfo, lsk, newsk);
1228 csk_set_flag(csk, CSK_CONN_INLINE);
1229 bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
1233 chtls_sock_release(&csk->kref);
1237 inet_csk_prepare_forced_close(newsk);
1240 chtls_reqsk_free(oreq);
/*
 * Populate a TID_RELEASE WR. The skb must be already properly sized.
 */
1247 static void mk_tid_release(struct sk_buff *skb,
1248 unsigned int chan, unsigned int tid)
1250 struct cpl_tid_release *req;
1253 len = roundup(sizeof(struct cpl_tid_release), 16);
1254 req = (struct cpl_tid_release *)__skb_put(skb, len);
1255 memset(req, 0, len);
1256 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1257 INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
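
/* Pin the module that owns the socket's ULP so it cannot be unloaded while
 * this offloaded connection exists.
 */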
1260 static int chtls_get_module(struct sock *sk)
1262 struct inet_connection_sock *icsk = inet_csk(sk);
1264 if (!try_module_get(icsk->icsk_ulp_ops->owner))
1270 static void chtls_pass_accept_request(struct sock *sk,
1271 struct sk_buff *skb)
1273 struct cpl_t5_pass_accept_rpl *rpl;
1274 struct cpl_pass_accept_req *req;
1275 struct listen_ctx *listen_ctx;
1276 struct vlan_ethhdr *vlan_eh;
1277 struct request_sock *oreq;
1278 struct sk_buff *reply_skb;
1279 struct chtls_sock *csk;
1280 struct chtls_dev *cdev;
1281 struct ipv6hdr *ip6h;
1282 struct tcphdr *tcph;
1291 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
1295 req = cplhdr(skb) + RSS_HDR;
1297 cdev = BLOG_SKB_CB(skb)->cdev;
1298 newsk = lookup_tid(cdev->tids, tid);
1299 stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1301 pr_info("tid (%d) already in use\n", tid);
1305 len = roundup(sizeof(*rpl), 16);
1306 reply_skb = alloc_skb(len, GFP_ATOMIC);
1308 cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
1313 if (sk->sk_state != TCP_LISTEN)
1316 if (inet_csk_reqsk_queue_is_full(sk))
1319 if (sk_acceptq_is_full(sk))
1323 eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
1324 if (eth_hdr_len == ETH_HLEN) {
1325 eh = (struct ethhdr *)(req + 1);
1326 iph = (struct iphdr *)(eh + 1);
1327 ip6h = (struct ipv6hdr *)(eh + 1);
1328 network_hdr = (void *)(eh + 1);
1330 vlan_eh = (struct vlan_ethhdr *)(req + 1);
1331 iph = (struct iphdr *)(vlan_eh + 1);
1332 ip6h = (struct ipv6hdr *)(vlan_eh + 1);
1333 network_hdr = (void *)(vlan_eh + 1);
1336 if (iph->version == 0x4) {
1337 tcph = (struct tcphdr *)(iph + 1);
1338 skb_set_network_header(skb, (void *)iph - (void *)req);
1339 oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
1341 tcph = (struct tcphdr *)(ip6h + 1);
1342 skb_set_network_header(skb, (void *)ip6h - (void *)req);
1343 oreq = inet_reqsk_alloc(&chtls_rsk_opsv6, sk, false);
1349 oreq->rsk_rcv_wnd = 0;
1350 oreq->rsk_window_clamp = 0;
1351 oreq->syncookie = 0;
1353 oreq->ts_recent = 0;
1355 tcp_rsk(oreq)->tfo_listener = false;
1356 tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
1357 chtls_set_req_port(oreq, tcph->source, tcph->dest);
1358 if (iph->version == 0x4) {
1359 chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
1360 ip_dsfield = ipv4_get_dsfield(iph);
1361 #if IS_ENABLED(CONFIG_IPV6)
1363 inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
1364 inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
1365 ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb));
1368 if (req->tcpopt.wsf <= 14 &&
1369 sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
1370 inet_rsk(oreq)->wscale_ok = 1;
1371 inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
1373 inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
1374 th_ecn = tcph->ece && tcph->cwr;
1376 ect = !INET_ECN_is_not_ect(ip_dsfield);
1377 ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
1378 if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
1379 inet_rsk(oreq)->ecn_ok = 1;
1382 newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
1386 if (chtls_get_module(newsk))
1388 inet_csk_reqsk_queue_added(sk);
1389 reply_skb->sk = newsk;
1390 chtls_install_cpl_ops(newsk);
1391 cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family);
1392 csk = rcu_dereference_sk_user_data(newsk);
1393 listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
1394 csk->listen_ctx = listen_ctx;
1395 __skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq);
1396 chtls_pass_accept_rpl(reply_skb, req, tid);
1401 chtls_reqsk_free(oreq);
1403 mk_tid_release(reply_skb, 0, tid);
1404 cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
/*
 * Handle a CPL_PASS_ACCEPT_REQ message.
 */
1411 static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
1413 struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
1414 struct listen_ctx *ctx;
1420 stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1423 data = lookup_stid(cdev->tids, stid);
1427 ctx = (struct listen_ctx *)data;
1430 if (unlikely(tid_out_of_range(cdev->tids, tid))) {
1431 pr_info("passive open TID %u too large\n", tid);
1435 BLOG_SKB_CB(skb)->cdev = cdev;
1436 process_cpl_msg(chtls_pass_accept_request, lsk, skb);
/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCP_ESTABLISHED.
 *
 * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
 */
1446 static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
1448 struct tcp_sock *tp = tcp_sk(sk);
1450 tp->pushed_seq = snd_isn;
1451 tp->write_seq = snd_isn;
1452 tp->snd_nxt = snd_isn;
1453 tp->snd_una = snd_isn;
1454 inet_sk(sk)->inet_id = prandom_u32();
1455 assign_rxopt(sk, opt);
1457 if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
1458 tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10);
1461 tcp_set_state(sk, TCP_ESTABLISHED);
1464 static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
1466 struct sk_buff *abort_skb;
1468 abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
1470 chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
1473 static struct sock *reap_list;
1474 static DEFINE_SPINLOCK(reap_list_lock);
/*
 * Process the reap list.
 */
1479 DECLARE_TASK_FUNC(process_reap_list, task_param)
1481 spin_lock_bh(&reap_list_lock);
1483 struct sock *sk = reap_list;
1484 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
1486 reap_list = csk->passive_reap_next;
1487 csk->passive_reap_next = NULL;
1488 spin_unlock(&reap_list_lock);
1492 chtls_abort_conn(sk, NULL);
1494 if (sk->sk_state == TCP_CLOSE)
1495 inet_csk_destroy_sock(sk);
1498 spin_lock(&reap_list_lock);
1500 spin_unlock_bh(&reap_list_lock);
1503 static DECLARE_WORK(reap_task, process_reap_list);
1505 static void add_to_reap_list(struct sock *sk)
1507 struct chtls_sock *csk = sk->sk_user_data;
1511 release_tcp_port(sk); /* release the port immediately */
1513 spin_lock(&reap_list_lock);
1514 csk->passive_reap_next = reap_list;
1516 if (!csk->passive_reap_next)
1517 schedule_work(&reap_task);
1518 spin_unlock(&reap_list_lock);
1523 static void add_pass_open_to_parent(struct sock *child, struct sock *lsk,
1524 struct chtls_dev *cdev)
1526 struct request_sock *oreq;
1527 struct chtls_sock *csk;
1529 if (lsk->sk_state != TCP_LISTEN)
1532 csk = child->sk_user_data;
1533 oreq = csk->passive_reap_next;
1534 csk->passive_reap_next = NULL;
1536 reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);
1537 __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
1539 if (sk_acceptq_is_full(lsk)) {
1540 chtls_reqsk_free(oreq);
1541 add_to_reap_list(child);
1543 refcount_set(&oreq->rsk_refcnt, 1);
1544 inet_csk_reqsk_queue_add(lsk, oreq, child);
1545 lsk->sk_data_ready(lsk);
1549 static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
1551 struct sock *child = skb->sk;
1554 add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
1558 static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
1560 struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
1561 struct chtls_sock *csk;
1562 struct sock *lsk, *sk;
1565 hwtid = GET_TID(req);
1566 sk = lookup_tid(cdev->tids, hwtid);
1568 return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);
1571 if (unlikely(sock_owned_by_user(sk))) {
1577 csk = sk->sk_user_data;
1578 csk->wr_max_credits = 64;
1579 csk->wr_credits = 64;
1580 csk->wr_unacked = 0;
1581 make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
1582 stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1583 sk->sk_state_change(sk);
1584 if (unlikely(sk->sk_socket))
1585 sk_wake_async(sk, 0, POLL_OUT);
1587 data = lookup_stid(cdev->tids, stid);
1588 lsk = ((struct listen_ctx *)data)->lsk;
1591 if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) {
1592 /* removed from synq */
1593 bh_unlock_sock(lsk);
1598 if (likely(!sock_owned_by_user(lsk))) {
1600 add_pass_open_to_parent(sk, lsk, cdev);
1603 BLOG_SKB_CB(skb)->cdev = cdev;
1604 BLOG_SKB_CB(skb)->backlog_rcv =
1605 bl_add_pass_open_to_parent;
1606 __sk_add_backlog(lsk, skb);
1608 bh_unlock_sock(lsk);
/*
 * Handle receipt of an urgent pointer.
 */
1618 static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
1620 struct tcp_sock *tp = tcp_sk(sk);
1623 if (tp->urg_data && !after(urg_seq, tp->urg_seq))
1624 return; /* duplicate pointer */
1627 if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
1628 !sock_flag(sk, SOCK_URGINLINE) &&
1629 tp->copied_seq != tp->rcv_nxt) {
1630 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1633 if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
1634 chtls_free_skb(sk, skb);
1637 tp->urg_data = TCP_URG_NOTYET;
1638 tp->urg_seq = urg_seq;
1641 static void check_sk_callbacks(struct chtls_sock *csk)
1643 struct sock *sk = csk->sk;
1645 if (unlikely(sk->sk_user_data &&
1646 !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD)))
1647 csk_set_flag(csk, CSK_CALLBACKS_CHKD);
/*
 * Handles Rx data that arrives in a state where the socket isn't accepting
 * new data.
 */
1654 static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
1656 if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
1657 chtls_abort_conn(sk, skb);
1662 static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
1664 struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR;
1665 struct chtls_sock *csk;
1666 struct tcp_sock *tp;
1668 csk = rcu_dereference_sk_user_data(sk);
1671 if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
1672 handle_excess_rx(sk, skb);
1676 ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
1677 ULP_SKB_CB(skb)->psh = hdr->psh;
1678 skb_ulp_mode(skb) = ULP_MODE_NONE;
1680 skb_reset_transport_header(skb);
1681 __skb_pull(skb, sizeof(*hdr) + RSS_HDR);
1683 __skb_trim(skb, ntohs(hdr->len));
1685 if (unlikely(hdr->urg))
1686 handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
1687 if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
1688 tp->urg_seq - tp->rcv_nxt < skb->len))
1689 tp->urg_data = TCP_URG_VALID |
1690 skb->data[tp->urg_seq - tp->rcv_nxt];
1692 if (unlikely(hdr->dack_mode != csk->delack_mode)) {
1693 csk->delack_mode = hdr->dack_mode;
1694 csk->delack_seq = tp->rcv_nxt;
1697 tcp_hdr(skb)->fin = 0;
1698 tp->rcv_nxt += skb->len;
1700 __skb_queue_tail(&sk->sk_receive_queue, skb);
1702 if (!sock_flag(sk, SOCK_DEAD)) {
1703 check_sk_callbacks(csk);
1704 sk->sk_data_ready(sk);
1708 static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
1710 struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
1711 unsigned int hwtid = GET_TID(req);
1714 sk = lookup_tid(cdev->tids, hwtid);
1715 if (unlikely(!sk)) {
1716 pr_err("can't find conn. for hwtid %u.\n", hwtid);
1719 skb_dst_set(skb, NULL);
1720 process_cpl_msg(chtls_recv_data, sk, skb);
1724 static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
1726 struct cpl_tls_data *hdr = cplhdr(skb);
1727 struct chtls_sock *csk;
1728 struct chtls_hws *tlsk;
1729 struct tcp_sock *tp;
1731 csk = rcu_dereference_sk_user_data(sk);
1732 tlsk = &csk->tlshws;
1735 if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
1736 handle_excess_rx(sk, skb);
1740 ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
1741 ULP_SKB_CB(skb)->flags = 0;
1742 skb_ulp_mode(skb) = ULP_MODE_TLS;
1744 skb_reset_transport_header(skb);
1745 __skb_pull(skb, sizeof(*hdr));
1748 CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)));
1750 if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq -
1751 tp->rcv_nxt < skb->len))
1752 tp->urg_data = TCP_URG_VALID |
1753 skb->data[tp->urg_seq - tp->rcv_nxt];
1755 tcp_hdr(skb)->fin = 0;
1756 tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd));
1757 __skb_queue_tail(&tlsk->sk_recv_queue, skb);
1760 static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
1762 struct cpl_tls_data *req = cplhdr(skb);
1763 unsigned int hwtid = GET_TID(req);
1766 sk = lookup_tid(cdev->tids, hwtid);
1767 if (unlikely(!sk)) {
1768 pr_err("can't find conn. for hwtid %u.\n", hwtid);
1771 skb_dst_set(skb, NULL);
1772 process_cpl_msg(chtls_recv_pdu, sk, skb);
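
/* Record the received TLS record length: stash the completion header's length
 * field in skb->hdr_len and overwrite it with the payload length (nlen).
 */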
1776 static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
1778 struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb);
1780 skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length);
1781 tls_cmp_hdr->length = ntohs((__force __be16)nlen);
1784 static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
1786 struct tlsrx_cmp_hdr *tls_hdr_pkt;
1787 struct cpl_rx_tls_cmp *cmp_cpl;
1788 struct sk_buff *skb_rec;
1789 struct chtls_sock *csk;
1790 struct chtls_hws *tlsk;
1791 struct tcp_sock *tp;
1793 cmp_cpl = cplhdr(skb);
1794 csk = rcu_dereference_sk_user_data(sk);
1795 tlsk = &csk->tlshws;
1798 ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq);
1799 ULP_SKB_CB(skb)->flags = 0;
1801 skb_reset_transport_header(skb);
1802 __skb_pull(skb, sizeof(*cmp_cpl));
1803 tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
1804 if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
1805 tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
1807 __skb_trim(skb, TLS_HEADER_LENGTH);
1810 CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));
1812 ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
1813 skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
1815 __skb_queue_tail(&sk->sk_receive_queue, skb);
1817 chtls_set_hdrlen(skb, tlsk->pldlen);
1819 __skb_queue_tail(&sk->sk_receive_queue, skb);
1820 __skb_queue_tail(&sk->sk_receive_queue, skb_rec);
1823 if (!sock_flag(sk, SOCK_DEAD)) {
1824 check_sk_callbacks(csk);
1825 sk->sk_data_ready(sk);
1829 static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
1831 struct cpl_rx_tls_cmp *req = cplhdr(skb);
1832 unsigned int hwtid = GET_TID(req);
1835 sk = lookup_tid(cdev->tids, hwtid);
1836 if (unlikely(!sk)) {
1837 pr_err("can't find conn. for hwtid %u.\n", hwtid);
1840 skb_dst_set(skb, NULL);
1841 process_cpl_msg(chtls_rx_hdr, sk, skb);
1846 static void chtls_timewait(struct sock *sk)
1848 struct tcp_sock *tp = tcp_sk(sk);
1851 tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
1853 tcp_time_wait(sk, TCP_TIME_WAIT, 0);
1856 static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
1858 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
1860 if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
1863 sk->sk_shutdown |= RCV_SHUTDOWN;
1864 sock_set_flag(sk, SOCK_DONE);
1866 switch (sk->sk_state) {
1868 case TCP_ESTABLISHED:
1869 tcp_set_state(sk, TCP_CLOSE_WAIT);
1872 tcp_set_state(sk, TCP_CLOSING);
1875 chtls_release_resources(sk);
1876 if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
1877 chtls_conn_done(sk);
1882 pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
1885 if (!sock_flag(sk, SOCK_DEAD)) {
1886 sk->sk_state_change(sk);
1887 /* Do not send POLL_HUP for half duplex close. */
1889 if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
1890 sk->sk_state == TCP_CLOSE)
1891 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
1893 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
1899 static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
1901 struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR;
1902 struct chtls_sock *csk;
1903 struct tcp_sock *tp;
1905 csk = rcu_dereference_sk_user_data(sk);
1907 if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
1912 tp->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
1914 switch (sk->sk_state) {
1916 chtls_release_resources(sk);
1917 if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
1918 chtls_conn_done(sk);
1923 chtls_release_resources(sk);
1924 chtls_conn_done(sk);
1927 tcp_set_state(sk, TCP_FIN_WAIT2);
1928 sk->sk_shutdown |= SEND_SHUTDOWN;
1930 if (!sock_flag(sk, SOCK_DEAD))
1931 sk->sk_state_change(sk);
1932 else if (tcp_sk(sk)->linger2 < 0 &&
1933 !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
1934 chtls_abort_conn(sk, skb);
1937 pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
1943 static struct sk_buff *get_cpl_skb(struct sk_buff *skb,
1944 size_t len, gfp_t gfp)
1946 if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) {
1947 WARN_ONCE(skb->len < len, "skb alloc error");
1948 __skb_trim(skb, len);
1951 skb = alloc_skb(len, gfp);
1953 __skb_put(skb, len);
1958 static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
1961 struct cpl_abort_rpl *rpl = cplhdr(skb);
1963 INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid);
1967 static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
1969 struct cpl_abort_req_rss *req = cplhdr(skb);
1970 struct sk_buff *reply_skb;
1972 reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
1973 GFP_KERNEL | __GFP_NOFAIL);
1974 __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
1975 set_abort_rpl_wr(reply_skb, GET_TID(req),
1976 (req->status & CPL_ABORT_NO_RST));
1977 set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1);
1978 cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
/*
 * Add an skb to the deferred skb queue for processing from process context.
 */
1985 static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
1986 defer_handler_t handler)
1988 DEFERRED_SKB_CB(skb)->handler = handler;
1989 spin_lock_bh(&cdev->deferq.lock);
1990 __skb_queue_tail(&cdev->deferq, skb);
1991 if (skb_queue_len(&cdev->deferq) == 1)
1992 schedule_work(&cdev->deferq_task);
1993 spin_unlock_bh(&cdev->deferq.lock);
1996 static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
1997 struct chtls_dev *cdev, int status, int queue)
1999 struct cpl_abort_req_rss *req = cplhdr(skb);
2000 struct sk_buff *reply_skb;
2001 struct chtls_sock *csk;
2003 csk = rcu_dereference_sk_user_data(sk);
2005 reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
2009 req->status = (queue << 1);
2010 t4_defer_reply(skb, cdev, send_defer_abort_rpl);
2014 set_abort_rpl_wr(reply_skb, GET_TID(req), status);
2017 set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
2018 if (csk_conn_inline(csk)) {
2019 struct l2t_entry *e = csk->l2t_entry;
2021 if (e && sk->sk_state != TCP_SYN_RECV) {
2022 cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
2026 cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
2029 static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
2030 struct chtls_dev *cdev,
2031 int status, int queue)
2033 struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
2034 struct sk_buff *reply_skb;
2035 struct chtls_sock *csk;
2038 csk = rcu_dereference_sk_user_data(sk);
2041 reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any());
2043 req->status = (queue << 1) | status;
2044 t4_defer_reply(skb, cdev, send_defer_abort_rpl);
2048 set_abort_rpl_wr(reply_skb, tid, status);
2050 set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
2051 if (csk_conn_inline(csk)) {
2052 struct l2t_entry *e = csk->l2t_entry;
2054 if (e && sk->sk_state != TCP_SYN_RECV) {
2055 cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
2059 cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
/*
 * This is run from a listener's backlog to abort a child connection in
 * SYN_RCV state (i.e., one on the listener's SYN queue).
 */
2066 static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
2068 struct chtls_sock *csk;
2073 csk = rcu_dereference_sk_user_data(child);
2074 queue = csk->txq_idx;
2077 do_abort_syn_rcv(child, lsk);
2078 send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
2079 CPL_ABORT_NO_RST, queue);
2082 static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
2084 const struct request_sock *oreq;
2085 struct listen_ctx *listen_ctx;
2086 struct chtls_sock *csk;
2087 struct chtls_dev *cdev;
2091 csk = sk->sk_user_data;
2092 oreq = csk->passive_reap_next;
2098 ctx = lookup_stid(cdev->tids, oreq->ts_recent);
2102 listen_ctx = (struct listen_ctx *)ctx;
2103 psk = listen_ctx->lsk;
2106 if (!sock_owned_by_user(psk)) {
2107 int queue = csk->txq_idx;
2109 do_abort_syn_rcv(sk, psk);
2110 send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
2113 BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
2114 __sk_add_backlog(psk, skb);
2116 bh_unlock_sock(psk);
2120 static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
2122 const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
2123 struct chtls_sock *csk = sk->sk_user_data;
2124 int rst_status = CPL_ABORT_NO_RST;
2125 int queue = csk->txq_idx;
2127 if (is_neg_adv(req->status)) {
2128 if (sk->sk_state == TCP_SYN_RECV)
2129 chtls_set_tcb_tflag(sk, 0, 0);
2135 csk_reset_flag(csk, CSK_ABORT_REQ_RCVD);
2137 if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) &&
2138 !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
2139 struct tcp_sock *tp = tcp_sk(sk);
2141 if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
2142 WARN_ONCE(1, "send_tx_flowc error");
2143 csk_set_flag(csk, CSK_TX_DATA_SENT);
2146 csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
2148 if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
2149 sk->sk_err = ETIMEDOUT;
2151 if (!sock_flag(sk, SOCK_DEAD))
2152 sk->sk_error_report(sk);
2154 if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
2157 chtls_release_resources(sk);
2158 chtls_conn_done(sk);
2161 chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
2165 static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
2167 struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
2168 struct chtls_sock *csk;
2169 struct chtls_dev *cdev;
2171 csk = rcu_dereference_sk_user_data(sk);
2174 if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
2175 csk_reset_flag(csk, CSK_ABORT_RPL_PENDING);
2176 if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) {
2177 if (sk->sk_state == TCP_SYN_SENT) {
2178 cxgb4_remove_tid(cdev->tids,
2184 chtls_release_resources(sk);
2185 chtls_conn_done(sk);
2191 static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
2193 struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
2194 void (*fn)(struct sock *sk, struct sk_buff *skb);
2195 unsigned int hwtid = GET_TID(req);
2196 struct chtls_sock *csk;
2200 opcode = ((const struct rss_header *)cplhdr(skb))->opcode;
2202 sk = lookup_tid(cdev->tids, hwtid);
2206 csk = sk->sk_user_data;
2209 case CPL_PEER_CLOSE:
2210 fn = chtls_peer_close;
2212 case CPL_CLOSE_CON_RPL:
2213 fn = chtls_close_con_rpl;
2215 case CPL_ABORT_REQ_RSS:
/*
 * Save the offload device in the skb, we may process this
 * message after the socket has closed.
 */
2220 BLOG_SKB_CB(skb)->cdev = csk->cdev;
2221 fn = chtls_abort_req_rss;
2223 case CPL_ABORT_RPL_RSS:
2224 fn = chtls_abort_rpl_rss;
2230 process_cpl_msg(fn, sk, skb);
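
/* Handle a CPL_FW4_ACK: return completed work-request credits to the socket,
 * advance snd_una when the sequence number is marked valid, and resume
 * transmission if data is waiting in the TX queue.
 */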
2238 static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
2240 struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
2241 struct chtls_sock *csk = sk->sk_user_data;
2242 struct tcp_sock *tp = tcp_sk(sk);
2243 u32 credits = hdr->credits;
2246 snd_una = ntohl(hdr->snd_una);
2247 csk->wr_credits += credits;
2249 if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits)
2250 csk->wr_unacked = csk->wr_max_credits - csk->wr_credits;
2253 struct sk_buff *pskb = csk->wr_skb_head;
2256 if (unlikely(!pskb)) {
2257 if (csk->wr_nondata)
2258 csk->wr_nondata -= credits;
2261 csum = (__force u32)pskb->csum;
2262 if (unlikely(credits < csum)) {
2263 pskb->csum = (__force __wsum)(csum - credits);
2270 if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
2271 if (unlikely(before(snd_una, tp->snd_una))) {
2276 if (tp->snd_una != snd_una) {
2277 tp->snd_una = snd_una;
2278 tp->rcv_tstamp = tcp_time_stamp(tp);
2279 if (tp->snd_una == tp->snd_nxt &&
2280 !csk_flag_nochk(csk, CSK_TX_FAILOVER))
2281 csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
2285 if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) {
2286 unsigned int fclen16 = roundup(failover_flowc_wr_len, 16);
2288 csk->wr_credits -= fclen16;
2289 csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
2290 csk_reset_flag(csk, CSK_TX_FAILOVER);
2292 if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0))
2293 sk->sk_write_space(sk);
2298 static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
2300 struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR;
2301 unsigned int hwtid = GET_TID(rpl);
2304 sk = lookup_tid(cdev->tids, hwtid);
2305 if (unlikely(!sk)) {
2306 pr_err("can't find conn. for hwtid %u.\n", hwtid);
2309 process_cpl_msg(chtls_rx_ack, sk, skb);
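
/* Dispatch table mapping CPL opcodes to their connection-management handlers. */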
2314 chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
2315 [CPL_PASS_OPEN_RPL] = chtls_pass_open_rpl,
2316 [CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
2317 [CPL_PASS_ACCEPT_REQ] = chtls_pass_accept_req,
2318 [CPL_PASS_ESTABLISH] = chtls_pass_establish,
2319 [CPL_RX_DATA] = chtls_rx_data,
2320 [CPL_TLS_DATA] = chtls_rx_pdu,
2321 [CPL_RX_TLS_CMP] = chtls_rx_cmp,
2322 [CPL_PEER_CLOSE] = chtls_conn_cpl,
2323 [CPL_CLOSE_CON_RPL] = chtls_conn_cpl,
2324 [CPL_ABORT_REQ_RSS] = chtls_conn_cpl,
2325 [CPL_ABORT_RPL_RSS] = chtls_conn_cpl,
2326 [CPL_FW4_ACK] = chtls_wr_ack,