// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <net/tls.h>
#include <net/addrconf.h>
#include <net/secure_seq.h>

#include "chtls.h"
#include "chtls_cm.h"
#include "clip_tbl.h"
/*
 * State transitions and actions for close.  Note that if we are in SYN_SENT
 * we remain in that state as we cannot control a connection while it's in
 * SYN_SENT; such connections are allowed to establish and are then aborted.
 */
static unsigned char new_state[16] = {
	/* current state:     new state:      action: */
	/* (Invalid)       */ TCP_CLOSE,
	/* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_SYN_SENT    */ TCP_SYN_SENT,
	/* TCP_SYN_RECV    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_FIN_WAIT1   */ TCP_FIN_WAIT1,
	/* TCP_FIN_WAIT2   */ TCP_FIN_WAIT2,
	/* TCP_TIME_WAIT   */ TCP_CLOSE,
	/* TCP_CLOSE       */ TCP_CLOSE,
	/* TCP_CLOSE_WAIT  */ TCP_LAST_ACK | TCP_ACTION_FIN,
	/* TCP_LAST_ACK    */ TCP_LAST_ACK,
	/* TCP_LISTEN      */ TCP_CLOSE,
	/* TCP_CLOSING     */ TCP_CLOSING,
};
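
/*
 * Allocate the offload state for a connection (struct chtls_sock) along with
 * the cached skb later reused for control messages.  Runs in CPL handler
 * context, hence the GFP_ATOMIC allocations.
 */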
static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
{
	struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);

	if (!csk)
		return NULL;

	csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
	if (!csk->txdata_skb_cache) {
		kfree(csk);
		return NULL;
	}

	kref_init(&csk->kref);
	csk->cdev = cdev;
	skb_queue_head_init(&csk->txq);
	csk->wr_skb_head = NULL;
	csk->wr_skb_tail = NULL;
	csk->mss = MAX_MSS;
	csk->tlshws.ofld = 1;
	csk->tlshws.txkey = -1;
	csk->tlshws.rxkey = -1;
	csk->tlshws.mfs = TLS_MFS;
	skb_queue_head_init(&csk->tlshws.sk_recv_queue);
	return csk;
}
static void chtls_sock_release(struct kref *ref)
{
	struct chtls_sock *csk =
		container_of(ref, struct chtls_sock, kref);

	kfree(csk);
}
static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
					    struct sock *sk)
{
	struct adapter *adap = pci_get_drvdata(cdev->pdev);
	struct net_device *ndev = cdev->ports[0];
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *temp;
	int addr_type;
#endif
	int i;

	switch (sk->sk_family) {
	case PF_INET:
		if (likely(!inet_sk(sk)->inet_rcv_saddr))
			return ndev;
		ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
		if (likely(addr_type == IPV6_ADDR_ANY))
			return ndev;

		for_each_netdev_rcu(&init_net, temp) {
			if (ipv6_chk_addr(&init_net, (struct in6_addr *)
					  &sk->sk_v6_rcv_saddr, temp, 1)) {
				ndev = temp;
				break;
			}
		}
		break;
#endif
	default:
		return NULL;
	}

	if (!ndev)
		return NULL;

	if (is_vlan_dev(ndev))
		ndev = vlan_dev_real_dev(ndev);

	for_each_port(adap, i)
		if (cdev->ports[i] == ndev)
			return ndev;
	return NULL;
}
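
/*
 * Decode the TCP options echoed back by the hardware in the *_ESTABLISH CPL
 * (MSS index, timestamps, SACK, window scaling) into the tcp_sock.  The MSS
 * clamp is derived from the adapter's MTU table.
 */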
static void assign_rxopt(struct sock *sk, unsigned int opt)
{
	const struct chtls_dev *cdev;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	cdev = csk->cdev;
	tp->tcp_header_len = sizeof(struct tcphdr);
	tp->rx_opt.mss_clamp = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
	tp->mss_cache = tp->rx_opt.mss_clamp;
	tp->rx_opt.tstamp_ok = TCPOPT_TSTAMP_G(opt);
	tp->rx_opt.snd_wscale = TCPOPT_SACK_G(opt);
	tp->rx_opt.wscale_ok = TCPOPT_WSCALE_OK_G(opt);
	SND_WSCALE(tp) = TCPOPT_SND_WSCALE_G(opt);
	if (!tp->rx_opt.wscale_ok)
		tp->rx_opt.rcv_wscale = 0;
	if (tp->rx_opt.tstamp_ok) {
		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
		tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
	} else if (csk->opt2 & TSTAMPS_EN_F) {
		csk->opt2 &= ~TSTAMPS_EN_F;
		csk->mtu_idx = TCPOPT_MSS_G(opt);
	}
}
static void chtls_purge_receive_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		skb_dst_set(skb, (void *)NULL);
		kfree_skb(skb);
	}
}

static void chtls_purge_write_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->txq))) {
		sk->sk_wmem_queued -= skb->truesize;
		__kfree_skb(skb);
	}
}

static void chtls_purge_recv_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *tlsk = &csk->tlshws;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
		skb_dst_set(skb, NULL);
		kfree_skb(skb);
	}
}
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);
	struct chtls_dev *cdev;

	cdev = (struct chtls_dev *)handle;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(cdev->lldi->ports[0], skb);
}
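
/*
 * Return a control skb of at least @len bytes: reuse the connection's cached
 * skb when we are its only user, otherwise fall back to a fresh __GFP_NOFAIL
 * allocation.
 */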
static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
		__skb_trim(skb, 0);
		refcount_inc(&skb->users);
	} else {
		skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
		if (skb)
			__skb_trim(skb, 0);
	}
	return skb;
}
static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct cpl_abort_req *req;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (!skb)
		skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));

	req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
	req->rsvd0 = htonl(tp->snd_nxt);
	req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
	req->cmd = mode;
	t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
	send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
}
static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
		     !csk->cdev)) {
		if (sk->sk_state == TCP_SYN_RECV)
			csk_set_flag(csk, CSK_RST_ABORTED);
		goto out;
	}

	if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send tx flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
	chtls_purge_write_queue(sk);

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
	if (sk->sk_state != TCP_SYN_RECV)
		chtls_send_abort(sk, mode, skb);
	else
		goto out;

	return;
out:
	kfree_skb(skb);
}
static void release_tcp_port(struct sock *sk)
{
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);
}

static void tcp_uncork(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->nonagle & TCP_NAGLE_CORK) {
		tp->nonagle &= ~TCP_NAGLE_CORK;
		chtls_tcp_push(sk, 0);
	}
}
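
/*
 * Queue a CPL_CLOSE_CON_REQ work request on the connection's TX queue,
 * i.e. send a FIN through the offload engine.
 */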
static void chtls_close_conn(struct sock *sk)
{
	struct cpl_close_con_req *req;
	struct chtls_sock *csk;
	struct sk_buff *skb;
	unsigned int tid;
	unsigned int len;

	len = roundup(sizeof(struct cpl_close_con_req), 16);
	csk = rcu_dereference_sk_user_data(sk);
	tid = csk->tid;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	req = (struct cpl_close_con_req *)__skb_put(skb, len);
	memset(req, 0, len);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
			      FW_WR_IMMDLEN_V(sizeof(*req) -
					      sizeof(req->wr)));
	req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
			       FW_WR_FLOWID_V(tid));

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));

	tcp_uncork(sk);
	skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
	if (sk->sk_state != TCP_SYN_SENT)
		chtls_push_frames(csk, 1);
}
/*
 * Perform a state transition during close and return the actions indicated
 * for the transition.  Do not make this function inline, the main reason
 * it exists at all is to avoid multiple inlining of tcp_set_state.
 */
static int make_close_transition(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];

	tcp_set_state(sk, next & TCP_STATE_MASK);
	return next & TCP_ACTION_FIN;
}
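
/*
 * Offload analogue of tcp_close(): purge the receive queues, run the close
 * state machine above (sending a FIN or an abort as appropriate), then
 * orphan the socket and destroy it once it reaches TCP_CLOSE.
 */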
void chtls_close(struct sock *sk, long timeout)
{
	int data_lost, prev_state;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);

	lock_sock(sk);
	sk->sk_shutdown |= SHUTDOWN_MASK;

	data_lost = skb_queue_len(&sk->sk_receive_queue);
	data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);

	if (sk->sk_state == TCP_CLOSE) {
		goto wait;
	} else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		release_tcp_port(sk);
		goto unlock;
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (make_close_transition(sk)) {
		chtls_close_conn(sk);
	}
wait:
	if (timeout)
		sk_stream_wait_close(sk, timeout);

unlock:
	prev_state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	release_sock(sk);

	local_bh_disable();
	bh_lock_sock(sk);

	if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
	    !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
		if (skb)
			chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
/*
 * Wait until a socket enters one of the given states.
 */
static int wait_for_states(struct sock *sk, unsigned int states)
{
	DECLARE_WAITQUEUE(wait, current);
	struct socket_wq _sk_wq;
	long current_timeo;
	int err = 0;

	current_timeo = 200;

	/*
	 * We want this to work even when there's no associated struct socket.
	 * In that case we provide a temporary wait_queue_head_t.
	 */
	if (!sk->sk_wq) {
		init_waitqueue_head(&_sk_wq.wait);
		_sk_wq.fasync_list = NULL;
		init_rcu_head_on_stack(&_sk_wq.rcu);
		RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
	}

	add_wait_queue(sk_sleep(sk), &wait);
	while (!sk_in_state(sk, states)) {
		if (!current_timeo) {
			err = -EBUSY;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(current_timeo);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		release_sock(sk);
		if (!sk_in_state(sk, states))
			current_timeo = schedule_timeout(current_timeo);
		__set_current_state(TASK_RUNNING);
		lock_sock(sk);
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	if (rcu_dereference(sk->sk_wq) == &_sk_wq)
		sk->sk_wq = NULL;
	return err;
}
int chtls_disconnect(struct sock *sk, int flags)
{
	struct tcp_sock *tp;
	int err;

	tp = tcp_sk(sk);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	chtls_purge_write_queue(sk);

	if (sk->sk_state != TCP_CLOSE) {
		sk->sk_err = ECONNRESET;
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		err = wait_for_states(sk, TCPF_CLOSE);
		if (err)
			return err;
	}
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
	return tcp_disconnect(sk, flags);
}
#define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
				 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
void chtls_shutdown(struct sock *sk, int how)
{
	if ((how & SEND_SHUTDOWN) &&
	    sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
	    make_close_transition(sk))
		chtls_close_conn(sk);
}

void chtls_destroy_sock(struct sock *sk)
{
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);
	chtls_purge_recv_queue(sk);
	csk->ulp_mode = ULP_MODE_NONE;
	chtls_purge_write_queue(sk);
	free_tls_keyid(sk);
	kref_put(&csk->kref, chtls_sock_release);
	if (sk->sk_family == AF_INET)
		sk->sk_prot = &tcp_prot;
#if IS_ENABLED(CONFIG_IPV6)
	else
		sk->sk_prot = &tcpv6_prot;
#endif
	sk->sk_prot->destroy(sk);
}
static void reset_listen_child(struct sock *child)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
	struct sk_buff *skb;

	skb = alloc_ctrl_skb(csk->txdata_skb_cache,
			     sizeof(struct cpl_abort_req));
	chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
	sock_orphan(child);
	INC_ORPHAN_COUNT(child);
	if (child->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(child);
}
static void chtls_disconnect_acceptq(struct sock *listen_sk)
{
	struct request_sock **pprev;

	pprev = ACCEPT_QUEUE(listen_sk);
	while (*pprev) {
		struct request_sock *req = *pprev;

		if (req->rsk_ops == &chtls_rsk_ops ||
		    req->rsk_ops == &chtls_rsk_opsv6) {
			struct sock *child = req->sk;

			*pprev = req->dl_next;
			sk_acceptq_removed(listen_sk);
			reqsk_put(req);
			sock_hold(child);
			local_bh_disable();
			bh_lock_sock(child);
			release_tcp_port(child);
			reset_listen_child(child);
			bh_unlock_sock(child);
			local_bh_enable();
			sock_put(child);
		} else {
			pprev = &req->dl_next;
		}
	}
}
static int listen_hashfn(const struct sock *sk)
{
	return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
}
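
/*
 * The following helpers maintain a small hash table mapping listening
 * sockets to the server TIDs (stids) allocated for them on the adapter,
 * protected by cdev->listen_lock.
 */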
static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
					   struct sock *sk,
					   unsigned int stid)
{
	struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int key = listen_hashfn(sk);

		p->sk = sk;
		p->stid = stid;
		spin_lock(&cdev->listen_lock);
		p->next = cdev->listen_hash_tab[key];
		cdev->listen_hash_tab[key] = p;
		spin_unlock(&cdev->listen_lock);
	}
	return p;
}
static int listen_hash_find(struct chtls_dev *cdev,
			    struct sock *sk)
{
	struct listen_info *p;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);

	spin_lock(&cdev->listen_lock);
	for (p = cdev->listen_hash_tab[key]; p; p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}
static int listen_hash_del(struct chtls_dev *cdev,
			   struct sock *sk)
{
	struct listen_info *p, **prev;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);
	prev = &cdev->listen_hash_tab[key];

	spin_lock(&cdev->listen_lock);
	for (p = *prev; p; prev = &p->next, p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}
static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
{
	struct request_sock *req;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(child);
	req = csk->passive_reap_next;

	reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
	chtls_reqsk_free(req);
	csk->passive_reap_next = NULL;
}
static void chtls_reset_synq(struct listen_ctx *listen_ctx)
{
	struct sock *listen_sk = listen_ctx->lsk;

	while (!skb_queue_empty(&listen_ctx->synq)) {
		struct chtls_sock *csk =
			container_of((struct synq *)__skb_dequeue
				(&listen_ctx->synq), struct chtls_sock, synq);
		struct sock *child = csk->sk;

		cleanup_syn_rcv_conn(child, listen_sk);
		sock_hold(child);
		local_bh_disable();
		bh_lock_sock(child);
		release_tcp_port(child);
		reset_listen_child(child);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);
	}
}
int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
{
	struct net_device *ndev;
#if IS_ENABLED(CONFIG_IPV6)
	bool clip_valid = false;
#endif
	struct listen_ctx *ctx;
	struct adapter *adap;
	struct port_info *pi;
	int ret = 0;
	int stid;

	rcu_read_lock();
	ndev = chtls_find_netdev(cdev, sk);
	rcu_read_unlock();
	if (!ndev)
		return -EBADF;

	pi = netdev_priv(ndev);
	adap = pi->adapter;
	if (!(adap->flags & CXGB4_FULL_INIT_DONE))
		return -EBADF;

	if (listen_hash_find(cdev, sk) >= 0)   /* already have it */
		return -EADDRINUSE;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	__module_get(THIS_MODULE);
	ctx->lsk = sk;
	ctx->cdev = cdev;
	ctx->state = T4_LISTEN_START_PENDING;
	skb_queue_head_init(&ctx->synq);

	stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
	if (stid < 0)
		goto free_ctx;

	sock_hold(sk);
	if (!listen_hash_add(cdev, sk, stid))
		goto free_stid;

	if (sk->sk_family == PF_INET) {
		ret = cxgb4_create_server(ndev, stid,
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_sport, 0,
					  cdev->lldi->rxq_ids[0]);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		int addr_type;

		addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
		if (addr_type != IPV6_ADDR_ANY) {
			ret = cxgb4_clip_get(ndev, (const u32 *)
					     &sk->sk_v6_rcv_saddr, 1);
			if (ret)
				goto del_hash;
			clip_valid = true;
		}
		ret = cxgb4_create_server6(ndev, stid,
					   &sk->sk_v6_rcv_saddr,
					   inet_sk(sk)->inet_sport,
					   cdev->lldi->rxq_ids[0]);
#endif
	}
	if (ret > 0)
		ret = net_xmit_errno(ret);
	if (ret)
		goto del_hash;
	return 0;
del_hash:
#if IS_ENABLED(CONFIG_IPV6)
	if (clip_valid)
		cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1);
#endif
	listen_hash_del(cdev, sk);
free_stid:
	cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
	sock_put(sk);
free_ctx:
	kfree(ctx);
	module_put(THIS_MODULE);
	return -EBADF;
}
void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
{
	struct listen_ctx *listen_ctx;
	int stid;

	stid = listen_hash_del(cdev, sk);
	if (stid < 0)
		return;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	chtls_reset_synq(listen_ctx);

	cxgb4_remove_server(cdev->lldi->ports[0], stid,
			    cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct net_device *ndev = chtls_find_netdev(cdev, sk);
		int addr_type;

		addr_type = ipv6_addr_type((const struct in6_addr *)
					   &sk->sk_v6_rcv_saddr);
		if (addr_type != IPV6_ADDR_ANY)
			cxgb4_clip_release(ndev, (const u32 *)
					   &sk->sk_v6_rcv_saddr, 1);
	}
#endif
	chtls_disconnect_acceptq(sk);
}
static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int stid = GET_TID(rpl);
	struct listen_ctx *listen_ctx;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	if (!listen_ctx)
		return CPL_RET_BUF_DONE;

	if (listen_ctx->state == T4_LISTEN_START_PENDING) {
		listen_ctx->state = T4_LISTEN_STARTED;
		return CPL_RET_BUF_DONE;
	}

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
			rpl->status, stid);
	} else {
		cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
		sock_put(listen_ctx->lsk);
		kfree(listen_ctx);
		module_put(THIS_MODULE);
	}
	return CPL_RET_BUF_DONE;
}
static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *listen_ctx;
	unsigned int stid;
	void *data;

	stid = GET_TID(rpl);
	data = lookup_stid(cdev->tids, stid);
	listen_ctx = (struct listen_ctx *)data;

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
			rpl->status, stid);
	} else {
		cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
		sock_put(listen_ctx->lsk);
		kfree(listen_ctx);
		module_put(THIS_MODULE);
	}
	return CPL_RET_BUF_DONE;
}
static void chtls_purge_wr_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = dequeue_wr(sk)) != NULL)
		kfree_skb(skb);
}
static void chtls_release_resources(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	unsigned int tid = csk->tid;
	struct tid_info *tids;

	if (!cdev)
		return;

	tids = cdev->tids;
	kfree_skb(csk->txdata_skb_cache);
	csk->txdata_skb_cache = NULL;

	if (csk->wr_credits != csk->wr_max_credits) {
		chtls_purge_wr_queue(sk);
		chtls_reset_wr_list(csk);
	}

	if (csk->l2t_entry) {
		cxgb4_l2t_release(csk->l2t_entry);
		csk->l2t_entry = NULL;
	}

	if (sk->sk_state != TCP_SYN_SENT) {
		cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
		sock_put(sk);
	}
}
static void chtls_conn_done(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		chtls_purge_receive_queue(sk);
	sk_wakeup_sleepers(sk, 0);
	tcp_done(sk);
}
static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
{
	/*
	 * If the server is still open we clean up the child connection,
	 * otherwise the server already did the clean up as it was purging
	 * its SYN queue and the skb was just sitting in its backlog.
	 */
	if (likely(parent->sk_state == TCP_LISTEN)) {
		cleanup_syn_rcv_conn(child, parent);
		/* Without the below call to sock_orphan,
		 * we leak the socket resource with syn_flood test
		 * as inet_csk_destroy_sock will not be called
		 * in tcp_done since SOCK_DEAD flag is not set.
		 * Kernel handles this differently where new socket is
		 * created only after 3 way handshake is done.
		 */
		sock_orphan(child);
		percpu_counter_inc((child)->sk_prot->orphan_count);
		chtls_release_resources(child);
		chtls_conn_done(child);
	} else {
		if (csk_flag(child, CSK_RST_ABORTED)) {
			chtls_release_resources(child);
			chtls_conn_done(child);
		}
	}
}
static void pass_open_abort(struct sock *child, struct sock *parent,
			    struct sk_buff *skb)
{
	do_abort_syn_rcv(child, parent);
	kfree_skb(skb);
}

static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
{
	pass_open_abort(skb->sk, lsk, skb);
}
static void chtls_pass_open_arp_failure(struct sock *sk,
					struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *parent;
	void *data;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	/*
	 * If the connection is being aborted due to the parent listening
	 * socket going away there's nothing to do, the ABORT_REQ will close
	 * the connection.
	 */
	if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
		kfree_skb(skb);
		return;
	}

	oreq = csk->passive_reap_next;
	data = lookup_stid(cdev->tids, oreq->ts_recent);
	parent = ((struct listen_ctx *)data)->lsk;

	bh_lock_sock(parent);
	if (!sock_owned_by_user(parent)) {
		pass_open_abort(sk, parent, skb);
	} else {
		BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
		__sk_add_backlog(parent, skb);
	}
	bh_unlock_sock(parent);
}
static void chtls_accept_rpl_arp_failure(void *handle,
					 struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)handle;

	sock_hold(sk);
	process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
	sock_put(sk);
}
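
/*
 * Select an MSS for the connection and return the index of the matching
 * entry in the adapter's MTU table.  The advertised MSS honours the peer's
 * MSS option, the path MTU and any user clamp, less IP/TCP header and
 * timestamp-option overhead.
 */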
static unsigned int chtls_select_mss(const struct chtls_sock *csk,
				     unsigned int pmtu,
				     struct cpl_pass_accept_req *req)
{
	struct chtls_dev *cdev;
	struct dst_entry *dst;
	unsigned int tcpoptsz;
	unsigned int iphdrsz;
	unsigned int mtu_idx;
	struct tcp_sock *tp;
	struct sock *sk;
	unsigned int mss;

	mss = ntohs(req->tcpopt.mss);
	sk = csk->sk;
	dst = __sk_dst_get(sk);
	cdev = csk->cdev;
	tp = tcp_sk(sk);
	tcpoptsz = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
	else
#endif
		iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
	if (req->tcpopt.tstamp)
		tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);

	tp->advmss = dst_metric_advmss(dst);
	if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
		tp->advmss = USER_MSS(tp);
	if (tp->advmss > pmtu - iphdrsz)
		tp->advmss = pmtu - iphdrsz;
	if (mss && tp->advmss > mss)
		tp->advmss = mss;

	tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
					    iphdrsz + tcpoptsz,
					    tp->advmss - tcpoptsz,
					    8, &mtu_idx);
	tp->advmss -= iphdrsz;

	inet_csk(sk)->icsk_pmtu_cookie = pmtu;
	return mtu_idx;
}
static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
	int wscale = 0;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;
	if (win_clamp && win_clamp < space)
		space = win_clamp;

	if (wscale_ok) {
		while (wscale < 14 && (65535 << wscale) < space)
			wscale++;
	}
	return wscale;
}
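
/*
 * Build and send the CPL_PASS_ACCEPT_RPL that commits a passive connection
 * to the adapter: opt0/opt2 carry the negotiated window scale, MSS index,
 * L2T entry, TLS ULP mode and RSS queue for the new tid.
 */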
static void chtls_pass_accept_rpl(struct sk_buff *skb,
				  struct cpl_pass_accept_req *req,
				  unsigned int tid)
{
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi;
	const struct tcphdr *tcph;
	const struct tcp_sock *tp;
	struct chtls_sock *csk;
	unsigned int len;
	struct sock *sk;
	u32 opt2, hlen;
	u64 opt0;

	sk = skb->sk;
	tp = tcp_sk(sk);
	csk = sk->sk_user_data;
	csk->tid = tid;
	lldi = csk->cdev->lldi;
	len = roundup(sizeof(*rpl5), 16);

	rpl5 = __skb_put_zero(skb, len);
	INIT_TP_WR(rpl5, tid);

	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
					req);
	opt0 = TCAM_BYPASS_F |
	       WND_SCALE_V(RCV_WSCALE(tp)) |
	       MSS_IDX_V(csk->mtu_idx) |
	       L2T_IDX_V(csk->l2t_entry->idx) |
	       NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
	       TX_CHAN_V(csk->tx_chan) |
	       SMAC_SEL_V(csk->smac_idx) |
	       DSCP_V(csk->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TLS) |
	       RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;
	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	hlen = ntohl(req->hdr_len);

	tcph = (struct tcphdr *)((u8 *)(req + 1) +
			T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
	opt2 |= T5_ISS_F;
	opt2 |= T5_OPT_2_VALID_F;
	opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
	t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
	cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}
static void inet_inherit_port(struct inet_hashinfo *hash_info,
			      struct sock *lsk, struct sock *newsk)
{
	local_bh_disable();
	__inet_inherit_port(lsk, newsk);
	local_bh_enable();
}

static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol) {
		kfree_skb(skb);
		return 0;
	}
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}
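
/*
 * Size the send and receive windows relative to link speed: 256KB at
 * 10Gb/s, scaled up linearly for faster ports.
 */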
static void chtls_set_tcp_window(struct chtls_sock *csk)
{
	struct net_device *ndev = csk->egress_dev;
	struct port_info *pi = netdev_priv(ndev);
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;
#define CHTLS_10G_RCVWIN (256 * 1024)
	csk->rcv_win = CHTLS_10G_RCVWIN;
	if (scale)
		csk->rcv_win *= scale;
#define CHTLS_10G_SNDWIN (256 * 1024)
	csk->snd_win = CHTLS_10G_SNDWIN;
	if (scale)
		csk->snd_win *= scale;
}
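
/*
 * Create the child socket for a passive open: clone the listener via
 * tcp_create_openreq_child(), resolve the route and L2T entry for the peer,
 * and initialize the chtls_sock (queues, windows, ULP mode) that shadows it
 * in hardware.
 */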
static struct sock *chtls_recv_sock(struct sock *lsk,
				    struct request_sock *oreq,
				    void *network_hdr,
				    const struct cpl_pass_accept_req *req,
				    struct chtls_dev *cdev)
{
	struct neighbour *n = NULL;
	struct inet_sock *newinet;
	const struct iphdr *iph;
	struct tls_context *ctx;
	struct net_device *ndev;
	struct chtls_sock *csk;
	struct dst_entry *dst;
	struct tcp_sock *tp;
	struct sock *newsk;
	u16 port_id;
	int rxq_idx;
	int step;

	iph = (const struct iphdr *)network_hdr;
	newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
	if (!newsk)
		goto free_oreq;

	if (lsk->sk_family == AF_INET) {
		dst = inet_csk_route_child_sock(lsk, newsk, oreq);
		if (!dst)
			goto free_sk;

		n = dst_neigh_lookup(dst, &iph->saddr);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		const struct ipv6hdr *ip6h;
		struct flowi6 fl6;

		ip6h = (const struct ipv6hdr *)network_hdr;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_TCP;
		fl6.saddr = ip6h->daddr;
		fl6.daddr = ip6h->saddr;
		fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
		fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
		security_req_classify_flow(oreq, flowi6_to_flowi(&fl6));
		dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
		if (IS_ERR(dst))
			goto free_sk;
		n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
	}
	if (!n || !n->dev)
		goto free_sk;

	ndev = n->dev;
	if (is_vlan_dev(ndev))
		ndev = vlan_dev_real_dev(ndev);

	port_id = cxgb4_port_idx(ndev);

	csk = chtls_sock_create(cdev);
	if (!csk)
		goto free_dst;

	csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
	if (!csk->l2t_entry)
		goto free_csk;

	newsk->sk_user_data = csk;
	newsk->sk_backlog_rcv = chtls_backlog_rcv;

	tp = tcp_sk(newsk);
	newinet = inet_sk(newsk);

	if (iph->version == 0x4) {
		newinet->inet_daddr = iph->saddr;
		newinet->inet_rcv_saddr = iph->daddr;
		newinet->inet_saddr = iph->daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk;
		struct inet_request_sock *treq = inet_rsk(oreq);
		struct ipv6_pinfo *newnp = inet6_sk(newsk);
		struct ipv6_pinfo *np = inet6_sk(lsk);

		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
		newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
		newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
		inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newsk->sk_bound_dev_if = treq->ir_iif;
		newinet->inet_opt = NULL;
		newinet->inet_daddr = LOOPBACK4_IPV6;
		newinet->inet_saddr = LOOPBACK4_IPV6;
#endif
	}

	oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	sk_setup_caps(newsk, dst);
	ctx = tls_get_ctx(lsk);
	newsk->sk_destruct = ctx->sk_destruct;
	csk->sk = newsk;
	csk->passive_reap_next = oreq;
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->port_id = port_id;
	csk->egress_dev = ndev;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	chtls_set_tcp_window(csk);
	tp->rcv_wnd = csk->rcv_win;
	csk->sndbuf = csk->snd_win;
	csk->ulp_mode = ULP_MODE_TLS;
	step = cdev->lldi->nrxq / cdev->lldi->nchan;
	csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
	rxq_idx = port_id * step;
	csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
			port_id * step;
	csk->sndbuf = newsk->sk_sndbuf;
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
					   sock_net(newsk)->
						ipv4.sysctl_tcp_window_scaling,
					   tp->window_clamp);
	neigh_release(n);
	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
	csk_set_flag(csk, CSK_CONN_INLINE);
	bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */

	return newsk;
free_csk:
	chtls_sock_release(&csk->kref);
free_dst:
	dst_release(dst);
free_sk:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
free_oreq:
	chtls_reqsk_free(oreq);
	return NULL;
}
/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static void mk_tid_release(struct sk_buff *skb,
			   unsigned int chan, unsigned int tid)
{
	struct cpl_tid_release *req;
	unsigned int len;

	len = roundup(sizeof(struct cpl_tid_release), 16);
	req = (struct cpl_tid_release *)__skb_put(skb, len);
	memset(req, 0, len);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
}
static int chtls_get_module(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!try_module_get(icsk->icsk_ulp_ops->owner))
		return -1;

	return 0;
}
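
/*
 * Process a CPL_PASS_ACCEPT_REQ in the listener's context: parse the
 * embedded Ethernet/IP/TCP headers, allocate and fill a request_sock,
 * create the child socket and answer with a PASS_ACCEPT_RPL, or release
 * the tid if the connection must be rejected.
 */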
static void chtls_pass_accept_request(struct sock *sk,
				      struct sk_buff *skb)
{
	struct cpl_t5_pass_accept_rpl *rpl;
	struct cpl_pass_accept_req *req;
	struct listen_ctx *listen_ctx;
	struct vlan_ethhdr *vlan_eh;
	struct request_sock *oreq;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	struct sock *newsk;
	struct ethhdr *eh;
	struct iphdr *iph;
	void *network_hdr;
	unsigned int stid;
	unsigned int len;
	unsigned int tid;
	bool th_ecn, ect;
	__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
	u16 eth_hdr_len;
	bool ecn_ok;

	req = cplhdr(skb) + RSS_HDR;
	tid = GET_TID(req);
	cdev = BLOG_SKB_CB(skb)->cdev;
	newsk = lookup_tid(cdev->tids, tid);
	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	if (newsk) {
		pr_info("tid (%d) already in use\n", tid);
		return;
	}

	len = roundup(sizeof(*rpl), 16);
	reply_skb = alloc_skb(len, GFP_ATOMIC);
	if (!reply_skb) {
		cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
		kfree_skb(skb);
		return;
	}

	if (sk->sk_state != TCP_LISTEN)
		goto free_skb;

	if (inet_csk_reqsk_queue_is_full(sk))
		goto free_skb;

	if (sk_acceptq_is_full(sk))
		goto free_skb;

	eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
		ip6h = (struct ipv6hdr *)(eh + 1);
		network_hdr = (void *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		ip6h = (struct ipv6hdr *)(vlan_eh + 1);
		network_hdr = (void *)(vlan_eh + 1);
	}

	if (iph->version == 0x4) {
		tcph = (struct tcphdr *)(iph + 1);
		skb_set_network_header(skb, (void *)iph - (void *)req);
		oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
	} else {
		tcph = (struct tcphdr *)(ip6h + 1);
		skb_set_network_header(skb, (void *)ip6h - (void *)req);
		oreq = inet_reqsk_alloc(&chtls_rsk_opsv6, sk, false);
	}

	if (!oreq)
		goto free_skb;

	oreq->rsk_rcv_wnd = 0;
	oreq->rsk_window_clamp = 0;
	oreq->syncookie = 0;
	oreq->mss = 0;
	oreq->ts_recent = 0;

	tcp_rsk(oreq)->tfo_listener = false;
	tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
	chtls_set_req_port(oreq, tcph->source, tcph->dest);
	if (iph->version == 0x4) {
		chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
		ip_dsfield = ipv4_get_dsfield(iph);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
		inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
		ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
	}
	if (req->tcpopt.wsf <= 14 &&
	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
		inet_rsk(oreq)->wscale_ok = 1;
		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
	}
	inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
	th_ecn = tcph->ece && tcph->cwr;
	if (th_ecn) {
		ect = !INET_ECN_is_not_ect(ip_dsfield);
		ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
		if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
			inet_rsk(oreq)->ecn_ok = 1;
	}

	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
	if (!newsk)
		goto reject;	/* chtls_recv_sock() frees oreq on failure */

	if (chtls_get_module(newsk))
		goto reject;
	inet_csk_reqsk_queue_added(sk);
	reply_skb->sk = newsk;
	chtls_install_cpl_ops(newsk);
	cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family);
	csk = rcu_dereference_sk_user_data(newsk);
	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	csk->listen_ctx = listen_ctx;
	__skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq);
	chtls_pass_accept_rpl(reply_skb, req, tid);
	kfree_skb(skb);
	return;

free_oreq:
	chtls_reqsk_free(oreq);
reject:
	mk_tid_release(reply_skb, 0, tid);
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
free_skb:
	kfree_skb(skb);
}
/*
 * Handle a CPL_PASS_ACCEPT_REQ message.
 */
static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *ctx;
	unsigned int stid;
	unsigned int tid;
	struct sock *lsk;
	void *data;

	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	tid = GET_TID(req);

	data = lookup_stid(cdev->tids, stid);
	if (!data)
		return 1;

	ctx = (struct listen_ctx *)data;
	lsk = ctx->lsk;

	if (unlikely(tid_out_of_range(cdev->tids, tid))) {
		pr_info("passive open TID %u too large\n", tid);
		return 1;
	}

	BLOG_SKB_CB(skb)->cdev = cdev;
	process_cpl_msg(chtls_pass_accept_request, lsk, skb);
	return 0;
}
/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCP_ESTABLISHED.
 *
 * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
 */
static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->pushed_seq = snd_isn;
	tp->write_seq = snd_isn;
	tp->snd_nxt = snd_isn;
	tp->snd_una = snd_isn;
	inet_sk(sk)->inet_id = prandom_u32();
	assign_rxopt(sk, opt);

	if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
		tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10);

	smp_mb();
	tcp_set_state(sk, TCP_ESTABLISHED);
}
static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *abort_skb;

	abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
	if (abort_skb)
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
}
static struct sock *reap_list;
static DEFINE_SPINLOCK(reap_list_lock);

/*
 * Process the reap list.
 */
DECLARE_TASK_FUNC(process_reap_list, task_param)
{
	spin_lock_bh(&reap_list_lock);
	while (reap_list) {
		struct sock *sk = reap_list;
		struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

		reap_list = csk->passive_reap_next;
		csk->passive_reap_next = NULL;
		spin_unlock(&reap_list_lock);
		sock_hold(sk);

		bh_lock_sock(sk);
		chtls_abort_conn(sk, NULL);
		sock_orphan(sk);
		if (sk->sk_state == TCP_CLOSE)
			inet_csk_destroy_sock(sk);
		bh_unlock_sock(sk);
		sock_put(sk);
		spin_lock(&reap_list_lock);
	}
	spin_unlock_bh(&reap_list_lock);
}

static DECLARE_WORK(reap_task, process_reap_list);

static void add_to_reap_list(struct sock *sk)
{
	struct chtls_sock *csk = sk->sk_user_data;

	local_bh_disable();
	release_tcp_port(sk); /* release the port immediately */

	spin_lock(&reap_list_lock);
	csk->passive_reap_next = reap_list;
	reap_list = sk;
	if (!csk->passive_reap_next)
		schedule_work(&reap_task);
	spin_unlock(&reap_list_lock);
	local_bh_enable();
}
static void add_pass_open_to_parent(struct sock *child, struct sock *lsk,
				    struct chtls_dev *cdev)
{
	struct request_sock *oreq;
	struct chtls_sock *csk;

	if (lsk->sk_state != TCP_LISTEN)
		return;

	csk = child->sk_user_data;
	oreq = csk->passive_reap_next;
	csk->passive_reap_next = NULL;

	reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);

	if (sk_acceptq_is_full(lsk)) {
		chtls_reqsk_free(oreq);
		add_to_reap_list(child);
	} else {
		refcount_set(&oreq->rsk_refcnt, 1);
		inet_csk_reqsk_queue_add(lsk, oreq, child);
		lsk->sk_data_ready(lsk);
	}
}
static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
{
	struct sock *child = skb->sk;

	skb->sk = NULL;
	add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
	kfree_skb(skb);
}
static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct sock *lsk, *sk;
	unsigned int hwtid;

	hwtid = GET_TID(req);
	sk = lookup_tid(cdev->tids, hwtid);
	if (!sk)
		return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);

	bh_lock_sock(sk);
	if (unlikely(sock_owned_by_user(sk))) {
		kfree_skb(skb);
	} else {
		unsigned int stid;
		void *data;

		csk = sk->sk_user_data;
		csk->wr_max_credits = 64;
		csk->wr_credits = 64;
		csk->wr_unacked = 0;
		make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
		stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
		sk->sk_state_change(sk);
		if (unlikely(sk->sk_socket))
			sk_wake_async(sk, 0, POLL_OUT);

		data = lookup_stid(cdev->tids, stid);
		lsk = ((struct listen_ctx *)data)->lsk;

		bh_lock_sock(lsk);
		if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) {
			/* removed from synq */
			bh_unlock_sock(lsk);
			kfree_skb(skb);
			goto unlock;
		}
		__skb_unlink((struct sk_buff *)&csk->synq,
			     &csk->listen_ctx->synq);
		if (likely(!sock_owned_by_user(lsk))) {
			kfree_skb(skb);
			add_pass_open_to_parent(sk, lsk, cdev);
		} else {
			skb->sk = sk;
			BLOG_SKB_CB(skb)->cdev = cdev;
			BLOG_SKB_CB(skb)->backlog_rcv =
				bl_add_pass_open_to_parent;
			__sk_add_backlog(lsk, skb);
		}
		bh_unlock_sock(lsk);
	}
unlock:
	bh_unlock_sock(sk);
	return 0;
}
/*
 * Handle receipt of an urgent pointer.
 */
static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);

	urg_seq--;
	if (tp->urg_data && !after(urg_seq, tp->urg_seq))
		return;	/* duplicate pointer */

	sk_send_sigurg(sk);
	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
	    !sock_flag(sk, SOCK_URGINLINE) &&
	    tp->copied_seq != tp->rcv_nxt) {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		tp->copied_seq++;
		if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
			chtls_free_skb(sk, skb);
	}

	tp->urg_data = TCP_URG_NOTYET;
	tp->urg_seq = urg_seq;
}
static void check_sk_callbacks(struct chtls_sock *csk)
{
	struct sock *sk = csk->sk;

	if (unlikely(sk->sk_user_data &&
		     !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD)))
		csk_set_flag(csk, CSK_CALLBACKS_CHKD);
}
/*
 * Handles Rx data that arrives in a state where the socket isn't accepting
 * new data.
 */
static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
{
	if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
		chtls_abort_conn(sk, skb);

	kfree_skb(skb);
}
static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
		handle_excess_rx(sk, skb);
		return;
	}

	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
	ULP_SKB_CB(skb)->psh = hdr->psh;
	skb_ulp_mode(skb) = ULP_MODE_NONE;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*hdr) + RSS_HDR);
	if (!skb->data_len)
		__skb_trim(skb, ntohs(hdr->len));

	if (unlikely(hdr->urg))
		handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
	if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
		     tp->urg_seq - tp->rcv_nxt < skb->len))
		tp->urg_data = TCP_URG_VALID |
			       skb->data[tp->urg_seq - tp->rcv_nxt];

	if (unlikely(hdr->dack_mode != csk->delack_mode)) {
		csk->delack_mode = hdr->dack_mode;
		csk->delack_seq = tp->rcv_nxt;
	}

	tcp_hdr(skb)->fin = 0;
	tp->rcv_nxt += skb->len;

	__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD)) {
		check_sk_callbacks(csk);
		sk->sk_data_ready(sk);
	}
}
static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_recv_data, sk, skb);
	return 0;
}
static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_tls_data *hdr = cplhdr(skb);
	struct chtls_sock *csk;
	struct chtls_hws *tlsk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tlsk = &csk->tlshws;
	tp = tcp_sk(sk);

	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
		handle_excess_rx(sk, skb);
		return;
	}

	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
	ULP_SKB_CB(skb)->flags = 0;
	skb_ulp_mode(skb) = ULP_MODE_TLS;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*hdr));
	if (!skb->data_len)
		__skb_trim(skb,
			   CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)));

	if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq -
		     tp->rcv_nxt < skb->len))
		tp->urg_data = TCP_URG_VALID |
			       skb->data[tp->urg_seq - tp->rcv_nxt];

	tcp_hdr(skb)->fin = 0;
	tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd));
	__skb_queue_tail(&tlsk->sk_recv_queue, skb);
}
static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_tls_data *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_recv_pdu, sk, skb);
	return 0;
}
static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
{
	struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb);

	skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length);
	tls_cmp_hdr->length = ntohs((__force __be16)nlen);
}
static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
{
	struct tlsrx_cmp_hdr *tls_hdr_pkt;
	struct cpl_rx_tls_cmp *cmp_cpl;
	struct sk_buff *skb_rec;
	struct chtls_sock *csk;
	struct chtls_hws *tlsk;
	struct tcp_sock *tp;

	cmp_cpl = cplhdr(skb);
	csk = rcu_dereference_sk_user_data(sk);
	tlsk = &csk->tlshws;
	tp = tcp_sk(sk);

	ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq);
	ULP_SKB_CB(skb)->flags = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cmp_cpl));
	tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
	if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
		tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
	if (!skb->data_len)
		__skb_trim(skb, TLS_HEADER_LENGTH);

	tp->rcv_nxt +=
		CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));

	ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
	skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
	if (!skb_rec) {
		__skb_queue_tail(&sk->sk_receive_queue, skb);
	} else {
		chtls_set_hdrlen(skb, tlsk->pldlen);
		tlsk->pldlen = 0;
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		__skb_queue_tail(&sk->sk_receive_queue, skb_rec);
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		check_sk_callbacks(csk);
		sk->sk_data_ready(sk);
	}
}
static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_rx_tls_cmp *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_rx_hdr, sk, skb);

	return 0;
}
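
/*
 * Enter TIME_WAIT: account for the peer's FIN in rcv_nxt and hand the
 * socket over to the stack's time-wait machinery.
 */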
static void chtls_timewait(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_nxt++;
	tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
	tp->srtt_us = 0;
	tcp_time_wait(sk, TCP_TIME_WAIT, 0);
}
static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
		goto out;

	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);

	switch (sk->sk_state) {
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		tcp_set_state(sk, TCP_CLOSE_WAIT);
		break;
	case TCP_FIN_WAIT1:
		tcp_set_state(sk, TCP_CLOSING);
		break;
	case TCP_FIN_WAIT2:
		chtls_release_resources(sk);
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
			chtls_conn_done(sk);
		else
			chtls_timewait(sk);
		break;
	default:
		pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);
		/* Do not send POLL_HUP for half duplex close. */

		if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
out:
	kfree_skb(skb);
}
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);

	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
		goto out;

	tp = tcp_sk(sk);

	tp->snd_una = ntohl(rpl->snd_nxt) - 1;  /* exclude FIN */

	switch (sk->sk_state) {
	case TCP_CLOSING:
		chtls_release_resources(sk);
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
			chtls_conn_done(sk);
		else
			chtls_timewait(sk);
		break;
	case TCP_LAST_ACK:
		chtls_release_resources(sk);
		chtls_conn_done(sk);
		break;
	case TCP_FIN_WAIT1:
		tcp_set_state(sk, TCP_FIN_WAIT2);
		sk->sk_shutdown |= SEND_SHUTDOWN;

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		else if (tcp_sk(sk)->linger2 < 0 &&
			 !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
			chtls_abort_conn(sk, skb);
		break;
	default:
		pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
	}
out:
	kfree_skb(skb);
}
static struct sk_buff *get_cpl_skb(struct sk_buff *skb,
				   size_t len, gfp_t gfp)
{
	if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) {
		WARN_ONCE(skb->len < len, "skb alloc error");
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}
static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
			     int cmd)
{
	struct cpl_abort_rpl *rpl = cplhdr(skb);

	INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid);
	rpl->cmd = cmd;
}
static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct sk_buff *reply_skb;

	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
			      GFP_KERNEL | __GFP_NOFAIL);
	__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
	set_abort_rpl_wr(reply_skb, GET_TID(req),
			 (req->status & CPL_ABORT_NO_RST));
	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1);
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
	kfree_skb(skb);
}
/*
 * Add an skb to the deferred skb queue for processing from process context.
 */
static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
			   defer_handler_t handler)
{
	DEFERRED_SKB_CB(skb)->handler = handler;
	spin_lock_bh(&cdev->deferq.lock);
	__skb_queue_tail(&cdev->deferq, skb);
	if (skb_queue_len(&cdev->deferq) == 1)
		schedule_work(&cdev->deferq_task);
	spin_unlock_bh(&cdev->deferq.lock);
}
static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
			   struct chtls_dev *cdev, int status, int queue)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);

	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
			      GFP_KERNEL);

	if (!reply_skb) {
		req->status = (queue << 1);
		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
		return;
	}

	set_abort_rpl_wr(reply_skb, GET_TID(req), status);
	kfree_skb(skb);

	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
	if (csk_conn_inline(csk)) {
		struct l2t_entry *e = csk->l2t_entry;

		if (e && sk->sk_state != TCP_SYN_RECV) {
			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
			return;
		}
	}
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
				 struct chtls_dev *cdev,
				 int status, int queue)
{
	struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
	unsigned int tid;

	csk = rcu_dereference_sk_user_data(sk);
	tid = GET_TID(req);

	reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any());
	if (!reply_skb) {
		req->status = (queue << 1) | status;
		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
		return;
	}

	set_abort_rpl_wr(reply_skb, tid, status);
	kfree_skb(skb);
	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
	if (csk_conn_inline(csk)) {
		struct l2t_entry *e = csk->l2t_entry;

		if (e && sk->sk_state != TCP_SYN_RECV) {
			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
			return;
		}
	}
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}
/*
 * This is run from a listener's backlog to abort a child connection in
 * SYN_RCV state (i.e., one on the listener's SYN queue).
 */
static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
{
	struct chtls_sock *csk;
	struct sock *child;
	int queue;

	child = skb->sk;
	csk = rcu_dereference_sk_user_data(child);
	queue = csk->txq_idx;

	skb->sk = NULL;
	do_abort_syn_rcv(child, lsk);
	send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
		       CPL_ABORT_NO_RST, queue);
}
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct listen_ctx *listen_ctx;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *psk;
	void *ctx;

	csk = sk->sk_user_data;
	oreq = csk->passive_reap_next;
	cdev = csk->cdev;

	if (!oreq)
		return -1;

	ctx = lookup_stid(cdev->tids, oreq->ts_recent);
	if (!ctx)
		return -1;

	listen_ctx = (struct listen_ctx *)ctx;
	psk = listen_ctx->lsk;

	bh_lock_sock(psk);
	if (!sock_owned_by_user(psk)) {
		int queue = csk->txq_idx;

		do_abort_syn_rcv(sk, psk);
		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
	} else {
		skb->sk = sk;
		BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
		__sk_add_backlog(psk, skb);
	}
	bh_unlock_sock(psk);
	return 0;
}
static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk = sk->sk_user_data;
	int rst_status = CPL_ABORT_NO_RST;
	int queue = csk->txq_idx;

	if (is_neg_adv(req->status)) {
		if (sk->sk_state == TCP_SYN_RECV)
			chtls_set_tcb_tflag(sk, 0, 0);

		kfree_skb(skb);
		return;
	}

	csk_reset_flag(csk, CSK_ABORT_REQ_RCVD);

	if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) &&
	    !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send_tx_flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);

	if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		sk->sk_err = ETIMEDOUT;

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);

		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
			return;

		chtls_release_resources(sk);
		chtls_conn_done(sk);
	}

	chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
			     rst_status, queue);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		csk_reset_flag(csk, CSK_ABORT_RPL_PENDING);
		if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) {
			if (sk->sk_state == TCP_SYN_SENT) {
				cxgb4_remove_tid(cdev->tids,
						 csk->port_id,
						 GET_TID(rpl),
						 sk->sk_family);
				sock_put(sk);
			}
			chtls_release_resources(sk);
			chtls_conn_done(sk);
		}
	}
	kfree_skb(skb);
}
static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
	void (*fn)(struct sock *sk, struct sk_buff *skb);
	unsigned int hwtid = GET_TID(req);
	struct chtls_sock *csk;
	struct sock *sk;
	u8 opcode;

	opcode = ((const struct rss_header *)cplhdr(skb))->opcode;

	sk = lookup_tid(cdev->tids, hwtid);
	if (!sk)
		goto rel_skb;

	csk = sk->sk_user_data;

	switch (opcode) {
	case CPL_PEER_CLOSE:
		fn = chtls_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		fn = chtls_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		/*
		 * Save the offload device in the skb, we may process this
		 * message after the socket has closed.
		 */
		BLOG_SKB_CB(skb)->cdev = csk->cdev;
		fn = chtls_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		fn = chtls_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	process_cpl_msg(fn, sk, skb);
	return 0;

rel_skb:
	kfree_skb(skb);
	return 0;
}
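
/*
 * Process a CPL_FW4_ACK: return TX credits to the connection, free the
 * work-request skbs those credits cover, advance snd_una, and restart
 * transmission if data is still queued.
 */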
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk = sk->sk_user_data;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 credits = hdr->credits;
	u32 snd_una;

	snd_una = ntohl(hdr->snd_una);
	csk->wr_credits += credits;

	if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits)
		csk->wr_unacked = csk->wr_max_credits - csk->wr_credits;

	while (credits) {
		struct sk_buff *pskb = csk->wr_skb_head;
		u32 csum;

		if (unlikely(!pskb)) {
			if (csk->wr_nondata)
				csk->wr_nondata -= credits;
			break;
		}
		csum = (__force u32)pskb->csum;
		if (unlikely(credits < csum)) {
			pskb->csum = (__force __wsum)(csum - credits);
			break;
		}
		dequeue_wr(sk);
		credits -= csum;
		kfree_skb(pskb);
	}
	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, tp->snd_una))) {
			kfree_skb(skb);
			return;
		}

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->rcv_tstamp = tcp_time_stamp(tp);
			if (tp->snd_una == tp->snd_nxt &&
			    !csk_flag_nochk(csk, CSK_TX_FAILOVER))
				csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		}
	}

	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) {
		unsigned int fclen16 = roundup(failover_flowc_wr_len, 16);

		csk->wr_credits -= fclen16;
		csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		csk_reset_flag(csk, CSK_TX_FAILOVER);
	}
	if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0))
		sk->sk_write_space(sk);

	kfree_skb(skb);
}
static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int hwtid = GET_TID(rpl);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	process_cpl_msg(chtls_rx_ack, sk, skb);

	return 0;
}
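
/*
 * CPL message handlers called from the chtls receive path, indexed by
 * CPL opcode.
 */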
chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL]     = chtls_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]   = chtls_pass_accept_req,
	[CPL_PASS_ESTABLISH]    = chtls_pass_establish,
	[CPL_RX_DATA]           = chtls_rx_data,
	[CPL_TLS_DATA]          = chtls_rx_pdu,
	[CPL_RX_TLS_CMP]        = chtls_rx_cmp,
	[CPL_PEER_CLOSE]        = chtls_conn_cpl,
	[CPL_CLOSE_CON_RPL]     = chtls_conn_cpl,
	[CPL_ABORT_REQ_RSS]     = chtls_conn_cpl,
	[CPL_ABORT_RPL_RSS]     = chtls_conn_cpl,
	[CPL_FW4_ACK]           = chtls_wr_ack,
};