/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>

#include "chtls.h"
#include "chtls_cm.h"

/*
 * State transitions and actions for close.  Note that if we are in SYN_SENT
 * we remain in that state as we cannot control a connection while it's in
 * SYN_SENT; such connections are allowed to establish and are then aborted.
 */
static unsigned char new_state[16] = {
	/* current state:     new state:    action: */
	/* (Invalid)       */ TCP_CLOSE,
	/* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_SYN_SENT    */ TCP_SYN_SENT,
	/* TCP_SYN_RECV    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_FIN_WAIT1   */ TCP_FIN_WAIT1,
	/* TCP_FIN_WAIT2   */ TCP_FIN_WAIT2,
	/* TCP_TIME_WAIT   */ TCP_CLOSE,
	/* TCP_CLOSE       */ TCP_CLOSE,
	/* TCP_CLOSE_WAIT  */ TCP_LAST_ACK | TCP_ACTION_FIN,
	/* TCP_LAST_ACK    */ TCP_LAST_ACK,
	/* TCP_LISTEN      */ TCP_CLOSE,
	/* TCP_CLOSING     */ TCP_CLOSING,
};

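/*
 * Each entry above packs the next TCP state in the low TCP_STATE_MASK bits
 * plus an optional TCP_ACTION_FIN flag, so a single table lookup yields both
 * the transition and whether a FIN must be issued.  Closing an ESTABLISHED
 * connection, for instance, moves it to FIN_WAIT1 and requires a FIN, while
 * closing a LISTEN socket goes straight to CLOSE with no action.
 */
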
static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
{
	struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);

	if (!csk)
		return NULL;

	csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
	if (!csk->txdata_skb_cache) {
		kfree(csk);
		return NULL;
	}

	kref_init(&csk->kref);
	csk->cdev = cdev;
	skb_queue_head_init(&csk->txq);
	csk->wr_skb_head = NULL;
	csk->wr_skb_tail = NULL;
	csk->mss = MAX_MSS;
	csk->tlshws.ofld = 1;
	csk->tlshws.txkey = -1;
	csk->tlshws.rxkey = -1;
	csk->tlshws.mfs = TLS_MFS;
	skb_queue_head_init(&csk->tlshws.sk_recv_queue);
	return csk;
}

static void chtls_sock_release(struct kref *ref)
{
	struct chtls_sock *csk =
		container_of(ref, struct chtls_sock, kref);

	kfree(csk);
}

static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev,
					    struct sock *sk)
{
	struct net_device *ndev = cdev->ports[0];

	if (likely(!inet_sk(sk)->inet_rcv_saddr))
		return ndev;

	ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
	if (!ndev)
		return NULL;

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);
	return ndev;
}

static void assign_rxopt(struct sock *sk, unsigned int opt)
{
	const struct chtls_dev *cdev;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	cdev = csk->cdev;
	tp->tcp_header_len = sizeof(struct tcphdr);
	/* mss_clamp: MTU for the negotiated index minus IPv4 + TCP headers */
	tp->rx_opt.mss_clamp = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
	tp->mss_cache = tp->rx_opt.mss_clamp;
	tp->rx_opt.tstamp_ok = TCPOPT_TSTAMP_G(opt);
	/* SACK flag from the hardware option word (not a window scale) */
	tp->rx_opt.sack_ok = TCPOPT_SACK_G(opt);
	tp->rx_opt.wscale_ok = TCPOPT_WSCALE_OK_G(opt);
	SND_WSCALE(tp) = TCPOPT_SND_WSCALE_G(opt);
	if (!tp->rx_opt.wscale_ok)
		tp->rx_opt.rcv_wscale = 0;
	if (tp->rx_opt.tstamp_ok) {
		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
		tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
	} else if (csk->opt2 & TSTAMPS_EN_F) {
		csk->opt2 &= ~TSTAMPS_EN_F;
		csk->mtu_idx = TCPOPT_MSS_G(opt);
	}
}

static void chtls_purge_receive_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		skb_dst_set(skb, (void *)NULL);
		kfree_skb(skb);
	}
}

static void chtls_purge_write_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->txq))) {
		sk->sk_wmem_queued -= skb->truesize;
		__kfree_skb(skb);
	}
}

static void chtls_purge_recv_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *tlsk = &csk->tlshws;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
		skb_dst_set(skb, NULL);
		kfree_skb(skb);
	}
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);
	struct chtls_dev *cdev;

	cdev = (struct chtls_dev *)handle;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(cdev->lldi->ports[0], skb);
}

static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
		__skb_trim(skb, 0);
		refcount_add(2, &skb->users);
	} else {
		skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	}
	return skb;
}

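/*
 * Reusing the cached control skb (csk->txdata_skb_cache) avoids an
 * allocation per control message: the two extra references taken above keep
 * the skb alive across the hardware send completion so the cache entry can
 * be handed out again for the next control message.
 */
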
static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct cpl_abort_req *req;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (!skb)
		skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));

	req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
	req->rsvd0 = htonl(tp->snd_nxt);
	req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
	req->cmd = mode;
	t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
	send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
}

static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
		     !csk->cdev)) {
		if (sk->sk_state == TCP_SYN_RECV)
			csk_set_flag(csk, CSK_RST_ABORTED);
		goto out;
	}

	if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send tx flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
	chtls_purge_write_queue(sk);

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
	if (sk->sk_state != TCP_SYN_RECV)
		chtls_send_abort(sk, mode, skb);
	else
		goto out;

	return;
out:
	kfree_skb(skb);
}

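/*
 * The adapter expects a TX flow-control (FLOWC) work request before any
 * other work request on a connection.  CSK_TX_DATA_SENT records that one
 * has been issued, which is why chtls_send_reset() emits a FLOWC first when
 * aborting a connection that has not sent any data yet.
 */
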
static void release_tcp_port(struct sock *sk)
{
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);
}

static void tcp_uncork(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->nonagle & TCP_NAGLE_CORK) {
		tp->nonagle &= ~TCP_NAGLE_CORK;
		chtls_tcp_push(sk, 0);
	}
}

static void chtls_close_conn(struct sock *sk)
{
	struct cpl_close_con_req *req;
	struct chtls_sock *csk;
	struct sk_buff *skb;
	unsigned int tid;
	unsigned int len;

	len = roundup(sizeof(struct cpl_close_con_req), 16);
	csk = rcu_dereference_sk_user_data(sk);
	tid = csk->tid;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	req = (struct cpl_close_con_req *)__skb_put(skb, len);
	memset(req, 0, len);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
			      FW_WR_IMMDLEN_V(sizeof(*req) -
					      sizeof(req->wr)));
	req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
			       FW_WR_FLOWID_V(tid));

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));

	tcp_uncork(sk);
	skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
	if (sk->sk_state != TCP_SYN_SENT)
		chtls_push_frames(csk, 1);
}

/*
 * Perform a state transition during close and return the actions indicated
 * for the transition.  Do not make this function inline, the main reason
 * it exists at all is to avoid multiple inlining of tcp_set_state.
 */
static int make_close_transition(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];

	tcp_set_state(sk, next & TCP_STATE_MASK);
	return next & TCP_ACTION_FIN;
}

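/*
 * Example: closing an ESTABLISHED socket looks up
 * new_state[TCP_ESTABLISHED] == (TCP_FIN_WAIT1 | TCP_ACTION_FIN), moves the
 * socket to FIN_WAIT1 and returns non-zero, so chtls_close() follows up
 * with chtls_close_conn() to push the FIN to the adapter.
 */
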
void chtls_close(struct sock *sk, long timeout)
{
	int data_lost, prev_state;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);

	lock_sock(sk);
	sk->sk_shutdown |= SHUTDOWN_MASK;

	data_lost = skb_queue_len(&sk->sk_receive_queue);
	data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);

	if (sk->sk_state == TCP_CLOSE) {
		goto wait;
	} else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		release_tcp_port(sk);
		goto unlock;
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (make_close_transition(sk)) {
		chtls_close_conn(sk);
	}
wait:
	if (timeout)
		sk_stream_wait_close(sk, timeout);

unlock:
	prev_state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	release_sock(sk);

	local_bh_disable();
	bh_lock_sock(sk);

	if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
	    !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
		if (skb)
			chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

/*
 * Wait until a socket enters one of the given states.
 */
static int wait_for_states(struct sock *sk, unsigned int states)
{
	DECLARE_WAITQUEUE(wait, current);
	struct socket_wq _sk_wq;
	long current_timeo;
	int err = 0;

	current_timeo = 200;

	/*
	 * We want this to work even when there's no associated struct socket.
	 * In that case we provide a temporary wait_queue_head_t.
	 */
	if (!sk->sk_wq) {
		init_waitqueue_head(&_sk_wq.wait);
		_sk_wq.fasync_list = NULL;
		init_rcu_head_on_stack(&_sk_wq.rcu);
		RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
	}

	add_wait_queue(sk_sleep(sk), &wait);
	while (!sk_in_state(sk, states)) {
		if (!current_timeo) {
			err = -EBUSY;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(current_timeo);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		release_sock(sk);
		if (!sk_in_state(sk, states))
			current_timeo = schedule_timeout(current_timeo);
		__set_current_state(TASK_RUNNING);
		lock_sock(sk);
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	if (rcu_dereference(sk->sk_wq) == &_sk_wq)
		sk->sk_wq = NULL;
	return err;
}

int chtls_disconnect(struct sock *sk, int flags)
{
	struct tcp_sock *tp;
	int err;

	tp = tcp_sk(sk);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	chtls_purge_write_queue(sk);

	if (sk->sk_state != TCP_CLOSE) {
		sk->sk_err = ECONNRESET;
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		err = wait_for_states(sk, TCPF_CLOSE);
		if (err)
			return err;
	}
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
	return tcp_disconnect(sk, flags);
}

#define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
				 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
void chtls_shutdown(struct sock *sk, int how)
{
	if ((how & SEND_SHUTDOWN) &&
	    sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
	    make_close_transition(sk))
		chtls_close_conn(sk);
}

void chtls_destroy_sock(struct sock *sk)
{
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);
	chtls_purge_recv_queue(sk);
	csk->ulp_mode = ULP_MODE_NONE;
	chtls_purge_write_queue(sk);
	free_tls_keyid(sk);
	kref_put(&csk->kref, chtls_sock_release);
	sk->sk_prot = &tcp_prot;
	sk->sk_prot->destroy(sk);
}

static void reset_listen_child(struct sock *child)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
	struct sk_buff *skb;

	skb = alloc_ctrl_skb(csk->txdata_skb_cache,
			     sizeof(struct cpl_abort_req));

	chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
	sock_orphan(child);
	INC_ORPHAN_COUNT(child);
	if (child->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(child);
}

static void chtls_disconnect_acceptq(struct sock *listen_sk)
{
	struct request_sock **pprev;

	pprev = ACCEPT_QUEUE(listen_sk);
	while (*pprev) {
		struct request_sock *req = *pprev;

		if (req->rsk_ops == &chtls_rsk_ops) {
			struct sock *child = req->sk;

			*pprev = req->dl_next;
			sk_acceptq_removed(listen_sk);
			reqsk_put(req);
			sock_hold(child);
			local_bh_disable();
			bh_lock_sock(child);
			release_tcp_port(child);
			reset_listen_child(child);
			bh_unlock_sock(child);
			local_bh_enable();
			sock_put(child);
		} else {
			pprev = &req->dl_next;
		}
	}
}

static int listen_hashfn(const struct sock *sk)
{
	return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
}

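/*
 * The listen hash keys on the socket pointer itself; shifting right by 10
 * drops the low bits that are identical for all slab-allocated sockets
 * before masking down to LISTEN_INFO_HASH_SIZE buckets.
 */
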
static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
					   struct sock *sk,
					   unsigned int stid)
{
	struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int key = listen_hashfn(sk);

		p->sk = sk;
		p->stid = stid;
		spin_lock(&cdev->listen_lock);
		p->next = cdev->listen_hash_tab[key];
		cdev->listen_hash_tab[key] = p;
		spin_unlock(&cdev->listen_lock);
	}
	return p;
}

static int listen_hash_find(struct chtls_dev *cdev,
			    struct sock *sk)
{
	struct listen_info *p;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);

	spin_lock(&cdev->listen_lock);
	for (p = cdev->listen_hash_tab[key]; p; p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}

static int listen_hash_del(struct chtls_dev *cdev,
			   struct sock *sk)
{
	struct listen_info *p, **prev;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);
	prev = &cdev->listen_hash_tab[key];

	spin_lock(&cdev->listen_lock);
	for (p = *prev; p; prev = &p->next, p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}

static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
{
	struct request_sock *req;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(child);
	req = csk->passive_reap_next;

	reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
	chtls_reqsk_free(req);
	csk->passive_reap_next = NULL;
}

static void chtls_reset_synq(struct listen_ctx *listen_ctx)
{
	struct sock *listen_sk = listen_ctx->lsk;

	while (!skb_queue_empty(&listen_ctx->synq)) {
		struct chtls_sock *csk =
			container_of((struct synq *)__skb_dequeue
				(&listen_ctx->synq), struct chtls_sock, synq);
		struct sock *child = csk->sk;

		cleanup_syn_rcv_conn(child, listen_sk);
		sock_hold(child);
		local_bh_disable();
		bh_lock_sock(child);
		release_tcp_port(child);
		reset_listen_child(child);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);
	}
}

int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
{
	struct net_device *ndev;
	struct listen_ctx *ctx;
	struct adapter *adap;
	struct port_info *pi;
	int stid;
	int ret;

	if (sk->sk_family != PF_INET)
		return -EAGAIN;

	rcu_read_lock();
	ndev = chtls_ipv4_netdev(cdev, sk);
	rcu_read_unlock();
	if (!ndev)
		return -EBADF;

	pi = netdev_priv(ndev);
	adap = pi->adapter;
	if (!(adap->flags & FULL_INIT_DONE))
		return -EBADF;

	if (listen_hash_find(cdev, sk) >= 0)   /* already have it */
		return -EADDRINUSE;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	__module_get(THIS_MODULE);
	ctx->lsk = sk;
	ctx->cdev = cdev;
	ctx->state = T4_LISTEN_START_PENDING;
	skb_queue_head_init(&ctx->synq);

	stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
	if (stid < 0)
		goto free_ctx;

	sock_hold(sk);
	if (!listen_hash_add(cdev, sk, stid))
		goto free_stid;

	ret = cxgb4_create_server(ndev, stid,
				  inet_sk(sk)->inet_rcv_saddr,
				  inet_sk(sk)->inet_sport, 0,
				  cdev->lldi->rxq_ids[0]);
	if (ret > 0)
		ret = net_xmit_errno(ret);
	if (ret)
		goto del_hash;
	return 0;
del_hash:
	listen_hash_del(cdev, sk);
free_stid:
	cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
	sock_put(sk);
free_ctx:
	kfree(ctx);
	module_put(THIS_MODULE);
	return -EBADF;
}

void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
{
	struct listen_ctx *listen_ctx;
	int stid;

	stid = listen_hash_del(cdev, sk);
	if (stid < 0)
		return;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	chtls_reset_synq(listen_ctx);

	cxgb4_remove_server(cdev->lldi->ports[0], stid,
			    cdev->lldi->rxq_ids[0], 0);
	chtls_disconnect_acceptq(sk);
}

static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int stid = GET_TID(rpl);
	struct listen_ctx *listen_ctx;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	if (!listen_ctx)
		return CPL_RET_BUF_DONE;

	if (listen_ctx->state == T4_LISTEN_START_PENDING) {
		listen_ctx->state = T4_LISTEN_STARTED;
		return CPL_RET_BUF_DONE;
	}

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
			rpl->status, stid);
		return CPL_RET_BUF_DONE;
	}
	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
	sock_put(listen_ctx->lsk);
	kfree(listen_ctx);
	module_put(THIS_MODULE);

	return 0;
}

static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *listen_ctx;
	unsigned int stid;
	void *data;

	stid = GET_TID(rpl);
	data = lookup_stid(cdev->tids, stid);
	listen_ctx = (struct listen_ctx *)data;

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
			rpl->status, stid);
		return CPL_RET_BUF_DONE;
	}

	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
	sock_put(listen_ctx->lsk);
	kfree(listen_ctx);
	module_put(THIS_MODULE);

	return 0;
}

static void chtls_release_resources(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	unsigned int tid = csk->tid;
	struct tid_info *tids;

	if (!cdev)
		return;

	tids = cdev->tids;
	kfree_skb(csk->txdata_skb_cache);
	csk->txdata_skb_cache = NULL;

	if (csk->l2t_entry) {
		cxgb4_l2t_release(csk->l2t_entry);
		csk->l2t_entry = NULL;
	}

	cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
	sock_put(sk);
}

static void chtls_conn_done(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		chtls_purge_receive_queue(sk);
	sk_wakeup_sleepers(sk, 0);
	tcp_done(sk);
}

static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
{
	/*
	 * If the server is still open we clean up the child connection,
	 * otherwise the server already did the clean up as it was purging
	 * its SYN queue and the skb was just sitting in its backlog.
	 */
	if (likely(parent->sk_state == TCP_LISTEN)) {
		cleanup_syn_rcv_conn(child, parent);
		/* Without the below call to sock_orphan,
		 * we leak the socket resource with syn_flood test
		 * as inet_csk_destroy_sock will not be called
		 * in tcp_done since SOCK_DEAD flag is not set.
		 * Kernel handles this differently where new socket is
		 * created only after 3 way handshake is done.
		 */
		sock_orphan(child);
		percpu_counter_inc((child)->sk_prot->orphan_count);
		chtls_release_resources(child);
		chtls_conn_done(child);
	} else {
		if (csk_flag(child, CSK_RST_ABORTED)) {
			chtls_release_resources(child);
			chtls_conn_done(child);
		}
	}
}

static void pass_open_abort(struct sock *child, struct sock *parent,
			    struct sk_buff *skb)
{
	do_abort_syn_rcv(child, parent);
	kfree_skb(skb);
}

static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
{
	pass_open_abort(skb->sk, lsk, skb);
}

static void chtls_pass_open_arp_failure(struct sock *sk,
					struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *parent;
	void *data;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	/*
	 * If the connection is being aborted due to the parent listening
	 * socket going away there's nothing to do, the ABORT_REQ will close
	 * the connection.
	 */
	if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
		kfree_skb(skb);
		return;
	}

	oreq = csk->passive_reap_next;
	data = lookup_stid(cdev->tids, oreq->ts_recent);
	parent = ((struct listen_ctx *)data)->lsk;

	bh_lock_sock(parent);
	if (!sock_owned_by_user(parent)) {
		pass_open_abort(sk, parent, skb);
	} else {
		BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
		__sk_add_backlog(parent, skb);
	}
	bh_unlock_sock(parent);
}

static void chtls_accept_rpl_arp_failure(void *handle,
					 struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)handle;

	sock_hold(sk);
	process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
	sock_put(sk);
}

static unsigned int chtls_select_mss(const struct chtls_sock *csk,
				     unsigned int pmtu,
				     struct cpl_pass_accept_req *req)
{
	struct chtls_dev *cdev;
	struct dst_entry *dst;
	unsigned int tcpoptsz;
	unsigned int iphdrsz;
	unsigned int mtu_idx;
	struct tcp_sock *tp;
	unsigned int mss;
	struct sock *sk;

	mss = ntohs(req->tcpopt.mss);
	sk = csk->sk;
	dst = __sk_dst_get(sk);
	cdev = csk->cdev;
	tp = tcp_sk(sk);
	tcpoptsz = 0;

	iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
	if (req->tcpopt.tstamp)
		tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);

	tp->advmss = dst_metric_advmss(dst);
	if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
		tp->advmss = USER_MSS(tp);
	if (tp->advmss > pmtu - iphdrsz)
		tp->advmss = pmtu - iphdrsz;
	if (mss && tp->advmss > mss)
		tp->advmss = mss;

	tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
					    iphdrsz + tcpoptsz,
					    tp->advmss - tcpoptsz,
					    8, &mtu_idx);
	tp->advmss -= iphdrsz;

	inet_csk(sk)->icsk_pmtu_cookie = pmtu;
	return mtu_idx;
}

static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
	int wscale = 0;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;
	if (win_clamp && win_clamp < space)
		space = win_clamp;

	if (wscale_ok) {
		while (wscale < 14 && (65535 << wscale) < space)
			wscale++;
	}
	return wscale;
}

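/*
 * The shift is capped at 14, the largest window scale permitted by
 * RFC 7323.  E.g. for space = 256KB (262144) the loop stops at wscale = 3,
 * the smallest shift for which 65535 << wscale covers the buffer.
 */
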
static void chtls_pass_accept_rpl(struct sk_buff *skb,
				  struct cpl_pass_accept_req *req,
				  unsigned int tid)
{
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi;
	const struct tcphdr *tcph;
	const struct tcp_sock *tp;
	struct chtls_sock *csk;
	unsigned int len;
	struct sock *sk;
	u32 opt2, hlen;
	u64 opt0;

	sk = skb->sk;
	tp = tcp_sk(sk);
	csk = sk->sk_user_data;
	csk->tid = tid;
	lldi = csk->cdev->lldi;
	len = roundup(sizeof(*rpl5), 16);

	rpl5 = __skb_put_zero(skb, len);
	INIT_TP_WR(rpl5, tid);

	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
					req);
	opt0 = TCAM_BYPASS_F |
	       WND_SCALE_V(RCV_WSCALE(tp)) |
	       MSS_IDX_V(csk->mtu_idx) |
	       L2T_IDX_V(csk->l2t_entry->idx) |
	       NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
	       TX_CHAN_V(csk->tx_chan) |
	       SMAC_SEL_V(csk->smac_idx) |
	       DSCP_V(csk->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TLS) |
	       RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;
	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	hlen = ntohl(req->hdr_len);

	tcph = (struct tcphdr *)((u8 *)(req + 1) +
			T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
	opt2 |= T5_ISS_F;
	opt2 |= T5_OPT_2_VALID_F;
	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
	t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
	cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}

static void inet_inherit_port(struct inet_hashinfo *hash_info,
			      struct sock *lsk, struct sock *newsk)
{
	local_bh_disable();
	__inet_inherit_port(lsk, newsk);
	local_bh_enable();
}

static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol) {
		kfree_skb(skb);
		return 0;
	}
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}

static void chtls_set_tcp_window(struct chtls_sock *csk)
{
	struct net_device *ndev = csk->egress_dev;
	struct port_info *pi = netdev_priv(ndev);
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;
#define CHTLS_10G_RCVWIN (256 * 1024)
	csk->rcv_win = CHTLS_10G_RCVWIN;
	if (scale)
		csk->rcv_win *= scale;
#define CHTLS_10G_SNDWIN (256 * 1024)
	csk->snd_win = CHTLS_10G_SNDWIN;
	if (scale)
		csk->snd_win *= scale;
}

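/*
 * Windows are sized relative to a 10Gb/s baseline: scale is the link speed
 * in units of 10Gb/s, so a 100Gb/s port gets 256KB * 10 = 2560KB send and
 * receive windows, while anything at or below 10Gb/s keeps the 256KB
 * default.
 */
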
static struct sock *chtls_recv_sock(struct sock *lsk,
				    struct request_sock *oreq,
				    void *network_hdr,
				    const struct cpl_pass_accept_req *req,
				    struct chtls_dev *cdev)
{
	struct inet_sock *newinet;
	const struct iphdr *iph;
	struct net_device *ndev;
	struct chtls_sock *csk;
	struct dst_entry *dst;
	struct neighbour *n;
	struct tcp_sock *tp;
	struct sock *newsk;
	u16 port_id;
	int rxq_idx;
	int step;

	iph = (const struct iphdr *)network_hdr;
	newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
	if (!newsk)
		goto free_oreq;

	dst = inet_csk_route_child_sock(lsk, newsk, oreq);
	if (!dst)
		goto free_sk;

	n = dst_neigh_lookup(dst, &iph->saddr);
	if (!n)
		goto free_sk;

	ndev = n->dev;
	if (!ndev)
		goto free_dst;
	port_id = cxgb4_port_idx(ndev);

	csk = chtls_sock_create(cdev);
	if (!csk)
		goto free_dst;

	csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
	if (!csk->l2t_entry)
		goto free_csk;

	newsk->sk_user_data = csk;
	newsk->sk_backlog_rcv = chtls_backlog_rcv;

	tp = tcp_sk(newsk);
	newinet = inet_sk(newsk);

	newinet->inet_daddr = iph->saddr;
	newinet->inet_rcv_saddr = iph->daddr;
	newinet->inet_saddr = iph->daddr;

	oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	sk_setup_caps(newsk, dst);
	csk->sk = newsk;
	csk->passive_reap_next = oreq;
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->port_id = port_id;
	csk->egress_dev = ndev;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	chtls_set_tcp_window(csk);
	tp->rcv_wnd = csk->rcv_win;
	csk->sndbuf = csk->snd_win;
	csk->ulp_mode = ULP_MODE_TLS;
	step = cdev->lldi->nrxq / cdev->lldi->nchan;
	csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
	rxq_idx = port_id * step;
	csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
			port_id * step;
	csk->sndbuf = newsk->sk_sndbuf;
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
					   sock_net(newsk)->
						ipv4.sysctl_tcp_window_scaling,
					   tp->window_clamp);
	neigh_release(n);
	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
	csk_set_flag(csk, CSK_CONN_INLINE);
	bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */

	return newsk;
free_csk:
	chtls_sock_release(&csk->kref);
free_dst:
	dst_release(dst);
free_sk:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
free_oreq:
	chtls_reqsk_free(oreq);
	return NULL;
}

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static void mk_tid_release(struct sk_buff *skb,
			   unsigned int chan, unsigned int tid)
{
	struct cpl_tid_release *req;
	unsigned int len;

	len = roundup(sizeof(struct cpl_tid_release), 16);
	req = (struct cpl_tid_release *)__skb_put(skb, len);
	memset(req, 0, len);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
}

static int chtls_get_module(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!try_module_get(icsk->icsk_ulp_ops->owner))
		return -1;

	return 0;
}

static void chtls_pass_accept_request(struct sock *sk,
				      struct sk_buff *skb)
{
	struct cpl_t5_pass_accept_rpl *rpl;
	struct cpl_pass_accept_req *req;
	struct listen_ctx *listen_ctx;
	struct vlan_ethhdr *vlan_eh;
	struct request_sock *oreq;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct tcphdr *tcph;
	struct sock *newsk;
	struct ethhdr *eh;
	struct iphdr *iph;
	void *network_hdr;
	unsigned int stid;
	unsigned int len;
	unsigned int tid;
	bool th_ecn, ect;
	__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
	u16 eth_hdr_len;
	bool ecn_ok;

	req = cplhdr(skb) + RSS_HDR;
	tid = GET_TID(req);
	cdev = BLOG_SKB_CB(skb)->cdev;
	newsk = lookup_tid(cdev->tids, tid);
	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	if (newsk) {
		pr_info("tid (%d) already in use\n", tid);
		return;
	}

	len = roundup(sizeof(*rpl), 16);
	reply_skb = alloc_skb(len, GFP_ATOMIC);
	if (!reply_skb) {
		cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
		kfree_skb(skb);
		return;
	}

	if (sk->sk_state != TCP_LISTEN)
		goto reject;

	if (inet_csk_reqsk_queue_is_full(sk))
		goto reject;

	if (sk_acceptq_is_full(sk))
		goto reject;

	oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
	if (!oreq)
		goto reject;

	oreq->rsk_rcv_wnd = 0;
	oreq->rsk_window_clamp = 0;
	oreq->cookie_ts = 0;
	oreq->mss = 0;
	oreq->ts_recent = 0;

	eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
		network_hdr = (void *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		network_hdr = (void *)(vlan_eh + 1);
	}
	if (iph->version != 0x4)
		goto free_oreq;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)req);

	tcp_rsk(oreq)->tfo_listener = false;
	tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
	chtls_set_req_port(oreq, tcph->source, tcph->dest);
	chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
	ip_dsfield = ipv4_get_dsfield(iph);
	if (req->tcpopt.wsf <= 14 &&
	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
		inet_rsk(oreq)->wscale_ok = 1;
		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
	}
	inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
	th_ecn = tcph->ece && tcph->cwr;
	if (th_ecn) {
		ect = !INET_ECN_is_not_ect(ip_dsfield);
		ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
		if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
			inet_rsk(oreq)->ecn_ok = 1;
	}

	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
	if (!newsk)
		goto reject;

	if (chtls_get_module(newsk))
		goto reject;
	inet_csk_reqsk_queue_added(sk);
	reply_skb->sk = newsk;
	chtls_install_cpl_ops(newsk);
	cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family);
	csk = rcu_dereference_sk_user_data(newsk);
	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	csk->listen_ctx = listen_ctx;
	__skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq);
	chtls_pass_accept_rpl(reply_skb, req, tid);
	kfree_skb(skb);
	return;

free_oreq:
	chtls_reqsk_free(oreq);
reject:
	mk_tid_release(reply_skb, 0, tid);
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
	kfree_skb(skb);
}

/*
 * Handle a CPL_PASS_ACCEPT_REQ message.
 */
static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *ctx;
	unsigned int stid;
	unsigned int tid;
	struct sock *lsk;
	void *data;

	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	tid = GET_TID(req);

	data = lookup_stid(cdev->tids, stid);
	if (!data)
		return 1;

	ctx = (struct listen_ctx *)data;
	lsk = ctx->lsk;

	if (unlikely(tid >= cdev->tids->ntids)) {
		pr_info("passive open TID %u too large\n", tid);
		return 1;
	}

	BLOG_SKB_CB(skb)->cdev = cdev;
	process_cpl_msg(chtls_pass_accept_request, lsk, skb);
	return 0;
}

/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCP_ESTABLISHED.
 *
 * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
 */
static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->pushed_seq = snd_isn;
	tp->write_seq = snd_isn;
	tp->snd_nxt = snd_isn;
	tp->snd_una = snd_isn;
	inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
	assign_rxopt(sk, opt);

	if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
		tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10);

	smp_mb();
	tcp_set_state(sk, TCP_ESTABLISHED);
}

static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *abort_skb;

	abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
	if (abort_skb)
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
}

static struct sock *reap_list;
static DEFINE_SPINLOCK(reap_list_lock);

/*
 * Process the reap list.
 */
DECLARE_TASK_FUNC(process_reap_list, task_param)
{
	spin_lock_bh(&reap_list_lock);
	while (reap_list) {
		struct sock *sk = reap_list;
		struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

		reap_list = csk->passive_reap_next;
		csk->passive_reap_next = NULL;
		spin_unlock(&reap_list_lock);
		sock_hold(sk);

		bh_lock_sock(sk);
		chtls_abort_conn(sk, NULL);
		sock_orphan(sk);
		if (sk->sk_state == TCP_CLOSE)
			inet_csk_destroy_sock(sk);
		bh_unlock_sock(sk);
		sock_put(sk);
		spin_lock(&reap_list_lock);
	}
	spin_unlock_bh(&reap_list_lock);
}

static DECLARE_WORK(reap_task, process_reap_list);

static void add_to_reap_list(struct sock *sk)
{
	struct chtls_sock *csk = sk->sk_user_data;

	local_bh_disable();
	bh_lock_sock(sk);
	release_tcp_port(sk); /* release the port immediately */

	spin_lock(&reap_list_lock);
	csk->passive_reap_next = reap_list;
	reap_list = sk;
	if (!csk->passive_reap_next)
		schedule_work(&reap_task);
	spin_unlock(&reap_list_lock);
	bh_unlock_sock(sk);
	local_bh_enable();
}

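/*
 * The reap list reuses csk->passive_reap_next as a singly-linked list of
 * sockets awaiting process-context teardown.  The worker is only kicked
 * when the list transitions from empty to non-empty (the new head's
 * passive_reap_next is NULL); process_reap_list() then drains the list.
 */
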
static void add_pass_open_to_parent(struct sock *child, struct sock *lsk,
				    struct chtls_dev *cdev)
{
	struct request_sock *oreq;
	struct chtls_sock *csk;

	if (lsk->sk_state != TCP_LISTEN)
		return;

	csk = child->sk_user_data;
	oreq = csk->passive_reap_next;
	csk->passive_reap_next = NULL;

	reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);

	if (sk_acceptq_is_full(lsk)) {
		chtls_reqsk_free(oreq);
		add_to_reap_list(child);
	} else {
		refcount_set(&oreq->rsk_refcnt, 1);
		inet_csk_reqsk_queue_add(lsk, oreq, child);
		lsk->sk_data_ready(lsk);
	}
}

static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
{
	struct sock *child = skb->sk;

	skb->sk = NULL;
	add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
	kfree_skb(skb);
}

static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct sock *lsk, *sk;
	unsigned int hwtid;

	hwtid = GET_TID(req);
	sk = lookup_tid(cdev->tids, hwtid);
	if (!sk)
		return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);

	bh_lock_sock(sk);
	if (unlikely(sock_owned_by_user(sk))) {
		kfree_skb(skb);
	} else {
		unsigned int stid;
		void *data;

		csk = sk->sk_user_data;
		csk->wr_max_credits = 64;
		csk->wr_credits = 64;
		csk->wr_unacked = 0;
		make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
		stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
		sk->sk_state_change(sk);
		if (unlikely(sk->sk_socket))
			sk_wake_async(sk, 0, POLL_OUT);

		data = lookup_stid(cdev->tids, stid);
		lsk = ((struct listen_ctx *)data)->lsk;

		bh_lock_sock(lsk);
		if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) {
			/* removed from synq */
			bh_unlock_sock(lsk);
			kfree_skb(skb);
			goto unlock;
		}

		if (likely(!sock_owned_by_user(lsk))) {
			kfree_skb(skb);
			add_pass_open_to_parent(sk, lsk, cdev);
		} else {
			skb->sk = sk;
			BLOG_SKB_CB(skb)->cdev = cdev;
			BLOG_SKB_CB(skb)->backlog_rcv =
				bl_add_pass_open_to_parent;
			__sk_add_backlog(lsk, skb);
		}
		bh_unlock_sock(lsk);
	}
unlock:
	bh_unlock_sock(sk);
	return 0;
}

/*
 * Handle receipt of an urgent pointer.
 */
static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);

	urg_seq--;
	if (tp->urg_data && !after(urg_seq, tp->urg_seq))
		return;	/* duplicate pointer */

	sk_send_sigurg(sk);
	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
	    !sock_flag(sk, SOCK_URGINLINE) &&
	    tp->copied_seq != tp->rcv_nxt) {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		tp->copied_seq++;
		if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
			chtls_free_skb(sk, skb);
	}

	tp->urg_data = TCP_URG_NOTYET;
	tp->urg_seq = urg_seq;
}

static void check_sk_callbacks(struct chtls_sock *csk)
{
	struct sock *sk = csk->sk;

	if (unlikely(sk->sk_user_data &&
		     !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD)))
		csk_set_flag(csk, CSK_CALLBACKS_CHKD);
}

/*
 * Handles Rx data that arrives in a state where the socket isn't accepting
 * new data.
 */
static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
{
	if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
		chtls_abort_conn(sk, skb);

	kfree_skb(skb);
}

static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
		handle_excess_rx(sk, skb);
		return;
	}

	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
	ULP_SKB_CB(skb)->psh = hdr->psh;
	skb_ulp_mode(skb) = ULP_MODE_NONE;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*hdr) + RSS_HDR);
	if (!skb->data_len)
		__skb_trim(skb, ntohs(hdr->len));

	if (unlikely(hdr->urg))
		handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
	if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
		     tp->urg_seq - tp->rcv_nxt < skb->len))
		tp->urg_data = TCP_URG_VALID |
			       skb->data[tp->urg_seq - tp->rcv_nxt];

	if (unlikely(hdr->dack_mode != csk->delack_mode)) {
		csk->delack_mode = hdr->dack_mode;
		csk->delack_seq = tp->rcv_nxt;
	}

	tcp_hdr(skb)->fin = 0;
	tp->rcv_nxt += skb->len;

	__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD)) {
		check_sk_callbacks(csk);
		sk->sk_data_ready(sk);
	}
}

static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_recv_data, sk, skb);
	return 0;
}

static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_tls_data *hdr = cplhdr(skb);
	struct chtls_sock *csk;
	struct chtls_hws *tlsk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tlsk = &csk->tlshws;
	tp = tcp_sk(sk);

	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
		handle_excess_rx(sk, skb);
		return;
	}

	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
	ULP_SKB_CB(skb)->flags = 0;
	skb_ulp_mode(skb) = ULP_MODE_TLS;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*hdr));
	if (!skb->data_len)
		__skb_trim(skb,
			   CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)));

	if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq -
		     tp->rcv_nxt < skb->len))
		tp->urg_data = TCP_URG_VALID |
			       skb->data[tp->urg_seq - tp->rcv_nxt];

	tcp_hdr(skb)->fin = 0;
	tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd));
	__skb_queue_tail(&tlsk->sk_recv_queue, skb);
}

static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_tls_data *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_recv_pdu, sk, skb);
	return 0;
}

static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
{
	struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb);

	skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length);
	tls_cmp_hdr->length = ntohs((__force __be16)nlen);
}

static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
{
	struct tlsrx_cmp_hdr *tls_hdr_pkt;
	struct cpl_rx_tls_cmp *cmp_cpl;
	struct sk_buff *skb_rec;
	struct chtls_sock *csk;
	struct chtls_hws *tlsk;
	struct tcp_sock *tp;

	cmp_cpl = cplhdr(skb);
	csk = rcu_dereference_sk_user_data(sk);
	tlsk = &csk->tlshws;
	tp = tcp_sk(sk);

	ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq);
	ULP_SKB_CB(skb)->flags = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cmp_cpl));
	tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
	if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
		tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
	if (!skb->data_len)
		__skb_trim(skb, TLS_HEADER_LENGTH);

	tp->rcv_nxt +=
		CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));

	ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
	skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
	if (!skb_rec) {
		__skb_queue_tail(&sk->sk_receive_queue, skb);
	} else {
		chtls_set_hdrlen(skb, tlsk->pldlen);
		tlsk->pldlen = 0;
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		__skb_queue_tail(&sk->sk_receive_queue, skb_rec);
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		check_sk_callbacks(csk);
		sk->sk_data_ready(sk);
	}
}

static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_rx_tls_cmp *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_rx_hdr, sk, skb);

	return 0;
}

static void chtls_timewait(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_nxt++;
	tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
	tp->srtt_us = 0;
	tcp_time_wait(sk, TCP_TIME_WAIT, 0);
}

static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);

	switch (sk->sk_state) {
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		tcp_set_state(sk, TCP_CLOSE_WAIT);
		break;
	case TCP_FIN_WAIT1:
		tcp_set_state(sk, TCP_CLOSING);
		break;
	case TCP_FIN_WAIT2:
		chtls_release_resources(sk);
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
			chtls_conn_done(sk);
		else
			chtls_timewait(sk);
		break;
	default:
		pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);
		/* Do not send POLL_HUP for half duplex close. */

		if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
}

static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	tp->snd_una = ntohl(rpl->snd_nxt) - 1;  /* exclude FIN */

	switch (sk->sk_state) {
	case TCP_CLOSING:
		chtls_release_resources(sk);
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
			chtls_conn_done(sk);
		else
			chtls_timewait(sk);
		break;
	case TCP_LAST_ACK:
		chtls_release_resources(sk);
		chtls_conn_done(sk);
		break;
	case TCP_FIN_WAIT1:
		tcp_set_state(sk, TCP_FIN_WAIT2);
		sk->sk_shutdown |= SEND_SHUTDOWN;

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		else if (tcp_sk(sk)->linger2 < 0 &&
			 !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
			chtls_abort_conn(sk, skb);
		break;
	default:
		pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
	}
	kfree_skb(skb);
}

static struct sk_buff *get_cpl_skb(struct sk_buff *skb,
				   size_t len, gfp_t gfp)
{
	if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) {
		WARN_ONCE(skb->len < len, "skb alloc error");
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

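/*
 * get_cpl_skb() prefers recycling the request skb for the reply when it is
 * linear and unshared (trimming it to the reply length and taking a
 * reference), and only falls back to a fresh allocation otherwise.
 */
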
static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
			     int cmd)
{
	struct cpl_abort_rpl *rpl = cplhdr(skb);

	INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid);
	rpl->cmd = cmd;
}

static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct sk_buff *reply_skb;

	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
			      GFP_KERNEL | __GFP_NOFAIL);
	__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
	set_abort_rpl_wr(reply_skb, GET_TID(req),
			 (req->status & CPL_ABORT_NO_RST));
	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1);
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
	kfree_skb(skb);
}

static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
			   struct chtls_dev *cdev, int status, int queue)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);

	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
			      GFP_KERNEL);

	if (!reply_skb) {
		req->status = (queue << 1);
		send_defer_abort_rpl(cdev, skb);
		return;
	}

	set_abort_rpl_wr(reply_skb, GET_TID(req), status);
	kfree_skb(skb);

	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
	if (csk_conn_inline(csk)) {
		struct l2t_entry *e = csk->l2t_entry;

		if (e && sk->sk_state != TCP_SYN_RECV) {
			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
			return;
		}
	}
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}

/*
 * Add an skb to the deferred skb queue for processing from process context.
 */
static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
			   defer_handler_t handler)
{
	DEFERRED_SKB_CB(skb)->handler = handler;
	spin_lock_bh(&cdev->deferq.lock);
	__skb_queue_tail(&cdev->deferq, skb);
	if (skb_queue_len(&cdev->deferq) == 1)
		schedule_work(&cdev->deferq_task);
	spin_unlock_bh(&cdev->deferq.lock);
}

static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
				 struct chtls_dev *cdev,
				 int status, int queue)
{
	struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
	unsigned int tid;

	csk = rcu_dereference_sk_user_data(sk);
	tid = GET_TID(req);

	reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any());
	if (!reply_skb) {
		req->status = (queue << 1) | status;
		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
		return;
	}

	set_abort_rpl_wr(reply_skb, tid, status);
	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
	if (csk_conn_inline(csk)) {
		struct l2t_entry *e = csk->l2t_entry;

		if (e && sk->sk_state != TCP_SYN_RECV) {
			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
			return;
		}
	}
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}

/*
 * This is run from a listener's backlog to abort a child connection in
 * SYN_RCV state (i.e., one on the listener's SYN queue).
 */
static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
{
	struct chtls_sock *csk;
	struct sock *child;
	int queue;

	child = skb->sk;
	csk = rcu_dereference_sk_user_data(child);
	queue = csk->txq_idx;

	skb->sk	= NULL;
	do_abort_syn_rcv(child, lsk);
	send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
		       CPL_ABORT_NO_RST, queue);
}

static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct listen_ctx *listen_ctx;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *psk;
	void *ctx;

	csk = sk->sk_user_data;
	oreq = csk->passive_reap_next;
	cdev = csk->cdev;

	if (!oreq)
		return -1;

	ctx = lookup_stid(cdev->tids, oreq->ts_recent);
	if (!ctx)
		return -1;

	listen_ctx = (struct listen_ctx *)ctx;
	psk = listen_ctx->lsk;

	bh_lock_sock(psk);
	if (!sock_owned_by_user(psk)) {
		int queue = csk->txq_idx;

		do_abort_syn_rcv(sk, psk);
		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
	} else {
		skb->sk = sk;
		BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
		__sk_add_backlog(psk, skb);
	}
	bh_unlock_sock(psk);
	return 0;
}

static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk = sk->sk_user_data;
	int rst_status = CPL_ABORT_NO_RST;
	int queue = csk->txq_idx;

	if (is_neg_adv(req->status)) {
		if (sk->sk_state == TCP_SYN_RECV)
			chtls_set_tcb_tflag(sk, 0, 0);

		kfree_skb(skb);
		return;
	}

	csk_reset_flag(csk, CSK_ABORT_REQ_RCVD);

	if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) &&
	    !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send_tx_flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);

	if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		sk->sk_err = ETIMEDOUT;

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);

		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
			return;

		chtls_release_resources(sk);
		chtls_conn_done(sk);
	}

	chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
}

static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		csk_reset_flag(csk, CSK_ABORT_RPL_PENDING);
		if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) {
			if (sk->sk_state == TCP_SYN_SENT) {
				cxgb4_remove_tid(cdev->tids,
						 csk->port_id,
						 GET_TID(rpl),
						 sk->sk_family);
				sock_put(sk);
			}
			chtls_release_resources(sk);
			chtls_conn_done(sk);
		}
	}
	kfree_skb(skb);
}

static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
	void (*fn)(struct sock *sk, struct sk_buff *skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;
	u8 opcode;

	opcode = ((const struct rss_header *)cplhdr(skb))->opcode;

	sk = lookup_tid(cdev->tids, hwtid);
	if (!sk)
		goto rel_skb;

	switch (opcode) {
	case CPL_PEER_CLOSE:
		fn = chtls_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		fn = chtls_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		fn = chtls_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		fn = chtls_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	process_cpl_msg(fn, sk, skb);
	return 0;

rel_skb:
	kfree_skb(skb);
	return 0;
}

static struct sk_buff *dequeue_wr(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb = csk->wr_skb_head;

	if (likely(skb)) {
		/* Don't bother clearing the tail */
		csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
		WR_SKB_CB(skb)->next_wr = NULL;
	}
	return skb;
}

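/*
 * Pending work requests are chained through WR_SKB_CB(skb)->next_wr, and
 * each skb's csum field is repurposed to hold the number of transmit
 * credits the WR consumed; chtls_rx_ack() below retires WRs against the
 * credits returned in CPL_FW4_ACK.
 */
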
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk = sk->sk_user_data;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 credits = hdr->credits;
	u32 snd_una;

	snd_una = ntohl(hdr->snd_una);
	csk->wr_credits += credits;

	if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits)
		csk->wr_unacked = csk->wr_max_credits - csk->wr_credits;

	while (credits) {
		struct sk_buff *pskb = csk->wr_skb_head;
		u32 csum;

		if (unlikely(!pskb)) {
			if (csk->wr_nondata)
				csk->wr_nondata -= credits;
			break;
		}
		csum = (__force u32)pskb->csum;
		if (unlikely(credits < csum)) {
			pskb->csum = (__force __wsum)(csum - credits);
			break;
		}
		dequeue_wr(sk);
		credits -= csum;
		kfree_skb(pskb);
	}
	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, tp->snd_una))) {
			kfree_skb(skb);
			return;
		}

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->rcv_tstamp = tcp_time_stamp(tp);
			if (tp->snd_una == tp->snd_nxt &&
			    !csk_flag_nochk(csk, CSK_TX_FAILOVER))
				csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		}
	}

	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) {
		unsigned int fclen16 = roundup(failover_flowc_wr_len, 16);

		csk->wr_credits -= fclen16;
		csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		csk_reset_flag(csk, CSK_TX_FAILOVER);
	}
	if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0))
		sk->sk_write_space(sk);

	kfree_skb(skb);
}

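/*
 * Credit example: if the head WR consumed 4 credits (pskb->csum == 4) and
 * the ACK returns 6, the head is dequeued and freed and 2 credits remain
 * for the next WR; if the ACK returned only 2, the head stays queued with
 * its csum reduced to the 2 still-outstanding credits.
 */
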
static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int hwtid = GET_TID(rpl);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	process_cpl_msg(chtls_rx_ack, sk, skb);

	return 0;
}

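/*
 * CPL message dispatch table.  A handler returns 0 once it has taken
 * ownership of the skb, or CPL_RET_BUF_DONE to let the caller free it.
 * The connection-scoped opcodes all funnel through chtls_conn_cpl(), which
 * resolves the hardware tid to a socket and picks the matching handler
 * above.
 */
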
chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL]     = chtls_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]   = chtls_pass_accept_req,
	[CPL_PASS_ESTABLISH]    = chtls_pass_establish,
	[CPL_RX_DATA]           = chtls_rx_data,
	[CPL_TLS_DATA]          = chtls_rx_pdu,
	[CPL_RX_TLS_CMP]        = chtls_rx_cmp,
	[CPL_PEER_CLOSE]        = chtls_conn_cpl,
	[CPL_CLOSE_CON_RPL]     = chtls_conn_cpl,
	[CPL_ABORT_REQ_RSS]     = chtls_conn_cpl,
	[CPL_ABORT_RPL_RSS]     = chtls_conn_cpl,
	[CPL_FW4_ACK]           = chtls_wr_ack,
};