// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#include <crypto/aes.h>

#include "chtls.h"
#include "chtls_cm.h"
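
/*
 * Note: txkey/rxkey hold the adapter key-cache index once a key has
 * been programmed; a negative value presumably means inline TLS is
 * not (yet) active in that direction.
 */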
static bool is_tls_tx(struct chtls_sock *csk)
{
	return csk->tlshws.txkey >= 0;
}

static bool is_tls_rx(struct chtls_sock *csk)
{
	return csk->tlshws.rxkey >= 0;
}

static int data_sgl_len(const struct sk_buff *skb)
{
	unsigned int cnt;

	cnt = skb_shinfo(skb)->nr_frags;
	return sgl_len(cnt) * 8;
}

static int nos_ivs(struct sock *sk, unsigned int size)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	return DIV_ROUND_UP(size, csk->tlshws.mfs);
}

static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
{
	int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
	int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);

	if ((hlen + KEY_ON_MEM_SZ + ivs_size) <
	    MAX_IMM_OFLD_TX_DATA_WR_LEN) {
		ULP_SKB_CB(skb)->ulp.tls.iv = 1;
		return 1;
	}
	ULP_SKB_CB(skb)->ulp.tls.iv = 0;
	return 0;
}

static int max_ivs_size(struct sock *sk, int size)
{
	return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
}

static int ivs_size(struct sock *sk, const struct sk_buff *skb)
{
	return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
			CIPHER_BLOCK_SIZE) : 0;
}
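
/*
 * Firmware work requests are measured in 16-byte credits: round the
 * FLOWC WR (header plus nparams mnemonic/value pairs) up to a whole
 * number of credits and optionally report the padded byte length
 * back to the caller.
 */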
static int flowc_wr_credits(int nparams, int *flowclenp)
{
	int flowclen16, flowclen;

	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;

	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}

static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
					   struct fw_flowc_wr *flowc,
					   int flowclen)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	skb = alloc_skb(flowclen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	__skb_put_data(skb, flowc, flowclen);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);

	return skb;
}
static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
			 int flowclen)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int flowclen16;
	int ret;

	flowclen16 = flowclen / 16;

	if (csk_flag(sk, CSK_TX_DATA_SENT)) {
		skb = create_flowc_wr_skb(sk, flowc, flowclen);
		if (!skb)
			return -ENOMEM;

		skb_entail(sk, skb,
			   ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
		return 0;
	}

	ret = cxgb4_immdata_send(csk->egress_dev,
				 csk->txq_idx,
				 flowc, flowclen);
	if (!ret)
		return flowclen16;
	skb = create_flowc_wr_skb(sk, flowc, flowclen);
	if (!skb)
		return -ENOMEM;
	send_or_defer(sk, tp, skb, 0);
	return flowclen16;
}
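
/*
 * Map the socket's TCP state onto the FW_FLOWC_MNEM_TCPSTATE_*
 * encoding the firmware expects; anything unrecognized is reported
 * as ESTABLISHED.
 */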
static u8 tcp_state_to_flowc_state(u8 state)
{
	switch (state) {
	case TCP_ESTABLISHED:
		return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
	case TCP_CLOSE_WAIT:
		return FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
	case TCP_FIN_WAIT1:
		return FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
	case TCP_CLOSING:
		return FW_FLOWC_MNEM_TCPSTATE_CLOSING;
	case TCP_LAST_ACK:
		return FW_FLOWC_MNEM_TCPSTATE_LASTACK;
	case TCP_FIN_WAIT2:
		return FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;
	}

	return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
}
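
/*
 * Build and send the per-connection FLOWC work request that programs
 * the firmware with this flow's parameters (PF, channel, ingress
 * queue, send buffer, MSS, TCP state and, for TLS, the ULP mode and
 * maximum TX data length).
 */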
int send_tx_flowc_wr(struct sock *sk, int compl,
		     u32 snd_nxt, u32 rcv_nxt)
{
	struct flowc_packed {
		struct fw_flowc_wr fc;
		struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
	} __packed sflowc;
	int nparams, paramidx, flowclen16, flowclen;
	struct fw_flowc_wr *flowc;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);
	memset(&sflowc, 0, sizeof(sflowc));
	flowc = &sflowc.fc;

#define FLOWC_PARAM(__m, __v) \
	do { \
		flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \
		flowc->mnemval[paramidx].val = cpu_to_be32(__v); \
		paramidx++; \
	} while (0)

	paramidx = 0;

	FLOWC_PARAM(PFNVFN, FW_PFVF_CMD_PFN_V(csk->cdev->lldi->pf));
	FLOWC_PARAM(CH, csk->tx_chan);
	FLOWC_PARAM(PORT, csk->tx_chan);
	FLOWC_PARAM(IQID, csk->rss_qid);
	FLOWC_PARAM(SNDNXT, tp->snd_nxt);
	FLOWC_PARAM(RCVNXT, tp->rcv_nxt);
	FLOWC_PARAM(SNDBUF, csk->sndbuf);
	FLOWC_PARAM(MSS, tp->mss_cache);
	FLOWC_PARAM(TCPSTATE, tcp_state_to_flowc_state(sk->sk_state));

	if (SND_WSCALE(tp))
		FLOWC_PARAM(RCV_SCALE, SND_WSCALE(tp));

	if (csk->ulp_mode == ULP_MODE_TLS)
		FLOWC_PARAM(ULD_MODE, ULP_MODE_TLS);

	if (csk->tlshws.fcplenmax)
		FLOWC_PARAM(TXDATAPLEN_MAX, csk->tlshws.fcplenmax);

	nparams = paramidx;
#undef FLOWC_PARAM

	flowclen16 = flowc_wr_credits(nparams, &flowclen);
	flowc->op_to_nparams =
		cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
			    FW_WR_COMPL_V(compl) |
			    FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));

	return send_flowc_wr(sk, flowc, flowclen);
}
/* Copy IVs to WR */
static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb)
{
	struct chtls_sock *csk;
	unsigned char *iv_loc;
	struct chtls_hws *hws;
	unsigned char *ivs;
	u16 number_of_ivs;
	struct page *page;
	int err = 0;

	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;
	number_of_ivs = nos_ivs(sk, skb->len);

	if (number_of_ivs > MAX_IVS_PAGE) {
		pr_warn("MAX IVs in PAGE exceeded %d\n", number_of_ivs);
		return -ENOMEM;
	}

	/* generate the IVs */
	ivs = kmalloc_array(CIPHER_BLOCK_SIZE, number_of_ivs, GFP_ATOMIC);
	if (!ivs)
		return -ENOMEM;
	get_random_bytes(ivs, number_of_ivs * CIPHER_BLOCK_SIZE);

	if (skb_ulp_tls_iv_imm(skb)) {
		/* send the IVs as immediate data in the WR */
		iv_loc = (unsigned char *)__skb_push(skb, number_of_ivs *
						CIPHER_BLOCK_SIZE);
		if (iv_loc)
			memcpy(iv_loc, ivs, number_of_ivs * CIPHER_BLOCK_SIZE);

		hws->ivsize = number_of_ivs * CIPHER_BLOCK_SIZE;
	} else {
		/* Send the IVs as sgls */
		/* Already accounted IV DSGL for credits */
		skb_shinfo(skb)->nr_frags--;
		page = alloc_pages(sk->sk_allocation | __GFP_COMP, 0);
		if (!page) {
			pr_info("%s : Page allocation for IVs failed\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}
		memcpy(page_address(page), ivs, number_of_ivs *
		       CIPHER_BLOCK_SIZE);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0,
				   number_of_ivs * CIPHER_BLOCK_SIZE);
		hws->ivsize = 0;
	}
out:
	kfree(ivs);
	return err;
}
/* Copy Key to WR */
static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb)
{
	struct ulptx_sc_memrd *sc_memrd;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct ulptx_idata *sc;
	struct chtls_hws *hws;
	u32 immdlen;
	int kaddr;

	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;
	cdev = csk->cdev;

	immdlen = sizeof(*sc) + sizeof(*sc_memrd);
	kaddr = keyid_to_addr(cdev->kmap.start, hws->txkey);
	sc = (struct ulptx_idata *)__skb_push(skb, immdlen);
	if (sc) {
		sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
		sc->len = htonl(0);
		sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
		sc_memrd->cmd_to_len =
				htonl(ULPTX_CMD_V(ULP_TX_SC_MEMRD) |
				      ULP_TX_SC_MORE_V(1) |
				      ULPTX_LEN16_V(hws->keylen >> 4));
		sc_memrd->addr = htonl(kaddr);
	}
}
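
/*
 * Hardware consumes one TLS record sequence number per PDU; hand out
 * the current value and advance the counter.
 */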
static u64 tlstx_incr_seqnum(struct chtls_hws *hws)
{
	return hws->tx_seq_no++;
}

static bool is_sg_request(const struct sk_buff *skb)
{
	return skb->peeked ||
		(skb->len > MAX_IMM_ULPTX_WR_LEN);
}

/*
 * Returns true if an sk_buff carries urgent data.
 */
static bool skb_urgent(struct sk_buff *skb)
{
	return ULP_SKB_CB(skb)->flags & ULPCB_FLAG_URG;
}

/* TLS content type for CPL SFO */
static unsigned char tls_content_type(unsigned char content_type)
{
	switch (content_type) {
	case TLS_HDR_TYPE_CCS:
		return CPL_TX_TLS_SFO_TYPE_CCS;
	case TLS_HDR_TYPE_ALERT:
		return CPL_TX_TLS_SFO_TYPE_ALERT;
	case TLS_HDR_TYPE_HANDSHAKE:
		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
	case TLS_HDR_TYPE_HEARTBEAT:
		return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
	}
	return CPL_TX_TLS_SFO_TYPE_DATA;
}
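
/*
 * Prepend the TLS TX work request to the skb: a FW_TLSTX_DATA_WR
 * header immediately followed by a CPL_TX_TLS_SFO, pushed in front
 * of the key reference, IVs and payload already staged in the skb.
 * The SCMD copy in hws is updated with the IV count for this send.
 */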
static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
			   int dlen, int tls_immd, u32 credits,
			   int expn, int pdus)
{
	struct fw_tlstx_data_wr *req_wr;
	struct cpl_tx_tls_sfo *req_cpl;
	unsigned int wr_ulp_mode_force;
	struct tls_scmd *updated_scmd;
	unsigned char data_type;
	struct chtls_sock *csk;
	struct net_device *dev;
	struct chtls_hws *hws;
	struct tls_scmd *scmd;
	struct adapter *adap;
	unsigned char *req;
	int immd_len;
	int iv_imm;
	int len;

	csk = rcu_dereference_sk_user_data(sk);
	iv_imm = skb_ulp_tls_iv_imm(skb);
	dev = csk->egress_dev;
	adap = netdev2adap(dev);
	hws = &csk->tlshws;
	scmd = &hws->scmd;
	len = dlen + expn;

	dlen = (dlen < hws->mfs) ? dlen : hws->mfs;
	atomic_inc(&adap->chcr_stats.tls_pdu_tx);

	updated_scmd = scmd;
	updated_scmd->seqno_numivs &= 0xffffff80;
	updated_scmd->seqno_numivs |= SCMD_NUM_IVS_V(pdus);
	hws->scmd = *updated_scmd;

	req = (unsigned char *)__skb_push(skb, sizeof(struct cpl_tx_tls_sfo));
	req_cpl = (struct cpl_tx_tls_sfo *)req;
	req = (unsigned char *)__skb_push(skb, (sizeof(struct
				fw_tlstx_data_wr)));

	req_wr = (struct fw_tlstx_data_wr *)req;
	immd_len = (tls_immd ? dlen : 0);
	req_wr->op_to_immdlen =
		htonl(FW_WR_OP_V(FW_TLSTX_DATA_WR) |
		      FW_TLSTX_DATA_WR_COMPL_V(1) |
		      FW_TLSTX_DATA_WR_IMMDLEN_V(immd_len));
	req_wr->flowid_len16 = htonl(FW_TLSTX_DATA_WR_FLOWID_V(csk->tid) |
				     FW_TLSTX_DATA_WR_LEN16_V(credits));
	wr_ulp_mode_force = TX_ULP_MODE_V(ULP_MODE_TLS);

	if (is_sg_request(skb))
		wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
			((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
			 FW_OFLD_TX_DATA_WR_SHOVE_F);

	req_wr->lsodisable_to_flags =
		htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
		      TX_URG_V(skb_urgent(skb)) |
		      T6_TX_FORCE_F | wr_ulp_mode_force |
		      TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
				 skb_queue_empty(&csk->txq)));

	req_wr->ctxloc_to_exp =
		htonl(FW_TLSTX_DATA_WR_NUMIVS_V(pdus) |
		      FW_TLSTX_DATA_WR_EXP_V(expn) |
		      FW_TLSTX_DATA_WR_CTXLOC_V(CHTLS_KEY_CONTEXT_DDR) |
		      FW_TLSTX_DATA_WR_IVDSGL_V(!iv_imm) |
		      FW_TLSTX_DATA_WR_KEYSIZE_V(hws->keylen >> 4));

	/* Fill in the length */
	req_wr->plen = htonl(len);
	req_wr->mfs = htons(hws->mfs);
	req_wr->adjustedplen_pkd =
		htons(FW_TLSTX_DATA_WR_ADJUSTEDPLEN_V(hws->adjustlen));
	req_wr->expinplenmax_pkd =
		htons(FW_TLSTX_DATA_WR_EXPINPLENMAX_V(hws->expansion));
	req_wr->pdusinplenmax_pkd =
		FW_TLSTX_DATA_WR_PDUSINPLENMAX_V(hws->pdus);

	data_type = tls_content_type(ULP_SKB_CB(skb)->ulp.tls.type);
	req_cpl->op_to_seg_len = htonl(CPL_TX_TLS_SFO_OPCODE_V(CPL_TX_TLS_SFO) |
				       CPL_TX_TLS_SFO_DATA_TYPE_V(data_type) |
				       CPL_TX_TLS_SFO_CPL_LEN_V(2) |
				       CPL_TX_TLS_SFO_SEG_LEN_V(dlen));
	req_cpl->pld_len = htonl(len - expn);

	req_cpl->type_protover = htonl(CPL_TX_TLS_SFO_TYPE_V
		((data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT) ?
		TLS_HDR_TYPE_HEARTBEAT : 0) |
		CPL_TX_TLS_SFO_PROTOVER_V(0));

	/* create the s-command */
	req_cpl->seqno_numivs = cpu_to_be32(hws->scmd.seqno_numivs);
	req_cpl->ivgen_hdrlen = cpu_to_be32(hws->scmd.ivgen_hdrlen);
	req_cpl->scmd1 = cpu_to_be64(tlstx_incr_seqnum(hws));
}
/*
 * Calculate the TLS data expansion size
 */
static int chtls_expansion_size(struct sock *sk, int data_len,
				int fullpdu,
				unsigned short *pducnt)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *hws = &csk->tlshws;
	struct tls_scmd *scmd = &hws->scmd;
	int fragsize = hws->mfs;
	int expnsize = 0;
	int fragleft;
	int fragcnt;
	int expppdu;

	if (SCMD_CIPH_MODE_G(scmd->seqno_numivs) ==
	    SCMD_CIPH_MODE_AES_GCM) {
		expppdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
			  TLS_HEADER_LENGTH;

		if (fullpdu) {
			*pducnt = data_len / (expppdu + fragsize);

			if (*pducnt > 32)
				*pducnt = 32;
			else if (!*pducnt)
				*pducnt = 1;
			expnsize = (*pducnt) * expppdu;
			return expnsize;
		}
		fragcnt = (data_len / fragsize);
		expnsize = fragcnt * expppdu;
		fragleft = data_len % fragsize;
		if (fragleft > 0)
			expnsize += expppdu;
	}
	return expnsize;
}
/* WR with IV, KEY and CPL SFO added */
static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb,
			       int tls_tx_imm, int tls_len, u32 credits)
{
	unsigned short pdus_per_ulp = 0;
	struct chtls_sock *csk;
	struct chtls_hws *hws;
	int expn_sz;
	int pdus;

	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;
	pdus = DIV_ROUND_UP(tls_len, hws->mfs);
	expn_sz = chtls_expansion_size(sk, tls_len, 0, NULL);
	if (!hws->compute) {
		hws->expansion = chtls_expansion_size(sk,
						      hws->fcplenmax,
						      1, &pdus_per_ulp);
		hws->pdus = pdus_per_ulp;
		hws->adjustlen = hws->pdus *
			((hws->expansion / hws->pdus) + hws->mfs);
		hws->compute = 1;
	}
	if (tls_copy_ivs(sk, skb))
		return;
	tls_copy_tx_key(sk, skb);
	tls_tx_data_wr(sk, skb, tls_len, tls_tx_imm, credits, expn_sz, pdus);
	hws->tx_seq_no += (pdus - 1);
}
static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
			    unsigned int immdlen, int len,
			    u32 credits, u32 compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int wr_ulp_mode_force;
	struct chtls_sock *csk;
	unsigned int opcode;

	csk = rcu_dereference_sk_user_data(sk);
	opcode = FW_OFLD_TX_DATA_WR;

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
	req->op_to_immdlen = htonl(WR_OP_V(opcode) |
				   FW_WR_COMPL_V(compl) |
				   FW_WR_IMMDLEN_V(immdlen));
	req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
				  FW_WR_LEN16_V(credits));

	wr_ulp_mode_force = TX_ULP_MODE_V(csk->ulp_mode);
	if (is_sg_request(skb))
		wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
			((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
			 FW_OFLD_TX_DATA_WR_SHOVE_F);

	req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
				     TX_URG_V(skb_urgent(skb)) |
				     TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
						skb_queue_empty(&csk->txq)));
	req->plen = htonl(len);
}
static int chtls_wr_size(struct chtls_sock *csk, const struct sk_buff *skb,
			 bool size)
{
	int wr_size;

	wr_size = TLS_WR_CPL_LEN;
	wr_size += KEY_ON_MEM_SZ;
	wr_size += ivs_size(csk->sk, skb);

	if (size)
		return wr_size;

	/* frags counted for IV dsgl */
	if (!skb_ulp_tls_iv_imm(skb))
		skb_shinfo(skb)->nr_frags++;

	return wr_size;
}

static bool is_ofld_imm(struct chtls_sock *csk, const struct sk_buff *skb)
{
	int length = skb->len;

	if (skb->peeked || skb->len > MAX_IMM_ULPTX_WR_LEN)
		return false;

	if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
		/* Check TLS header len for Immediate */
		if (csk->ulp_mode == ULP_MODE_TLS &&
		    skb_ulp_tls_inline(skb))
			length += chtls_wr_size(csk, skb, true);
		else
			length += sizeof(struct fw_ofld_tx_data_wr);

		return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
	}
	return true;
}
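
/*
 * Estimate the number of 8-byte flits needed for a non-immediate WR:
 * the header bytes already in the skb plus a scatter-gather list
 * covering the page frags (and the linear tail, if any).
 */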
static unsigned int calc_tx_flits(const struct sk_buff *skb,
				  unsigned int immdlen)
{
	unsigned int flits, cnt;

	flits = immdlen / 8;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
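
/*
 * Main transmit loop: while TX credits and queued skbs are available,
 * send a one-time FLOWC WR if needed, charge the WR size in 16-byte
 * credits, prepend the TLS or plain offload TX header, and hand the
 * skb to the L2T layer.  A lone skb flagged HOLD is kept back so more
 * data can still be appended to it.
 */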
int chtls_push_frames(struct chtls_sock *csk, int comp)
{
	struct chtls_hws *hws = &csk->tlshws;
	struct tcp_sock *tp;
	struct sk_buff *skb;
	int total_size = 0;
	struct sock *sk;
	int wr_size;

	wr_size = sizeof(struct fw_ofld_tx_data_wr);
	sk = csk->sk;
	tp = tcp_sk(sk);

	if (unlikely(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE)))
		return 0;

	if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
		return 0;

	while (csk->wr_credits && (skb = skb_peek(&csk->txq)) &&
	       (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_HOLD) ||
		skb_queue_len(&csk->txq) > 1)) {
		unsigned int credit_len = skb->len;
		unsigned int credits_needed;
		unsigned int completion = 0;
		int tls_len = skb->len;	/* TLS data len before IV/key */
		unsigned int immdlen;
		int len = skb->len;	/* length [ulp bytes] inserted by hw */
		int flowclen16 = 0;
		int tls_tx_imm = 0;

		immdlen = skb->len;
		if (!is_ofld_imm(csk, skb)) {
			immdlen = skb_transport_offset(skb);
			if (skb_ulp_tls_inline(skb))
				wr_size = chtls_wr_size(csk, skb, false);
			credit_len = 8 * calc_tx_flits(skb, immdlen);
		} else {
			if (skb_ulp_tls_inline(skb)) {
				wr_size = chtls_wr_size(csk, skb, false);
				tls_tx_imm = 1;
			}
		}
		if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR))
			credit_len += wr_size;
		credits_needed = DIV_ROUND_UP(credit_len, 16);
		if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(sk, 1, tp->snd_nxt,
						      tp->rcv_nxt);
			if (flowclen16 <= 0)
				break;
			csk->wr_credits -= flowclen16;
			csk->wr_unacked += flowclen16;
			csk->wr_nondata += flowclen16;
			csk_set_flag(csk, CSK_TX_DATA_SENT);
		}

		if (csk->wr_credits < credits_needed) {
			if (skb_ulp_tls_inline(skb) &&
			    !skb_ulp_tls_iv_imm(skb))
				skb_shinfo(skb)->nr_frags--;
			break;
		}

		__skb_unlink(skb, &csk->txq);
		skb_set_queue_mapping(skb, (csk->txq_idx << 1) |
				      CPL_PRIORITY_DATA);
		if (hws->ofld)
			hws->txqid = (skb->queue_mapping >> 1);
		skb->csum = (__force __wsum)(credits_needed + csk->wr_nondata);
		csk->wr_credits -= credits_needed;
		csk->wr_unacked += credits_needed;
		csk->wr_nondata = 0;
		enqueue_wr(csk, skb);

		if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
			if ((comp && csk->wr_unacked == credits_needed) ||
			    (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) ||
			    csk->wr_unacked >= csk->wr_max_credits / 2) {
				completion = 1;
				csk->wr_unacked = 0;
			}
			if (skb_ulp_tls_inline(skb))
				make_tlstx_data_wr(sk, skb, tls_tx_imm,
						   tls_len, credits_needed);
			else
				make_tx_data_wr(sk, skb, immdlen, len,
						credits_needed, completion);
			tp->snd_nxt += len;
			tp->lsndtime = tcp_time_stamp(tp);
			if (completion)
				ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
		} else {
			struct cpl_close_con_req *req = cplhdr(skb);
			unsigned int cmd = CPL_OPCODE_G(ntohl
					   (req->wr.wr_hi));

			if (cmd == CPL_CLOSE_CON_REQ)
				csk_set_flag(csk,
					     CSK_CLOSE_CON_REQUESTED);

			if ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) &&
			    (csk->wr_unacked >= csk->wr_max_credits / 2)) {
				req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
				csk->wr_unacked = 0;
			}
		}
		total_size += skb->truesize;
		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_BARRIER)
			csk_set_flag(csk, CSK_TX_WAIT_IDLE);
		t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
		cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
	}
	sk->sk_wmem_queued -= total_size;
	return total_size;
}
static void mark_urg(struct tcp_sock *tp, int flags,
		     struct sk_buff *skb)
{
	if (unlikely(flags & MSG_OOB)) {
		tp->snd_up = tp->write_seq;
		ULP_SKB_CB(skb)->flags = ULPCB_FLAG_URG |
					 ULPCB_FLAG_BARRIER |
					 ULPCB_FLAG_NO_APPEND |
					 ULPCB_FLAG_NEED_HDR;
	}
}

/*
 * Returns true if a connection should send more data to TCP engine
 */
static bool should_push(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	struct tcp_sock *tp = tcp_sk(sk);

	/*
	 * If we've released our offload resources there's nothing to do ...
	 */
	if (!cdev)
		return false;

	/*
	 * If there aren't any work requests in flight, or there isn't enough
	 * data in flight, or Nagle is off then send the current TX_DATA
	 * otherwise hold it and wait to accumulate more data.
	 */
	return csk->wr_credits == csk->wr_max_credits ||
		(tp->nonagle & TCP_NAGLE_OFF);
}

/*
 * Returns true if a TCP socket is corked.
 */
static bool corked(const struct tcp_sock *tp, int flags)
{
	return (flags & MSG_MORE) || (tp->nonagle & TCP_NAGLE_CORK);
}

/*
 * Returns true if a send should try to push new data.
 */
static bool send_should_push(struct sock *sk, int flags)
{
	return should_push(sk) && !corked(tcp_sk(sk), flags);
}
void chtls_tcp_push(struct sock *sk, int flags)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	int qlen = skb_queue_len(&csk->txq);

	if (likely(qlen)) {
		struct sk_buff *skb = skb_peek_tail(&csk->txq);
		struct tcp_sock *tp = tcp_sk(sk);

		mark_urg(tp, flags, skb);

		if (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) &&
		    corked(tp, flags)) {
			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_HOLD;
			return;
		}

		ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_HOLD;
		if (qlen == 1 &&
		    ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
		     should_push(sk)))
			chtls_push_frames(csk, 1);
	}
}

/*
 * Calculate the size for a new send sk_buff.  It's maximum size so we can
 * pack lots of data into it, unless we plan to send it immediately, in which
 * case we size it more tightly.
 *
 * Note: we don't bother compensating for MSS < PAGE_SIZE because it doesn't
 * arise in normal cases and when it does we are just wasting memory.
 */
static int select_size(struct sock *sk, int io_len, int flags, int len)
{
	const int pgbreak = SKB_MAX_HEAD(len);

	/*
	 * If the data wouldn't fit in the main body anyway, put only the
	 * header in the main body so it can use immediate data and place all
	 * the payload in page fragments.
	 */
	if (io_len > pgbreak)
		return 0;

	/*
	 * If we will be accumulating payload get a large main body.
	 */
	if (!send_should_push(sk, flags))
		return pgbreak;

	return io_len;
}
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ULP_SKB_CB(skb)->seq = tp->write_seq;
	ULP_SKB_CB(skb)->flags = flags;
	__skb_queue_tail(&csk->txq, skb);
	sk->sk_wmem_queued += skb->truesize;

	if (TCP_PAGE(sk) && TCP_OFF(sk)) {
		put_page(TCP_PAGE(sk));
		TCP_PAGE(sk) = NULL;
		TCP_OFF(sk) = 0;
	}
}

static struct sk_buff *get_tx_skb(struct sock *sk, int size)
{
	struct sk_buff *skb;

	skb = alloc_skb(size + TX_HEADER_LEN, sk->sk_allocation);
	if (likely(skb)) {
		skb_reserve(skb, TX_HEADER_LEN);
		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
		skb_reset_transport_header(skb);
	}
	return skb;
}
static struct sk_buff *get_record_skb(struct sock *sk, int size, bool zcopy)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	skb = alloc_skb(((zcopy ? 0 : size) + TX_TLSHDR_LEN +
			KEY_ON_MEM_SZ + max_ivs_size(sk, size)),
			sk->sk_allocation);
	if (likely(skb)) {
		skb_reserve(skb, (TX_TLSHDR_LEN +
			    KEY_ON_MEM_SZ + max_ivs_size(sk, size)));
		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
		skb_reset_transport_header(skb);
		ULP_SKB_CB(skb)->ulp.tls.ofld = 1;
		ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type;
	}
	return skb;
}

static void tx_skb_finalize(struct sk_buff *skb)
{
	struct ulp_skb_cb *cb = ULP_SKB_CB(skb);

	if (!(cb->flags & ULPCB_FLAG_NO_HDR))
		cb->flags = ULPCB_FLAG_NEED_HDR;
	cb->flags |= ULPCB_FLAG_NO_APPEND;
}

static void push_frames_if_head(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (skb_queue_len(&csk->txq) == 1)
		chtls_push_frames(csk, 1);
}
static int chtls_skb_copy_to_page_nocache(struct sock *sk,
					  struct iov_iter *from,
					  struct sk_buff *skb,
					  struct page *page,
					  int off, int copy)
{
	int err;

	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) +
				       off, copy, skb->len);
	if (err)
		return err;

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk->sk_wmem_queued += copy;
	return 0;
}

/* Read TLS header to find content type and data length */
static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
{
	if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr))
		return -EFAULT;
	/* thdr->length is big-endian on the wire; this byte-swap is
	 * ntohs in effect.
	 */
	return (__force int)cpu_to_be16(thdr->length);
}

static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
{
	return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
}
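
/*
 * Sleep until send-buffer headroom (as defined by csk_mem_free()) is
 * available, mirroring sk_stream_wait_memory(): returns -EPIPE on
 * error/shutdown, -EAGAIN for non-blocking sockets, or the
 * signal-dependent error if interrupted.
 */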
static int csk_wait_memory(struct chtls_dev *cdev,
			   struct sock *sk, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = 0;
	long current_timeo;
	long vm_wait = 0;
	bool noblock;

	current_timeo = *timeo_p;
	noblock = (*timeo_p ? false : true);
	if (csk_mem_free(cdev, sk)) {
		current_timeo = (prandom_u32() % (HZ / 5)) + 2;
		vm_wait = (prandom_u32() % (HZ / 5)) + 2;
	}

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo_p) {
			if (noblock)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			goto do_nonblock;
		}
		if (signal_pending(current))
			goto do_interrupted;
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (csk_mem_free(cdev, sk) && !vm_wait)
			break;

		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
		sk_wait_event(sk, &current_timeo, sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      (csk_mem_free(cdev, sk) && !vm_wait), &wait);
		sk->sk_write_pending--;

		if (vm_wait) {
			vm_wait -= current_timeo;
			current_timeo = *timeo_p;
			if (current_timeo != MAX_SCHEDULE_TIMEOUT) {
				current_timeo -= vm_wait;
				if (current_timeo < 0)
					goto do_nonblock;
			}
			vm_wait = 0;
		}
		*timeo_p = current_timeo;
	}
do_rm_wq:
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
do_error:
	err = -EPIPE;
	goto do_rm_wq;
do_nonblock:
	err = -EAGAIN;
	goto do_rm_wq;
do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto do_rm_wq;
}
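
/*
 * TLS-aware sendmsg: when inline TLS TX is active, each record begins
 * with a plaintext TLS header that is consumed here via
 * tls_header_read() to size the record (txleft) and tag the skb with
 * the record type; payload is then staged into tx-queue skbs, either
 * in the linear area or in page frags, until the record is complete.
 */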
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int mss, flags, err;
	int recordsz = 0;
	int copied = 0;
	long timeo;

	lock_sock(sk);
	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out_err;
	}

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	mss = csk->mss;
	csk_set_flag(csk, CSK_TX_MORE_DATA);

	while (msg_data_left(msg)) {
		int copy = 0;

		skb = skb_peek_tail(&csk->txq);
		if (skb) {
			copy = mss - skb->len;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if (!csk_mem_free(cdev, sk))
			goto wait_for_sndbuf;

		if (is_tls_tx(csk) && !csk->tlshws.txleft) {
			struct tls_hdr hdr;

			recordsz = tls_header_read(&hdr, &msg->msg_iter);
			size -= TLS_HEADER_LENGTH;
			copied += TLS_HEADER_LENGTH;
			csk->tlshws.txleft = recordsz;
			csk->tlshws.type = hdr.type;
			if (skb)
				ULP_SKB_CB(skb)->ulp.tls.type = hdr.type;
		}

		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
		    copy <= 0) {
new_buf:
			if (skb) {
				tx_skb_finalize(skb);
				push_frames_if_head(sk);
			}

			if (is_tls_tx(csk)) {
				skb = get_record_skb(sk,
						     select_size(sk,
								 recordsz,
								 flags,
								 TX_TLSHDR_LEN),
						     false);
			} else {
				skb = get_tx_skb(sk,
						 select_size(sk, size, flags,
							     TX_HEADER_LEN));
			}
			if (unlikely(!skb))
				goto wait_for_memory;

			skb->ip_summed = CHECKSUM_UNNECESSARY;
			copy = mss;
		}
		if (copy > size)
			copy = size;

		if (skb_tailroom(skb) > 0) {
			copy = min(copy, skb_tailroom(skb));
			if (is_tls_tx(csk))
				copy = min_t(int, copy, csk->tlshws.txleft);
			err = skb_add_data_nocache(sk, skb,
						   &msg->msg_iter, copy);
			if (err)
				goto do_fault;
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			struct page *page = TCP_PAGE(sk);
			int pg_size = PAGE_SIZE;
			int off = TCP_OFF(sk);
			bool merge;

			if (page)
				pg_size = page_size(page);
			if (off < pg_size &&
			    skb_can_coalesce(skb, i, page, off)) {
				merge = true;
				goto copy;
			}
			merge = false;
			if (i == (is_tls_tx(csk) ? (MAX_SKB_FRAGS - 1) :
			    MAX_SKB_FRAGS))
				goto new_buf;

			if (page && off == pg_size) {
				put_page(page);
				TCP_PAGE(sk) = page = NULL;
				pg_size = PAGE_SIZE;
			}

			if (!page) {
				gfp_t gfp = sk->sk_allocation;
				int order = cdev->send_page_order;

				if (order) {
					page = alloc_pages(gfp | __GFP_COMP |
							   __GFP_NOWARN |
							   __GFP_NORETRY,
							   order);
					if (page)
						pg_size <<= order;
				}
				if (!page) {
					page = alloc_page(gfp);
					pg_size = PAGE_SIZE;
				}
				if (!page)
					goto wait_for_memory;
				off = 0;
			}
copy:
			if (copy > pg_size - off)
				copy = pg_size - off;
			if (is_tls_tx(csk))
				copy = min_t(int, copy, csk->tlshws.txleft);

			err = chtls_skb_copy_to_page_nocache(sk, &msg->msg_iter,
							     skb, page,
							     off, copy);
			if (unlikely(err)) {
				if (!TCP_PAGE(sk)) {
					TCP_PAGE(sk) = page;
					TCP_OFF(sk) = 0;
				}
				goto do_fault;
			}
			/* Update the skb. */
			if (merge) {
				skb_frag_size_add(
						&skb_shinfo(skb)->frags[i - 1],
						copy);
			} else {
				skb_fill_page_desc(skb, i, page, off, copy);
				if (off + copy < pg_size) {
					/* space left keep page */
					get_page(page);
					TCP_PAGE(sk) = page;
				} else {
					TCP_PAGE(sk) = NULL;
				}
			}
			TCP_OFF(sk) = off + copy;
		}
		if (unlikely(skb->len == mss))
			tx_skb_finalize(skb);
		tp->write_seq += copy;
		copied += copy;
		size -= copy;

		if (is_tls_tx(csk))
			csk->tlshws.txleft -= copy;

		if (corked(tp, flags) &&
		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;

		if (size == 0)
			goto out;

		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)
			push_frames_if_head(sk);
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = csk_wait_memory(cdev, sk, &timeo);
		if (err)
			goto do_error;
	}
out:
	csk_reset_flag(csk, CSK_TX_MORE_DATA);
	if (copied)
		chtls_tcp_push(sk, flags);
done:
	release_sock(sk);
	return copied;
do_fault:
	if (!skb->len) {
		__skb_unlink(skb, &csk->txq);
		sk->sk_wmem_queued -= skb->truesize;
		__kfree_skb(skb);
	}
do_error:
	if (copied)
		goto out;
out_err:
	if (csk_conn_inline(csk))
		csk_reset_flag(csk, CSK_TX_MORE_DATA);
	copied = sk_stream_error(sk, flags, err);
	goto done;
}
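
/*
 * sendpage: like chtls_sendmsg() but attaches the caller's pages
 * directly as skb frags, coalescing with the previous frag when
 * possible, so no payload copy is made.
 */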
int chtls_sendpage(struct sock *sk, struct page *page,
		   int offset, size_t size, int flags)
{
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	int mss, err, copied;
	struct tcp_sock *tp;
	long timeo;

	tp = tcp_sk(sk);
	copied = 0;
	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;
	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	err = sk_stream_wait_connect(sk, &timeo);
	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
	    err != 0)
		goto out_err;

	mss = csk->mss;
	csk_set_flag(csk, CSK_TX_MORE_DATA);

	while (size > 0) {
		struct sk_buff *skb = skb_peek_tail(&csk->txq);
		int copy, i;

		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
		    (copy = mss - skb->len) <= 0) {
new_buf:
			if (!csk_mem_free(cdev, sk))
				goto wait_for_sndbuf;

			if (is_tls_tx(csk)) {
				skb = get_record_skb(sk,
						     select_size(sk, size,
								 flags,
								 TX_TLSHDR_LEN),
						     true);
			} else {
				skb = get_tx_skb(sk, 0);
			}
			if (!skb)
				goto wait_for_memory;
			copy = mss;
		}
		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		} else {
			tx_skb_finalize(skb);
			push_frames_if_head(sk);
			goto new_buf;
		}

		skb->len += copy;
		if (skb->len == mss)
			tx_skb_finalize(skb);
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		tp->write_seq += copy;
		copied += copy;
		offset += copy;
		size -= copy;

		if (corked(tp, flags) &&
		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;

		if (!size)
			break;

		if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
			push_frames_if_head(sk);
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = csk_wait_memory(cdev, sk, &timeo);
		if (err)
			goto do_error;
	}
out:
	csk_reset_flag(csk, CSK_TX_MORE_DATA);
	if (copied)
		chtls_tcp_push(sk, flags);
done:
	release_sock(sk);
	return copied;

do_error:
	if (copied)
		goto out;

out_err:
	if (csk_conn_inline(csk))
		csk_reset_flag(csk, CSK_TX_MORE_DATA);
	copied = sk_stream_error(sk, flags, err);
	goto done;
}
static void chtls_select_window(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int wnd = tp->rcv_wnd;

	wnd = max_t(unsigned int, wnd, tcp_full_space(sk));
	wnd = max_t(unsigned int, MIN_RCV_WND, wnd);

	if (wnd > MAX_RCV_WND)
		wnd = MAX_RCV_WND;

/*
 * Check if we need to grow the receive window in response to an increase in
 * the socket's receive buffer size.  Some applications increase the buffer
 * size dynamically and rely on the window to grow accordingly.
 */

	if (wnd > tp->rcv_wnd) {
		tp->rcv_wup -= wnd - tp->rcv_wnd;
		tp->rcv_wnd = wnd;
		/* Mark the receive window as updated */
		csk_reset_flag(csk, CSK_UPDATE_RCV_WND);
	}
}

/*
 * Send RX credits through an RX_DATA_ACK CPL message. We are permitted
 * to return without sending the message in case we cannot allocate
 * an sk_buff. Returns the number of credits sent.
 */
static u32 send_rx_credits(struct chtls_sock *csk, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return 0;
	__skb_put(skb, sizeof(*req));
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) |
				       RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

#define CREDIT_RETURN_STATE (TCPF_ESTABLISHED | \
			     TCPF_FIN_WAIT1 | \
			     TCPF_FIN_WAIT2)
/*
 * Called after some received data has been read.  It returns RX credits
 * to the HW for the amount of data processed.
 */
static void chtls_cleanup_rbuf(struct sock *sk, int copied)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp;
	int must_send;
	u32 credits;
	u32 thres;

	thres = 15 * 1024;

	if (!sk_in_state(sk, CREDIT_RETURN_STATE))
		return;

	chtls_select_window(sk);
	tp = tcp_sk(sk);
	credits = tp->copied_seq - tp->rcv_wup;
	if (unlikely(!credits))
		return;

	/*
	 * For coalescing to work effectively ensure the receive window has
	 * at least 16KB left.
	 */
	must_send = credits + 16384 >= tp->rcv_wnd;

	if (must_send || credits >= thres)
		tp->rcv_wup += send_rx_credits(csk, credits);
}
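
/*
 * TLS receive path (entered with the socket lock held): records are
 * delivered by hardware as decrypted skbs.  hws->copied_seq appears to
 * track the consumer offset within the current record, and hws->rcvpld
 * the payload length taken from the skb carrying the TLS header.
 */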
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			    int nonblock, int flags, int *addr_len)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *hws = &csk->tlshws;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long avail;
	int buffers_freed;
	int copied = 0;
	int target;
	long timeo;

	buffers_freed = 0;

	timeo = sock_rcvtimeo(sk, nonblock);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
		chtls_cleanup_rbuf(sk, copied);

	do {
		struct sk_buff *skb;
		u32 offset = 0;

		if (unlikely(tp->urg_data &&
			     tp->urg_seq == tp->copied_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			goto found_ok_skb;
		if (csk->wr_credits &&
		    skb_queue_len(&csk->txq) &&
		    chtls_push_frames(csk, csk->wr_credits ==
				      csk->wr_max_credits))
			sk->sk_write_space(sk);

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}
			if (!timeo) {
				copied = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}
		if (sk->sk_backlog.tail) {
			release_sock(sk);
			lock_sock(sk);
			chtls_cleanup_rbuf(sk, copied);
			continue;
		}

		if (copied >= target)
			break;
		chtls_cleanup_rbuf(sk, copied);
		sk_wait_data(sk, &timeo, NULL);
		continue;
found_ok_skb:
		if (!skb->len) {
			skb_dst_set(skb, NULL);
			__skb_unlink(skb, &sk->sk_receive_queue);
			kfree_skb(skb);

			if (!copied && !timeo) {
				copied = -EAGAIN;
				break;
			}

			if (copied < target) {
				release_sock(sk);
				lock_sock(sk);
				continue;
			}
			break;
		}
		offset = hws->copied_seq;
		avail = skb->len - offset;
		if (len < avail)
			avail = len;

		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - tp->copied_seq;

			if (urg_offset < avail) {
				if (urg_offset) {
					avail = urg_offset;
				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
					/* First byte is urgent, skip */
					tp->copied_seq++;
					offset++;
					avail--;
					if (!avail)
						goto skip_copy;
				}
			}
		}
		if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
			if (!copied) {
				copied = -EFAULT;
				break;
			}
		}

		copied += avail;
		len -= avail;
		hws->copied_seq += avail;
skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
			tp->urg_data = 0;

		if ((avail + offset) >= skb->len) {
			if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
				tp->copied_seq += skb->len;
				hws->rcvpld = skb->hdr_len;
			} else {
				tp->copied_seq += hws->rcvpld;
			}
			chtls_free_skb(sk, skb);
			buffers_freed++;
			hws->copied_seq = 0;
			if (copied >= target &&
			    !skb_peek(&sk->sk_receive_queue))
				break;
		}
	} while (len > 0);

	if (buffers_freed)
		chtls_cleanup_rbuf(sk, copied);
	release_sock(sk);
	return copied;
}
/*
 * Peek at data in a socket's receive buffer.
 */
static int peekmsg(struct sock *sk, struct msghdr *msg,
		   size_t len, int nonblock, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 peek_seq, offset;
	struct sk_buff *skb;
	int copied = 0;
	size_t avail;		/* amount of available data in current skb */
	long timeo;

	lock_sock(sk);
	timeo = sock_rcvtimeo(sk, nonblock);
	peek_seq = tp->copied_seq;

	do {
		if (unlikely(tp->urg_data && tp->urg_seq == peek_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}

		skb_queue_walk(&sk->sk_receive_queue, skb) {
			offset = peek_seq - ULP_SKB_CB(skb)->seq;
			if (offset < skb->len)
				goto found_ok_skb;
		}

		/* empty receive queue */
		if (copied)
			break;
		if (sock_flag(sk, SOCK_DONE))
			break;
		if (sk->sk_err) {
			copied = sock_error(sk);
			break;
		}
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		if (sk->sk_state == TCP_CLOSE) {
			copied = -ENOTCONN;
			break;
		}
		if (!timeo) {
			copied = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			copied = sock_intr_errno(timeo);
			break;
		}

		if (sk->sk_backlog.tail) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else {
			sk_wait_data(sk, &timeo, NULL);
		}

		if (unlikely(peek_seq != tp->copied_seq)) {
			if (net_ratelimit())
				pr_info("TCP(%s:%d), race in MSG_PEEK.\n",
					current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;
found_ok_skb:
		avail = skb->len - offset;
		if (len < avail)
			avail = len;
		/*
		 * Do we have urgent data here?  We need to skip over the
		 * urgent byte.
		 */
		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - peek_seq;

			if (urg_offset < avail) {
				/*
				 * The amount of data we are preparing to copy
				 * contains urgent data.
				 */
				if (!urg_offset) { /* First byte is urgent */
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						peek_seq++;
						offset++;
						avail--;
					}
					if (!avail)
						continue;
				} else {
					/* stop short of the urgent data */
					avail = urg_offset;
				}
			}
		}

		/*
		 * If MSG_TRUNC is specified the data is discarded.
		 */
		if (likely(!(flags & MSG_TRUNC)))
			if (skb_copy_datagram_msg(skb, offset, msg, len)) {
				if (!copied) {
					copied = -EFAULT;
					break;
				}
			}
		peek_seq += avail;
		copied += avail;
		len -= avail;
	} while (len > 0);

	release_sock(sk);
	return copied;
}
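
/*
 * Top-level recvmsg: urgent data falls back to the stock TCP path,
 * MSG_PEEK is handled by peekmsg(), and TLS connections are diverted
 * to chtls_pt_recvmsg(); everything else is plain offloaded RX.
 */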
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct chtls_sock *csk;
	unsigned long avail;    /* amount of available data in current skb */
	int buffers_freed;
	int copied = 0;
	long timeo;
	int target;             /* Read at least this many bytes */

	buffers_freed = 0;

	if (unlikely(flags & MSG_OOB))
		return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
					addr_len);

	if (unlikely(flags & MSG_PEEK))
		return peekmsg(sk, msg, len, nonblock, flags);

	if (sk_can_busy_loop(sk) &&
	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
	    sk->sk_state == TCP_ESTABLISHED)
		sk_busy_loop(sk, nonblock);

	lock_sock(sk);
	csk = rcu_dereference_sk_user_data(sk);

	if (is_tls_rx(csk))
		return chtls_pt_recvmsg(sk, msg, len, nonblock,
					flags, addr_len);

	timeo = sock_rcvtimeo(sk, nonblock);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
		chtls_cleanup_rbuf(sk, copied);

	do {
		struct sk_buff *skb;
		u32 offset;

		if (unlikely(tp->urg_data && tp->urg_seq == tp->copied_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			goto found_ok_skb;

		if (csk->wr_credits &&
		    skb_queue_len(&csk->txq) &&
		    chtls_push_frames(csk, csk->wr_credits ==
				      csk->wr_max_credits))
			sk->sk_write_space(sk);

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}
			if (!timeo) {
				copied = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		if (sk->sk_backlog.tail) {
			release_sock(sk);
			lock_sock(sk);
			chtls_cleanup_rbuf(sk, copied);
			continue;
		}

		if (copied >= target)
			break;
		chtls_cleanup_rbuf(sk, copied);
		sk_wait_data(sk, &timeo, NULL);
		continue;

found_ok_skb:
		if (!skb->len) {
			chtls_kfree_skb(sk, skb);
			if (!copied && !timeo) {
				copied = -EAGAIN;
				break;
			}

			if (copied < target)
				continue;

			break;
		}

		offset = tp->copied_seq - ULP_SKB_CB(skb)->seq;
		avail = skb->len - offset;
		if (len < avail)
			avail = len;

		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - tp->copied_seq;

			if (urg_offset < avail) {
				if (urg_offset) {
					avail = urg_offset;
				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
					/* First byte is urgent, skip */
					tp->copied_seq++;
					offset++;
					avail--;
					if (!avail)
						goto skip_copy;
				}
			}
		}

		if (likely(!(flags & MSG_TRUNC))) {
			if (skb_copy_datagram_msg(skb, offset,
						  msg, avail)) {
				if (!copied) {
					copied = -EFAULT;
					break;
				}
			}
		}

		tp->copied_seq += avail;
		copied += avail;
		len -= avail;

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
			tp->urg_data = 0;

		if (avail + offset >= skb->len) {
			chtls_free_skb(sk, skb);
			buffers_freed++;

			if (copied >= target &&
			    !skb_peek(&sk->sk_receive_queue))
				break;
		}
	} while (len > 0);

	if (buffers_freed)
		chtls_cleanup_rbuf(sk, copied);

	release_sock(sk);
	return copied;
}