// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <net/tcp.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"
#define MAX_HDR_INLINE					\
	(((uint32_t)(sizeof(struct siw_rreq_pkt) -	\
		     sizeof(struct iwarp_send))) & 0xF8)
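
/*
 * Illustrative sketch, not driver code: the & 0xF8 mask rounds the
 * inline header capacity down to a multiple of 8 bytes. The value 28
 * below is a made-up size difference, used only to show the masking.
 */
#if 0
	uint32_t diff = 28;
	uint32_t inline_max = diff & 0xF8;	/* 28 -> 24 */
#endif
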
static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
{
	struct siw_pbl *pbl = mem->pbl;
	u64 offset = addr - mem->va;
	u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);

	if (paddr)
		return virt_to_page(paddr);

	return NULL;
}
/*
 * Copy short payload at provided destination payload address
 */
static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
{
	struct siw_wqe *wqe = &c_tx->wqe_active;
	struct siw_sge *sge = &wqe->sqe.sge[0];
	u32 bytes = sge->length;

	if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
		return MAX_HDR_INLINE + 1;

	if (!bytes)
		return 0;

	if (tx_flags(wqe) & SIW_WQE_INLINE) {
		memcpy((void *)paddr, &wqe->sqe.sge[1], bytes);
	} else {
		struct siw_mem *mem = wqe->mem[0];

		if (!mem->mem_obj) {
			/* Kernel client using kva */
			memcpy((void *)paddr, (void *)sge->laddr, bytes);
		} else if (c_tx->in_syscall) {
			if (copy_from_user((void *)paddr,
					   (const void __user *)sge->laddr,
					   bytes))
				return -EFAULT;
		} else {
			unsigned int off = sge->laddr & ~PAGE_MASK;
			struct page *p;
			char *buffer;
			int pbl_idx = 0;

			if (!mem->is_pbl)
				p = siw_get_upage(mem->umem, sge->laddr);
			else
				p = siw_get_pblpage(mem, sge->laddr, &pbl_idx);

			if (unlikely(!p))
				return -EFAULT;

			buffer = kmap_atomic(p);

			if (likely(PAGE_SIZE - off >= bytes)) {
				memcpy((void *)paddr, buffer + off, bytes);
				kunmap_atomic(buffer);
			} else {
				/* Copy what fits on the first page, then
				 * the remainder from the following page.
				 */
				unsigned long part = PAGE_SIZE - off;

				memcpy((void *)paddr, buffer + off, part);
				kunmap_atomic(buffer);

				if (!mem->is_pbl)
					p = siw_get_upage(mem->umem,
							  sge->laddr + part);
				else
					p = siw_get_pblpage(mem,
							    sge->laddr + part,
							    &pbl_idx);
				if (unlikely(!p))
					return -EFAULT;

				buffer = kmap_atomic(p);
				memcpy((void *)(paddr + part), buffer,
				       bytes - part);
				kunmap_atomic(buffer);
			}
		}
	}
	return (int)bytes;
}
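
/*
 * Illustrative example (assumes 4 KiB pages; values made up): a short
 * payload crossing a page boundary is copied in two pieces. With the
 * source starting 6 bytes before a page boundary and bytes == 16:
 */
#if 0
	unsigned int off = PAGE_SIZE - 6, bytes = 16;
	unsigned long part = PAGE_SIZE - off;	/* 6 bytes on first page */
	/* first copy: part bytes from buffer + off,
	 * second copy: bytes - part == 10 bytes from start of next page
	 */
#endif
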
#define PKT_FRAGMENTED 1
#define PKT_COMPLETE 0

/*
 * siw_qp_prepare_tx()
 *
 * Prepare tx state for sending out one fpdu. Builds complete pkt
 * if no user data or only immediate data are present.
 *
 * returns PKT_COMPLETE if complete pkt built, PKT_FRAGMENTED otherwise.
 */
static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
{
	struct siw_wqe *wqe = &c_tx->wqe_active;
	char *crc = NULL;
	int data = 0;

	switch (tx_type(wqe)) {
	case SIW_OP_READ:
	case SIW_OP_READ_LOCAL_INV:
		memcpy(&c_tx->pkt.ctrl,
		       &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl,
		       sizeof(struct iwarp_ctrl));

		c_tx->pkt.rreq.rsvd = 0;
		c_tx->pkt.rreq.ddp_qn = htonl(RDMAP_UNTAGGED_QN_RDMA_READ);
		c_tx->pkt.rreq.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]);
		c_tx->pkt.rreq.ddp_mo = 0;
		c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
		c_tx->pkt.rreq.sink_to =
			cpu_to_be64(wqe->sqe.sge[0].laddr);
		c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
		c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
		c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);

		c_tx->ctrl_len = sizeof(struct iwarp_rdma_rreq);
		crc = (char *)&c_tx->pkt.rreq_pkt.crc;
		break;

	case SIW_OP_SEND:
		if (tx_flags(wqe) & SIW_WQE_SOLICITED)
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND_SE].ctrl,
			       sizeof(struct iwarp_ctrl));
		else
			memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_SEND].ctrl,
			       sizeof(struct iwarp_ctrl));

		c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
		c_tx->pkt.send.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
		c_tx->pkt.send.ddp_mo = 0;

		c_tx->pkt.send_inv.inval_stag = 0;

		c_tx->ctrl_len = sizeof(struct iwarp_send);

		crc = (char *)&c_tx->pkt.send_pkt.crc;
		data = siw_try_1seg(c_tx, (u64)crc);
		break;
	case SIW_OP_SEND_REMOTE_INV:
		if (tx_flags(wqe) & SIW_WQE_SOLICITED)
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND_SE_INVAL].ctrl,
			       sizeof(struct iwarp_ctrl));
		else
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND_INVAL].ctrl,
			       sizeof(struct iwarp_ctrl));

		c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
		c_tx->pkt.send.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
		c_tx->pkt.send.ddp_mo = 0;

		c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);

		c_tx->ctrl_len = sizeof(struct iwarp_send_inv);

		crc = (char *)&c_tx->pkt.send_pkt.crc;
		data = siw_try_1seg(c_tx, (u64)crc);
		break;
	case SIW_OP_WRITE:
		memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_RDMA_WRITE].ctrl,
		       sizeof(struct iwarp_ctrl));

		c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);
		c_tx->pkt.rwrite.sink_to = cpu_to_be64(wqe->sqe.raddr);
		c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);

		crc = (char *)&c_tx->pkt.write_pkt.crc;
		data = siw_try_1seg(c_tx, (u64)crc);
		break;
	case SIW_OP_READ_RESPONSE:
		memcpy(&c_tx->pkt.ctrl,
		       &iwarp_pktinfo[RDMAP_RDMA_READ_RESP].ctrl,
		       sizeof(struct iwarp_ctrl));

		/* NBO: Initial untagged RRESP payload location == 0 */
		c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey);
		c_tx->pkt.rresp.sink_to = cpu_to_be64(wqe->sqe.raddr);

		c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);

		crc = (char *)&c_tx->pkt.write_pkt.crc;
		data = siw_try_1seg(c_tx, (u64)crc);
		break;
	default:
		siw_dbg_qp(tx_qp(c_tx), "stale wqe type %d\n", tx_type(wqe));
		return -EOPNOTSUPP;
	}
	if (unlikely(data < 0))
		return data;

	c_tx->ctrl_sent = 0;

	if (data <= MAX_HDR_INLINE) {
		if (data > 0) {
			wqe->processed = data;

			c_tx->pkt.ctrl.mpa_len =
				htons(c_tx->ctrl_len + data - MPA_HDR_SIZE);

			/* Add pad, if needed */
			data += -(int)data & 0x3;
			/* advance CRC location after payload */
			crc += data;
			c_tx->ctrl_len += data;

			if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
				c_tx->pkt.c_untagged.ddp_mo = 0;
			else
				c_tx->pkt.c_tagged.ddp_to =
					cpu_to_be64(wqe->sqe.raddr);
		}

		*(u32 *)crc = 0;
		/*
		 * Do complete CRC if enabled and short packet
		 */
		if (c_tx->mpa_crc_hd) {
			crypto_shash_init(c_tx->mpa_crc_hd);
			if (crypto_shash_update(c_tx->mpa_crc_hd,
						(u8 *)&c_tx->pkt,
						c_tx->ctrl_len))
				return -EINVAL;
			crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)crc);
		}
		c_tx->ctrl_len += MPA_CRC_SIZE;

		return PKT_COMPLETE;
	}
	c_tx->ctrl_len += MPA_CRC_SIZE;
	c_tx->sge_idx = 0;
	c_tx->sge_off = 0;
	c_tx->pbl_idx = 0;
	/*
	 * Allow direct sending out of user buffer if WR is non signalled
	 * and payload is over threshold.
	 * Per RDMA verbs, the application should not change the send buffer
	 * until the work completed. In iWarp, work completion is only
	 * local delivery to TCP. TCP may reuse the buffer for
	 * retransmission. Changing unsent data also breaks the CRC,
	 * if applied.
	 */
	if (c_tx->zcopy_tx && wqe->bytes >= SENDPAGE_THRESH &&
	    !(tx_flags(wqe) & SIW_WQE_SIGNALLED))
		c_tx->use_sendpage = 1;
	else
		c_tx->use_sendpage = 0;

	return PKT_FRAGMENTED;
}
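
/*
 * Illustrative example (values made up): a complete short FPDU for a
 * SEND with 5 payload bytes. The MPA length field is computed before
 * padding and does not count the 2-byte MPA header itself; the payload
 * is then padded to a 4-byte boundary ahead of the trailing CRC:
 */
#if 0
	int data = 5;
	u16 mpa_len = c_tx->ctrl_len + data - MPA_HDR_SIZE;
	data += -(int)data & 0x3;	/* 5 -> 8, i.e. 3 pad bytes */
#endif
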
/*
 * Send out one complete control type FPDU, or header of FPDU carrying
 * data. Used for fixed sized packets like Read.Requests or zero length
 * SENDs, WRITEs, READ.Responses, or header only.
 */
static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
		       int flags)
{
	struct msghdr msg = { .msg_flags = flags };
	struct kvec iov = { .iov_base =
				    (char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent,
			    .iov_len = c_tx->ctrl_len - c_tx->ctrl_sent };

	int rv = kernel_sendmsg(s, &msg, &iov, 1,
				c_tx->ctrl_len - c_tx->ctrl_sent);

	if (rv >= 0) {
		c_tx->ctrl_sent += rv;

		if (c_tx->ctrl_sent == c_tx->ctrl_len)
			rv = 0;
		else
			rv = -EAGAIN;
	}
	return rv;
}
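
/*
 * Illustrative example (hypothetical caller): siw_tx_ctrl() returns 0
 * once the control part is fully pushed and -EAGAIN on a partial send;
 * c_tx->ctrl_sent remembers the resume point for the next call.
 */
#if 0
	int rv = siw_tx_ctrl(c_tx, s, MSG_DONTWAIT);
	if (rv == -EAGAIN) {
		/* send space exhausted: retry from c_tx->ctrl_sent later */
	}
#endif
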
/*
 * 0copy TCP transmit interface: Use do_tcp_sendpages.
 *
 * Using sendpage to push page by page appears to be less efficient
 * than using sendmsg, even if data are copied.
 *
 * A general performance limitation might be the extra four bytes
 * trailer checksum segment to be pushed after user data.
 */
static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
			     size_t size)
{
	struct sock *sk = s->sk;
	int i = 0, rv = 0, sent = 0,
	    flags = MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST;

	while (size) {
		size_t bytes = min_t(size_t, PAGE_SIZE - offset, size);

		if (size + offset <= PAGE_SIZE)
			flags = MSG_MORE | MSG_DONTWAIT;

		tcp_rate_check_app_limited(sk);
try_page_again:
		lock_sock(sk);
		rv = do_tcp_sendpages(sk, page[i], offset, bytes, flags);
		release_sock(sk);

		if (rv > 0) {
			size -= rv;
			sent += rv;
			if (rv != bytes) {
				offset += rv;
				bytes -= rv;
				goto try_page_again;
			}
			offset = 0;
		} else {
			if (rv == -EAGAIN || rv == 0)
				break;
			return rv;
		}
		i++;
	}
	return sent;
}
/*
 * siw_0copy_tx()
 *
 * Pushes list of pages to TCP socket. If pages from multiple
 * SGE's, all referenced pages of each SGE are pushed in one
 * shot.
 */
static int siw_0copy_tx(struct socket *s, struct page **page,
			struct siw_sge *sge, unsigned int offset,
			unsigned int size)
{
	int i = 0, sent = 0, rv;
	int sge_bytes = min(sge->length - offset, size);

	offset = (sge->laddr + offset) & ~PAGE_MASK;

	while (sent != size) {
		rv = siw_tcp_sendpages(s, &page[i], offset, sge_bytes);
		if (rv >= 0) {
			sent += rv;
			if (size == sent || sge_bytes > rv)
				break;

			i += PAGE_ALIGN(sge_bytes + offset) >> PAGE_SHIFT;
			sge++;
			sge_bytes = min(sge->length, size - sent);
			offset = sge->laddr & ~PAGE_MASK;
		} else {
			sent = rv;
			break;
		}
	}
	return sent;
}
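
/*
 * Illustrative example (assumes 4 KiB pages; values made up): after an
 * SGE is fully sent, the page index advances by the number of pages it
 * touched. An SGE of 6000 bytes starting at page offset 500 covers
 * PAGE_ALIGN(6000 + 500) >> PAGE_SHIFT == 2 pages.
 */
#if 0
	unsigned int offset = 500;
	int sge_bytes = 6000;
	int i = PAGE_ALIGN(sge_bytes + offset) >> PAGE_SHIFT;	/* 2 */
#endif
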
#define MAX_TRAILER (MPA_CRC_SIZE + 4)

static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps)
{
	/* Skip iov element 0 if it referenced the packet header */
	if (hdr_len) {
		++pages;
		--num_maps;
	}
	while (num_maps-- > 0) {
		kunmap(*pages);
		pages++;
	}
}
/*
 * siw_tx_hdt() tries to push a complete packet to TCP where all
 * packet fragments are referenced by the elements of one iovec.
 * For the data portion, each involved page must be referenced by
 * one extra element. All sge's data can be non-aligned to page
 * boundaries. Two more elements are referencing iWARP header
 * and trailer:
 * MAX_ARRAY = 64KB/PAGE_SIZE + 1 + (2 * (SIW_MAX_SGE - 1) + HDR + TRL)
 */
#define MAX_ARRAY ((0xffff / PAGE_SIZE) + 1 + (2 * (SIW_MAX_SGE - 1) + 2))
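
/*
 * Illustrative computation (assumes 4 KiB pages and SIW_MAX_SGE == 6):
 * MAX_ARRAY = (0xffff / 4096) + 1 + (2 * (6 - 1) + 2)
 *           = 15 + 1 + 12
 *           = 28 iovec elements.
 */
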
/*
 * Write out iov referencing hdr, data and trailer of current FPDU.
 * Update transmit state dependent on write return status
 */
static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
{
	struct siw_wqe *wqe = &c_tx->wqe_active;
	struct siw_sge *sge = &wqe->sqe.sge[c_tx->sge_idx];
	struct kvec iov[MAX_ARRAY];
	struct page *page_array[MAX_ARRAY];
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };

	int seg = 0, do_crc = c_tx->do_crc, is_kva = 0, rv;
	unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
		     sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
		     pbl_idx = c_tx->pbl_idx;

	if (c_tx->state == SIW_SEND_HDR) {
		if (c_tx->use_sendpage) {
			rv = siw_tx_ctrl(c_tx, s, MSG_DONTWAIT | MSG_MORE);
			if (rv)
				goto done;

			c_tx->state = SIW_SEND_DATA;
		} else {
			iov[0].iov_base =
				(char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent;
			iov[0].iov_len = hdr_len =
				c_tx->ctrl_len - c_tx->ctrl_sent;
			seg = 1;
		}
	}

	wqe->processed += data_len;
	while (data_len) { /* walk the list of SGE's */
		unsigned int sge_len = min(sge->length - sge_off, data_len);
		unsigned int fp_off = (sge->laddr + sge_off) & ~PAGE_MASK;
		struct siw_mem *mem;

		if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
			mem = wqe->mem[sge_idx];
			if (!mem->mem_obj) {
				/* Kernel client using kva */
				is_kva = 1;
			}
		} else {
			is_kva = 1;
		}
		if (is_kva && !c_tx->use_sendpage) {
			/*
			 * tx from kernel virtual address: either inline data
			 * or memory region with assigned kernel buffer
			 */
			iov[seg].iov_base = (void *)(sge->laddr + sge_off);
			iov[seg].iov_len = sge_len;

			if (do_crc)
				crypto_shash_update(c_tx->mpa_crc_hd,
						    (u8 *)iov[seg].iov_base,
						    sge_len);
			sge_off += sge_len;
			data_len -= sge_len;
			seg++;
			goto sge_done;
		}

		while (sge_len) {
			size_t plen = min((int)PAGE_SIZE - fp_off, sge_len);

			if (!is_kva) {
				struct page *p;

				if (mem->is_pbl)
					p = siw_get_pblpage(
						mem, sge->laddr + sge_off,
						&pbl_idx);
				else
					p = siw_get_upage(mem->umem,
							  sge->laddr + sge_off);
				if (unlikely(!p)) {
					if (!c_tx->use_sendpage && seg) {
						siw_unmap_pages(page_array,
								hdr_len, seg);
					}
					wqe->processed -= c_tx->bytes_unsent;
					rv = -EFAULT;
					goto done_crc;
				}
				page_array[seg] = p;

				if (!c_tx->use_sendpage) {
					iov[seg].iov_base = kmap(p) + fp_off;
					iov[seg].iov_len = plen;

					if (do_crc)
						crypto_shash_update(
							c_tx->mpa_crc_hd,
							iov[seg].iov_base,
							plen);
				} else if (do_crc)
					crypto_shash_update(
						c_tx->mpa_crc_hd,
						page_address(p) + fp_off,
						plen);
			} else {
				u64 pa = ((sge->laddr + sge_off) & PAGE_MASK);

				page_array[seg] = virt_to_page(pa);
				if (do_crc)
					crypto_shash_update(
						c_tx->mpa_crc_hd,
						(void *)(sge->laddr + sge_off),
						plen);
			}

			sge_len -= plen;
			sge_off += plen;
			data_len -= plen;
			fp_off = 0;

			if (++seg > (int)MAX_ARRAY) {
				siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
				if (!is_kva && !c_tx->use_sendpage) {
					siw_unmap_pages(page_array, hdr_len,
							seg - 1);
				}
				wqe->processed -= c_tx->bytes_unsent;
				rv = -EMSGSIZE;
				goto done_crc;
			}
		}
sge_done:
		/* Update SGE variables at end of SGE */
		if (sge_off == sge->length &&
		    (data_len != 0 || wqe->processed < wqe->bytes)) {
			sge_idx++;
			sge++;
			sge_off = 0;
		}
	}
	/* trailer */
	if (likely(c_tx->state != SIW_SEND_TRAILER)) {
		iov[seg].iov_base = &c_tx->trailer.pad[4 - c_tx->pad];
		iov[seg].iov_len = trl_len = MAX_TRAILER - (4 - c_tx->pad);
	} else {
		iov[seg].iov_base = &c_tx->trailer.pad[c_tx->ctrl_sent];
		iov[seg].iov_len = trl_len = MAX_TRAILER - c_tx->ctrl_sent;
	}

	if (c_tx->pad) {
		*(u32 *)c_tx->trailer.pad = 0;
		if (do_crc)
			crypto_shash_update(c_tx->mpa_crc_hd,
				(u8 *)&c_tx->trailer.crc - c_tx->pad,
				c_tx->pad);
	}
	if (!c_tx->mpa_crc_hd)
		c_tx->trailer.crc = 0;
	else if (do_crc)
		crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)&c_tx->trailer.crc);
	data_len = c_tx->bytes_unsent;

	if (c_tx->use_sendpage) {
		rv = siw_0copy_tx(s, page_array, &wqe->sqe.sge[c_tx->sge_idx],
				  c_tx->sge_off, data_len);
		if (rv == data_len) {
			rv = kernel_sendmsg(s, &msg, &iov[seg], 1, trl_len);
			if (rv > 0)
				rv += data_len;
			else
				rv = data_len;
		}
	} else {
		rv = kernel_sendmsg(s, &msg, iov, seg + 1,
				    hdr_len + data_len + trl_len);
		if (!is_kva)
			siw_unmap_pages(page_array, hdr_len, seg);
	}
	if (rv < (int)hdr_len) {
		/* Not even complete hdr pushed or negative rv */
		wqe->processed -= data_len;
		if (rv >= 0) {
			c_tx->ctrl_sent += rv;
			rv = -EAGAIN;
		}
		goto done_crc;
	}
	rv -= hdr_len;
	if (rv >= (int)data_len) {
		/* all user data pushed to TCP or no data to push */
		if (data_len > 0 && wqe->processed < wqe->bytes) {
			/* Save the current state for next tx */
			c_tx->sge_idx = sge_idx;
			c_tx->sge_off = sge_off;
			c_tx->pbl_idx = pbl_idx;
		}
		rv -= data_len;

		if (rv == trl_len) /* all pushed */
			rv = 0;
		else {
			c_tx->state = SIW_SEND_TRAILER;
			c_tx->ctrl_len = MAX_TRAILER;
			c_tx->ctrl_sent = rv + 4 - c_tx->pad;
			c_tx->bytes_unsent = 0;
			rv = -EAGAIN;
		}
	} else if (data_len > 0) {
		/* Maybe some user data pushed to TCP */
		c_tx->state = SIW_SEND_DATA;
		wqe->processed -= data_len - rv;

		if (rv) {
			/*
			 * Some bytes out. Recompute tx state based
			 * on old state and bytes pushed
			 */
			unsigned int sge_unsent;

			c_tx->bytes_unsent -= rv;
			sge = &wqe->sqe.sge[c_tx->sge_idx];
			sge_unsent = sge->length - c_tx->sge_off;

			while (sge_unsent <= rv) {
				rv -= sge_unsent;
				c_tx->sge_idx++;
				c_tx->sge_off = 0;
				sge++;
				sge_unsent = sge->length;
			}
			c_tx->sge_off += rv;
		}
		rv = -EAGAIN;
	}
done_crc:
	c_tx->do_crc = 0;
done:
	return rv;
}
static void siw_update_tcpseg(struct siw_iwarp_tx *c_tx,
			      struct socket *s)
{
	struct tcp_sock *tp = tcp_sk(s->sk);

	if (tp->gso_segs) {
		if (c_tx->gso_seg_limit == 0)
			c_tx->tcp_seglen = tp->mss_cache * tp->gso_segs;
		else
			c_tx->tcp_seglen =
				tp->mss_cache *
				min_t(u16, c_tx->gso_seg_limit, tp->gso_segs);
	} else {
		c_tx->tcp_seglen = tp->mss_cache;
	}
	/* Loopback may give odd numbers */
	c_tx->tcp_seglen &= 0xfffffff8;
}
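
/*
 * Illustrative example (values made up): with mss_cache == 1448 and
 * gso_segs == 4, tcp_seglen = 1448 * 4 = 5792, already a multiple of 8.
 * A loopback value such as 65483 would be masked down to 65480.
 */
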
/*
 * siw_prepare_fpdu()
 *
 * Prepares transmit context to send out one FPDU if FPDU will contain
 * user data and user data are not immediate data.
 * Computes maximum FPDU length to fill up TCP MSS if possible.
 *
 * @qp: QP from which to transmit
 * @wqe: Current WQE causing transmission
 *
 * TODO: Take into account real available sendspace on socket
 *       to avoid header misalignment due to send pausing within
 *       fpdu transmission
 */
static void siw_prepare_fpdu(struct siw_qp *qp, struct siw_wqe *wqe)
{
	struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
	int data_len;

	c_tx->ctrl_len =
		iwarp_pktinfo[__rdmap_get_opcode(&c_tx->pkt.ctrl)].hdr_len;
	c_tx->ctrl_sent = 0;

	/*
	 * Update target buffer offset if any
	 */
	if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
		/* Untagged message */
		c_tx->pkt.c_untagged.ddp_mo = cpu_to_be32(wqe->processed);
	else /* Tagged message */
		c_tx->pkt.c_tagged.ddp_to =
			cpu_to_be64(wqe->sqe.raddr + wqe->processed);

	data_len = wqe->bytes - wqe->processed;
	if (data_len + c_tx->ctrl_len + MPA_CRC_SIZE > c_tx->tcp_seglen) {
		/* Trim DDP payload to fit into current TCP segment */
		data_len = c_tx->tcp_seglen - (c_tx->ctrl_len + MPA_CRC_SIZE);
		c_tx->pkt.ctrl.ddp_rdmap_ctrl &= ~DDP_FLAG_LAST;
		c_tx->pad = 0;
	} else {
		c_tx->pkt.ctrl.ddp_rdmap_ctrl |= DDP_FLAG_LAST;
		c_tx->pad = -data_len & 0x3;
	}
	c_tx->bytes_unsent = data_len;

	c_tx->pkt.ctrl.mpa_len =
		htons(c_tx->ctrl_len + data_len - MPA_HDR_SIZE);

	/*
	 * Init MPA CRC computation
	 */
	if (c_tx->mpa_crc_hd) {
		crypto_shash_init(c_tx->mpa_crc_hd);
		crypto_shash_update(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt,
				    c_tx->ctrl_len);
		c_tx->do_crc = 1;
	}
}
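
/*
 * Illustrative example (header size made up): with tcp_seglen == 5792,
 * ctrl_len == 28 and MPA_CRC_SIZE == 4, at most 5792 - (28 + 4) == 5760
 * payload bytes fit into this FPDU. A WQE carrying 8192 bytes is thus
 * trimmed and this segment goes out without DDP_FLAG_LAST set.
 */
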
/*
 * siw_check_sgl_tx()
 *
 * Check permissions for a list of SGE's (SGL).
 * A successful check will have all memory referenced
 * for transmission resolved and assigned to the WQE.
 *
 * @pd: Protection Domain SGL should belong to
 * @wqe: WQE to be checked
 * @perms: requested access permissions
 */
static int siw_check_sgl_tx(struct ib_pd *pd, struct siw_wqe *wqe,
			    enum ib_access_flags perms)
{
	struct siw_sge *sge = &wqe->sqe.sge[0];
	int i, len, num_sge = wqe->sqe.num_sge;

	if (unlikely(num_sge > SIW_MAX_SGE))
		return -EINVAL;

	for (i = 0, len = 0; num_sge; num_sge--, i++, sge++) {
		/*
		 * rdma verbs: do not check stag for a zero length sge
		 */
		if (sge->length) {
			int rv = siw_check_sge(pd, sge, &wqe->mem[i], perms, 0,
					       sge->length);

			if (unlikely(rv != E_ACCESS_OK))
				return rv;
		}
		len += sge->length;
	}
	return len;
}
/*
 * siw_qp_sq_proc_tx()
 *
 * Process one WQE which needs transmission on the wire.
 */
static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
{
	struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
	struct socket *s = qp->attrs.sk;
	int rv = 0, burst_len = qp->tx_ctx.burst;
	enum rdmap_ecode ecode = RDMAP_ECODE_CATASTROPHIC_STREAM;

	if (unlikely(wqe->wr_status == SIW_WR_IDLE))
		return 0;

	if (!burst_len)
		burst_len = SQ_USER_MAXBURST;

	if (wqe->wr_status == SIW_WR_QUEUED) {
		if (!(wqe->sqe.flags & SIW_WQE_INLINE)) {
			if (tx_type(wqe) == SIW_OP_READ_RESPONSE)
				wqe->sqe.num_sge = 1;

			if (tx_type(wqe) != SIW_OP_READ &&
			    tx_type(wqe) != SIW_OP_READ_LOCAL_INV) {
				/*
				 * Reference memory to be tx'd w/o checking
				 * access for LOCAL_READ permission, since
				 * not defined in RDMA core.
				 */
				rv = siw_check_sgl_tx(qp->pd, wqe, 0);
				if (rv < 0) {
					if (tx_type(wqe) ==
					    SIW_OP_READ_RESPONSE)
						ecode = siw_rdmap_error(-rv);
					rv = -EINVAL;
					goto tx_error;
				}
				wqe->bytes = rv;
			} else {
				wqe->bytes = 0;
			}
		} else {
			wqe->bytes = wqe->sqe.sge[0].length;
			if (!qp->kernel_verbs) {
				if (wqe->bytes > SIW_MAX_INLINE) {
					rv = -EINVAL;
					goto tx_error;
				}
				wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1];
			}
		}
		wqe->wr_status = SIW_WR_INPROGRESS;
		wqe->processed = 0;

		siw_update_tcpseg(c_tx, s);

		rv = siw_qp_prepare_tx(c_tx);
		if (rv == PKT_FRAGMENTED) {
			c_tx->state = SIW_SEND_HDR;
			siw_prepare_fpdu(qp, wqe);
		} else if (rv == PKT_COMPLETE) {
			c_tx->state = SIW_SEND_SHORT_FPDU;
		} else {
			goto tx_error;
		}
	}

next_segment:
	siw_dbg_qp(qp, "wr type %d, state %d, data %u, sent %u, id %llx\n",
		   tx_type(wqe), wqe->wr_status, wqe->bytes, wqe->processed,
		   wqe->sqe.id);
	if (--burst_len == 0) {
		rv = -EINPROGRESS;
		goto tx_done;
	}
	if (c_tx->state == SIW_SEND_SHORT_FPDU) {
		enum siw_opcode tx_type = tx_type(wqe);
		unsigned int msg_flags;

		if (siw_sq_empty(qp) || !siw_tcp_nagle || burst_len == 1)
			/*
			 * End current TCP segment, if SQ runs empty,
			 * or siw_tcp_nagle is not set, or we bail out
			 * soon due to no burst credit left.
			 */
			msg_flags = MSG_DONTWAIT;
		else
			msg_flags = MSG_DONTWAIT | MSG_MORE;

		rv = siw_tx_ctrl(c_tx, s, msg_flags);

		if (!rv && tx_type != SIW_OP_READ &&
		    tx_type != SIW_OP_READ_LOCAL_INV)
			wqe->processed = wqe->bytes;

		goto tx_done;
	} else {
		rv = siw_tx_hdt(c_tx, s);
	}
	if (!rv) {
		/*
		 * One segment sent. Processing completed if last
		 * segment, Do next segment otherwise.
		 */
		if (unlikely(c_tx->tx_suspend)) {
			/*
			 * Verbs, 6.4.: Try stopping sending after a full
			 * DDP segment if the connection goes down
			 * (== peer halfclose)
			 */
			rv = -ECONNABORTED;
			goto tx_done;
		}
		if (c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_LAST) {
			siw_dbg_qp(qp, "WQE completed\n");
			goto tx_done;
		}
		c_tx->state = SIW_SEND_HDR;

		siw_update_tcpseg(c_tx, s);

		siw_prepare_fpdu(qp, wqe);
		goto next_segment;
	}
tx_done:
	qp->tx_ctx.burst = burst_len;
	return rv;

tx_error:
	if (ecode != RDMAP_ECODE_CATASTROPHIC_STREAM)
		siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
				   RDMAP_ETYPE_REMOTE_PROTECTION, ecode, 1);
	else
		siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
				   RDMAP_ETYPE_CATASTROPHIC,
				   RDMAP_ECODE_UNSPECIFIED, 1);
	return rv;
}
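
/*
 * Illustrative example (assumes SQ_USER_MAXBURST == 100): burst_len
 * limits how many segments one call may push. A WQE needing 120 FPDUs
 * consumes its credit after 100 segments, returns -EINPROGRESS, and is
 * resumed later from the per-CPU SQ worker, keeping one QP from
 * monopolizing the TX thread.
 */
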
static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
{
	struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr;
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
	int rv = 0;

	siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);

	if (unlikely(!mem || !base_mr)) {
		pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
		return -EINVAL;
	}
	if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
		pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
		rv = -EINVAL;
		goto out;
	}
	if (unlikely(mem->pd != pd)) {
		pr_warn("siw: fastreg: PD mismatch\n");
		rv = -EINVAL;
		goto out;
	}
	if (unlikely(mem->stag_valid)) {
		pr_warn("siw: fastreg: STag 0x%08x already valid\n", sqe->rkey);
		rv = -EINVAL;
		goto out;
	}
	/* Refresh STag since user may have changed key part */
	mem->stag = sqe->rkey;
	mem->perms = sqe->access;
	mem->stag_valid = 1;

	siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n",
		    mem->va, base_mr->iova);
	mem->va = base_mr->iova;
out:
	siw_mem_put(mem);
	return rv;
}
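
/*
 * Illustrative example (values made up): an STag combines a 24-bit
 * memory object index with an 8-bit consumer key in the low byte.
 * rkey == 0x01020304 resolves object 0x010203 via rkey >> 8, while
 * the key byte 0x04 may change with each fast registration.
 */
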
static int siw_qp_sq_proc_local(struct siw_qp *qp, struct siw_wqe *wqe)
{
	int rv;

	switch (tx_type(wqe)) {
	case SIW_OP_REG_MR:
		rv = siw_fastreg_mr(qp->pd, &wqe->sqe);
		break;

	case SIW_OP_INVAL_STAG:
		rv = siw_invalidate_stag(qp->pd, wqe->sqe.rkey);
		break;

	default:
		rv = -EINVAL;
	}
	return rv;
}
/*
 * siw_qp_sq_process()
 *
 * Core TX path routine for RDMAP/DDP/MPA using a TCP kernel socket.
 * Sends RDMAP payload for the current SQ WR @wqe of @qp in one or more
 * MPA FPDUs, each containing a DDP segment.
 *
 * SQ processing may occur in user context as a result of posting
 * new WQE's or from siw_sq_work_handler() context. Processing in
 * user context is limited to non-kernel verbs users.
 *
 * SQ processing may get paused anytime, possibly in the middle of a WR
 * or FPDU, if insufficient send space is available. SQ processing
 * gets resumed from siw_sq_work_handler(), if send space becomes
 * available again.
 *
 * Must be called with the QP state read-locked.
 *
 * Note:
 * An outbound RREQ can be satisfied by the corresponding RRESP
 * _before_ it gets assigned to the ORQ. This happens regularly
 * in RDMA READ via loopback case. Since both outbound RREQ and
 * inbound RRESP can be handled by the same CPU, locking the ORQ
 * is dead-lock prone and thus not an option. With that, the
 * RREQ gets assigned to the ORQ _before_ being sent - see
 * siw_activate_tx() - and pulled back in case of send failure.
 */
int siw_qp_sq_process(struct siw_qp *qp)
{
	struct siw_wqe *wqe = tx_wqe(qp);
	enum siw_opcode tx_type;
	unsigned long flags;
	int rv = 0;

	siw_dbg_qp(qp, "enter for type %d\n", tx_type(wqe));

next_wqe:
	/*
	 * Stop QP processing if SQ state changed
	 */
	if (unlikely(qp->tx_ctx.tx_suspend)) {
		siw_dbg_qp(qp, "tx suspended\n");
		goto done;
	}
	tx_type = tx_type(wqe);

	if (tx_type <= SIW_OP_READ_RESPONSE)
		rv = siw_qp_sq_proc_tx(qp, wqe);
	else
		rv = siw_qp_sq_proc_local(qp, wqe);

	if (!rv) {
		/*
		 * WQE processing done
		 */
		switch (tx_type) {
		case SIW_OP_SEND:
		case SIW_OP_SEND_REMOTE_INV:
		case SIW_OP_WRITE:
			siw_wqe_put_mem(wqe, tx_type);
			/* Fall through */

		case SIW_OP_INVAL_STAG:
		case SIW_OP_REG_MR:
			if (tx_flags(wqe) & SIW_WQE_SIGNALLED)
				siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
						 SIW_WC_SUCCESS);
			break;

		case SIW_OP_READ:
		case SIW_OP_READ_LOCAL_INV:
			/*
			 * already enqueued to ORQ queue
			 */
			break;

		case SIW_OP_READ_RESPONSE:
			siw_wqe_put_mem(wqe, tx_type);
			break;

		default:
			WARN(1, "undefined WQE type %d\n", tx_type);
			rv = -EINVAL;
			goto done;
		}

		spin_lock_irqsave(&qp->sq_lock, flags);
		wqe->wr_status = SIW_WR_IDLE;
		rv = siw_activate_tx(qp);
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		if (rv <= 0)
			goto done;

		goto next_wqe;

	} else if (rv == -EAGAIN) {
		siw_dbg_qp(qp, "sq paused: hd/tr %d of %d, data %d\n",
			   qp->tx_ctx.ctrl_sent, qp->tx_ctx.ctrl_len,
			   qp->tx_ctx.bytes_unsent);
		rv = 0;
		goto done;
	} else if (rv == -EINPROGRESS) {
		rv = siw_sq_start(qp);
		goto done;
	} else {
		/*
		 * WQE processing failed.
		 * o It turns any WQE into a signalled WQE.
		 * o Local catastrophic error must be surfaced
		 * o QP must be moved into Terminate state: done by code
		 *   doing socket state change processing
		 *
		 * o TODO: Termination message must be sent.
		 * o TODO: Implement more precise work completion errors,
		 *         see enum ib_wc_status in ib_verbs.h
		 */
		siw_dbg_qp(qp, "wqe type %d processing failed: %d\n",
			   tx_type(wqe), rv);

		spin_lock_irqsave(&qp->sq_lock, flags);
		/*
		 * RREQ may have already been completed by inbound RRESP!
		 */
		if (tx_type == SIW_OP_READ ||
		    tx_type == SIW_OP_READ_LOCAL_INV) {
			/* Cleanup pending entry in ORQ */
			qp->orq_put--;
			qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0;
		}
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		/*
		 * immediately suspends further TX processing
		 */
		if (!qp->tx_ctx.tx_suspend)
			siw_qp_cm_drop(qp, 0);
		switch (tx_type) {
		case SIW_OP_SEND:
		case SIW_OP_SEND_REMOTE_INV:
		case SIW_OP_SEND_WITH_IMM:
		case SIW_OP_WRITE:
		case SIW_OP_READ:
		case SIW_OP_READ_LOCAL_INV:
			siw_wqe_put_mem(wqe, tx_type);
			/* Fall through */

		case SIW_OP_INVAL_STAG:
		case SIW_OP_REG_MR:
			siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
					 SIW_WC_LOC_QP_OP_ERR);

			siw_qp_event(qp, IB_EVENT_QP_FATAL);
			break;

		case SIW_OP_READ_RESPONSE:
			siw_dbg_qp(qp, "proc. read.response failed: %d\n", rv);

			siw_qp_event(qp, IB_EVENT_QP_REQ_ERR);

			siw_wqe_put_mem(wqe, SIW_OP_READ_RESPONSE);
			break;

		default:
			WARN(1, "undefined WQE type %d\n", tx_type);
			rv = -EINVAL;
		}

		wqe->wr_status = SIW_WR_IDLE;
	}
done:
	return rv;
}
static void siw_sq_resume(struct siw_qp *qp)
{
	if (down_read_trylock(&qp->state_lock)) {
		if (likely(qp->attrs.state == SIW_QP_STATE_RTS &&
			   !qp->tx_ctx.tx_suspend)) {
			int rv = siw_qp_sq_process(qp);

			up_read(&qp->state_lock);

			if (unlikely(rv < 0)) {
				siw_dbg_qp(qp, "SQ task failed: err %d\n", rv);

				if (!qp->tx_ctx.tx_suspend)
					siw_qp_cm_drop(qp, 0);
			}
		} else {
			up_read(&qp->state_lock);
		}
	} else {
		siw_dbg_qp(qp, "Resume SQ while QP locked\n");
	}
	siw_qp_put(qp);
}
struct tx_task_t {
	struct llist_head active;
	wait_queue_head_t waiting;
};

static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g);

void siw_stop_tx_thread(int nr_cpu)
{
	kthread_stop(siw_tx_thread[nr_cpu]);
	wake_up(&per_cpu(siw_tx_task_g, nr_cpu).waiting);
}
int siw_run_sq(void *data)
{
	const int nr_cpu = (unsigned int)(long)data;
	struct llist_node *active;
	struct siw_qp *qp;
	struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);

	init_llist_head(&tx_task->active);
	init_waitqueue_head(&tx_task->waiting);

	while (1) {
		struct llist_node *fifo_list = NULL;

		wait_event_interruptible(tx_task->waiting,
					 !llist_empty(&tx_task->active) ||
						 kthread_should_stop());

		if (kthread_should_stop())
			break;

		active = llist_del_all(&tx_task->active);
		/*
		 * llist_del_all returns a list with newest entry first.
		 * Re-order list for fairness among QP's.
		 */
		while (active) {
			struct llist_node *tmp = active;

			active = llist_next(active);
			tmp->next = fifo_list;
			fifo_list = tmp;
		}
		while (fifo_list) {
			qp = container_of(fifo_list, struct siw_qp, tx_list);
			fifo_list = llist_next(fifo_list);
			qp->tx_list.next = NULL;

			siw_sq_resume(qp);
		}
	}
	active = llist_del_all(&tx_task->active);
	if (active) {
		llist_for_each_entry(qp, active, tx_list) {
			qp->tx_list.next = NULL;
			siw_sq_resume(qp);
		}
	}
	return 0;
}
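
/*
 * Illustrative example (QP names made up): llist_del_all() returns the
 * newest entry first. If QPs were added in order A, B, C, the returned
 * list is C -> B -> A; the head-insertion loop above rebuilds
 * A -> B -> C, so QPs are resumed in arrival order.
 */
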
int siw_sq_start(struct siw_qp *qp)
{
	if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
		return 0;

	if (unlikely(!cpu_online(qp->tx_cpu))) {
		siw_put_tx_cpu(qp->tx_cpu);
		qp->tx_cpu = siw_get_tx_cpu(qp->sdev);
		if (qp->tx_cpu < 0) {
			pr_warn("siw: no tx cpu available\n");

			return -EIO;
		}
	}
	/* Hold a QP reference; dropped in siw_sq_resume() */
	siw_qp_get(qp);

	llist_add(&qp->tx_list, &per_cpu(siw_tx_task_g, qp->tx_cpu).active);

	wake_up(&per_cpu(siw_tx_task_g, qp->tx_cpu).waiting);

	return 0;
}