/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/uverbs_ioctl.h>

#include "iw_cxgb4.h"
static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
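
/*
 * IRD (inbound RDMA read) resource accounting: alloc_ird() reserves
 * 'ird' credits from the per-device pool under the qps xarray lock and
 * warns when the pool is exhausted; free_ird() returns the credits.
 */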
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	xa_lock_irq(&dev->qps);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	xa_unlock_irq(&dev->qps);
	if (ret)
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "device IRD resources exhausted\n");
	return ret;
}

static void free_ird(struct c4iw_dev *dev, int ird)
{
	xa_lock_irq(&dev->qps);
	dev->avail_ird += ird;
	xa_unlock_irq(&dev->qps);
}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}
static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  dma_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	dma_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;

	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx, int has_rq)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dealloc_sq(rdev, &wq->sq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);

	if (has_rq) {
		dma_free_coherent(&rdev->lldi.pdev->dev,
				  wq->rq.memsize, wq->rq.queue,
				  dma_unmap_addr(&wq->rq, mapping));
		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
		kfree(wq->rq.sw_rq);
		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	}
	return 0;
}
/*
 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
 * then this is a user mapping so compute the page-aligned physical
 * address as well.
 */
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa)
{
	u64 bar2_qoffset;
	int ret;

	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
				   pbar2_pa ? 1 : 0,
				   &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;
	if (pbar2_pa)
		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
	if (is_t4(rdev->lldi.adapter_type))
		return NULL;
	return rdev->bar2_kva + bar2_qoffset;
}
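
/*
 * create_qp() allocates the SQ/RQ qids, software shadow rings, and RQT
 * space, DMA-maps the queue memory, resolves the BAR2 doorbell
 * addresses, and then posts a single FW_RI_RES_WR (one resource for an
 * SRQ-attached QP, two otherwise) so the firmware writes the EQ
 * contexts.
 */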
199 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
200 struct t4_cq *rcq, struct t4_cq *scq,
201 struct c4iw_dev_ucontext *uctx,
202 struct c4iw_wr_wait *wr_waitp,
205 int user = (uctx != &rdev->uctx);
206 struct fw_ri_res_wr *res_wr;
207 struct fw_ri_res *res;
213 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
218 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
226 wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
		goto free_rq_qid; /* FIXME */
234 wq->rq.sw_rq = kcalloc(wq->rq.size,
235 sizeof(*wq->rq.sw_rq),
246 * RQT must be a power of 2 and at least 16 deep.
249 roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
250 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
251 if (!wq->rq.rqt_hwaddr) {
257 ret = alloc_sq(rdev, &wq->sq, user);
260 memset(wq->sq.queue, 0, wq->sq.memsize);
261 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
264 wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
272 pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
274 (unsigned long long)virt_to_phys(wq->sq.queue),
276 (unsigned long long)virt_to_phys(wq->rq.queue));
277 memset(wq->rq.queue, 0, wq->rq.memsize);
278 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
281 wq->db = rdev->lldi.db_reg;
283 wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
284 CXGB4_BAR2_QTYPE_EGRESS,
286 user ? &wq->sq.bar2_pa : NULL);
288 wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
289 CXGB4_BAR2_QTYPE_EGRESS,
291 user ? &wq->rq.bar2_pa : NULL);
294 * User mode must have bar2 access.
296 if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
297 pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
298 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
305 /* build fw_ri_res_wr */
306 wr_len = sizeof *res_wr + 2 * sizeof *res;
308 wr_len += sizeof(*res);
309 skb = alloc_skb(wr_len, GFP_KERNEL);
314 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
316 res_wr = __skb_put_zero(skb, wr_len);
317 res_wr->op_nres = cpu_to_be32(
318 FW_WR_OP_V(FW_RI_RES_WR) |
319 FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
321 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
322 res_wr->cookie = (uintptr_t)wr_waitp;
324 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
325 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
328 * eqsize is the number of 64B entries plus the status page size.
330 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
331 rdev->hw_queue.t4_eq_status_entries;
333 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
334 FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
335 FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
336 FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
337 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
338 FW_RI_RES_WR_IQID_V(scq->cqid));
339 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
340 FW_RI_RES_WR_DCAEN_V(0) |
341 FW_RI_RES_WR_DCACPU_V(0) |
342 FW_RI_RES_WR_FBMIN_V(2) |
343 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
344 FW_RI_RES_WR_FBMAX_V(3)) |
345 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
346 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
347 FW_RI_RES_WR_EQSIZE_V(eqsize));
348 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
349 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
353 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
354 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
357 * eqsize is the number of 64B entries plus the status page size
359 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
360 rdev->hw_queue.t4_eq_status_entries;
361 res->u.sqrq.fetchszm_to_iqid =
362 /* no host cidx updates */
363 cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
364 /* don't keep in chip cache */
365 FW_RI_RES_WR_CPRIO_V(0) |
366 /* set by uP at ri_init time */
367 FW_RI_RES_WR_PCIECHN_V(0) |
368 FW_RI_RES_WR_IQID_V(rcq->cqid));
369 res->u.sqrq.dcaen_to_eqsize =
370 cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
371 FW_RI_RES_WR_DCACPU_V(0) |
372 FW_RI_RES_WR_FBMIN_V(2) |
373 FW_RI_RES_WR_FBMAX_V(3) |
374 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
375 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
376 FW_RI_RES_WR_EQSIZE_V(eqsize));
377 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
378 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
381 c4iw_init_wr_wait(wr_waitp);
382 ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
386 pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
387 wq->sq.qid, wq->rq.qid, wq->db,
388 wq->sq.bar2_va, wq->rq.bar2_va);
393 dma_free_coherent(&rdev->lldi.pdev->dev,
394 wq->rq.memsize, wq->rq.queue,
395 dma_unmap_addr(&wq->rq, mapping));
397 dealloc_sq(rdev, &wq->sq);
400 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
408 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
410 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
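
/*
 * build_immd() copies the send WR's SGEs inline into the WQE as
 * FW_RI_DATA_IMMD data, wrapping at the end of the SQ ring (the WQE is
 * built directly in the DMA queue) and padding the immediate data out
 * to a 16-byte boundary. It fails if the total payload exceeds 'max'.
 */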
414 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
415 const struct ib_send_wr *wr, int max, u32 *plenp)
422 dstp = (u8 *)immdp->data;
423 for (i = 0; i < wr->num_sge; i++) {
424 if ((plen + wr->sg_list[i].length) > max)
426 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
427 plen += wr->sg_list[i].length;
428 rem = wr->sg_list[i].length;
430 if (dstp == (u8 *)&sq->queue[sq->size])
431 dstp = (u8 *)sq->queue;
432 if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
435 len = (u8 *)&sq->queue[sq->size] - dstp;
436 memcpy(dstp, srcp, len);
442 len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
444 memset(dstp, 0, len);
445 immdp->op = FW_RI_DATA_IMMD;
448 immdp->immdlen = cpu_to_be32(plen);
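
/*
 * build_isgl() translates an ib_sge array into a fw_ri_isgl: one flit
 * of lkey/length followed by one flit of address per SGE, wrapping from
 * queue_end back to queue_start as needed. The total byte count is
 * accumulated into *plenp when the caller provides it.
 */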
453 static int build_isgl(__be64 *queue_start, __be64 *queue_end,
454 struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
455 int num_sge, u32 *plenp)
462 if ((__be64 *)isglp == queue_end)
463 isglp = (struct fw_ri_isgl *)queue_start;
465 flitp = (__be64 *)isglp->sge;
467 for (i = 0; i < num_sge; i++) {
468 if ((plen + sg_list[i].length) < plen)
470 plen += sg_list[i].length;
471 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
473 if (++flitp == queue_end)
475 *flitp = cpu_to_be64(sg_list[i].addr);
476 if (++flitp == queue_end)
479 *flitp = (__force __be64)0;
480 isglp->op = FW_RI_DATA_ISGL;
482 isglp->nsge = cpu_to_be16(num_sge);
489 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
490 const struct ib_send_wr *wr, u8 *len16)
496 if (wr->num_sge > T4_MAX_SEND_SGE)
498 switch (wr->opcode) {
500 if (wr->send_flags & IB_SEND_SOLICITED)
501 wqe->send.sendop_pkd = cpu_to_be32(
502 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
504 wqe->send.sendop_pkd = cpu_to_be32(
505 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
506 wqe->send.stag_inv = 0;
508 case IB_WR_SEND_WITH_INV:
509 if (wr->send_flags & IB_SEND_SOLICITED)
510 wqe->send.sendop_pkd = cpu_to_be32(
511 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
513 wqe->send.sendop_pkd = cpu_to_be32(
514 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
515 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
526 if (wr->send_flags & IB_SEND_INLINE) {
527 ret = build_immd(sq, wqe->send.u.immd_src, wr,
528 T4_MAX_SEND_INLINE, &plen);
531 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
534 ret = build_isgl((__be64 *)sq->queue,
535 (__be64 *)&sq->queue[sq->size],
536 wqe->send.u.isgl_src,
537 wr->sg_list, wr->num_sge, &plen);
540 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
541 wr->num_sge * sizeof(struct fw_ri_sge);
544 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
545 wqe->send.u.immd_src[0].r1 = 0;
546 wqe->send.u.immd_src[0].r2 = 0;
547 wqe->send.u.immd_src[0].immdlen = 0;
548 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
551 *len16 = DIV_ROUND_UP(size, 16);
552 wqe->send.plen = cpu_to_be32(plen);
556 static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
557 const struct ib_send_wr *wr, u8 *len16)
563 if (wr->num_sge > T4_MAX_SEND_SGE)
	/*
	 * The iWARP protocol supports 64-bit immediate data, but the RDMA
	 * API limits it to 32 bits.
	 */
570 if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
571 wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data;
573 wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0;
574 wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
575 wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
577 if (wr->send_flags & IB_SEND_INLINE) {
578 ret = build_immd(sq, wqe->write.u.immd_src, wr,
579 T4_MAX_WRITE_INLINE, &plen);
582 size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
585 ret = build_isgl((__be64 *)sq->queue,
586 (__be64 *)&sq->queue[sq->size],
587 wqe->write.u.isgl_src,
588 wr->sg_list, wr->num_sge, &plen);
591 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
592 wr->num_sge * sizeof(struct fw_ri_sge);
595 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
596 wqe->write.u.immd_src[0].r1 = 0;
597 wqe->write.u.immd_src[0].r2 = 0;
598 wqe->write.u.immd_src[0].immdlen = 0;
599 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
602 *len16 = DIV_ROUND_UP(size, 16);
603 wqe->write.plen = cpu_to_be32(plen);
607 static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
608 struct ib_send_wr *wr)
610 memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16);
611 memset(immdp->r1, 0, 6);
612 immdp->op = FW_RI_DATA_IMMD;
616 static void build_rdma_write_cmpl(struct t4_sq *sq,
617 struct fw_ri_rdma_write_cmpl_wr *wcwr,
618 const struct ib_send_wr *wr, u8 *len16)
	/*
	 * This code assumes the struct fields preceding the write isgl
	 * fit in one 64B WR slot. This is because the WQE is built
	 * directly in the dma queue, and wrapping is only handled
	 * by the code building sgls. I.e. the "fixed part" of the wr
	 * structs must all fit in 64B. The WQE build code should probably be
	 * redesigned to avoid this restriction, but for now just add
	 * the BUILD_BUG_ON() to catch if this WQE struct gets too big.
	 */
	BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64);
634 wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
635 wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
636 if (wr->next->opcode == IB_WR_SEND)
639 wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
644 if (wr->next->send_flags & IB_SEND_INLINE)
645 build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
647 build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
648 &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL);
651 build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
652 wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen);
654 size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) +
655 wr->num_sge * sizeof(struct fw_ri_sge);
656 wcwr->plen = cpu_to_be32(plen);
657 *len16 = DIV_ROUND_UP(size, 16);
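
/*
 * build_rdma_read(): a zero-length read (no SGE, or a first SGE of
 * length 0) is still posted, but with a dummy STAG of 2 and zeroed
 * addresses, presumably so the request still flows through the SQ and
 * produces a completion without transferring any data.
 */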
660 static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
665 if (wr->num_sge && wr->sg_list[0].length) {
666 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
667 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
669 wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
670 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
671 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
672 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
674 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
676 wqe->read.stag_src = cpu_to_be32(2);
677 wqe->read.to_src_hi = 0;
678 wqe->read.to_src_lo = 0;
679 wqe->read.stag_sink = cpu_to_be32(2);
681 wqe->read.to_sink_hi = 0;
682 wqe->read.to_sink_lo = 0;
686 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
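
/*
 * post_write_cmpl() fuses a WRITE + SEND(_WITH_INV) chain into a single
 * FW_RI_RDMA_WRITE_CMPL_WR. Two sw_sq entries are still consumed, one
 * per IB work request, so completion handling is unchanged, but only
 * one firmware WR is placed in the ring and one doorbell is rung.
 */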
690 static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
692 bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) ||
694 bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
696 struct t4_swsqe *swsqe;
703 * The sw_sq entries still look like a WRITE and a SEND and consume
704 * 2 slots. The FW WR, however, will be a single uber-WR.
706 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
707 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
708 build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);
711 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
712 swsqe->opcode = FW_RI_RDMA_WRITE;
713 swsqe->idx = qhp->wq.sq.pidx;
715 swsqe->signaled = write_signaled;
717 swsqe->wr_id = wr->wr_id;
720 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
721 swsqe->host_time = ktime_get();
724 write_wrid = qhp->wq.sq.pidx;
726 /* just bump the sw_sq */
728 if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
731 /* SEND_WITH_INV swsqe */
732 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
733 if (wr->next->opcode == IB_WR_SEND)
734 swsqe->opcode = FW_RI_SEND;
736 swsqe->opcode = FW_RI_SEND_WITH_INV;
737 swsqe->idx = qhp->wq.sq.pidx;
739 swsqe->signaled = send_signaled;
741 swsqe->wr_id = wr->next->wr_id;
744 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
745 swsqe->host_time = ktime_get();
748 wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0;
749 wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;
751 init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR,
752 write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
753 t4_sq_produce(&qhp->wq, len16);
754 idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
756 t4_ring_sq_db(&qhp->wq, idx, wqe);
759 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
760 const struct ib_recv_wr *wr, u8 *len16)
764 ret = build_isgl((__be64 *)qhp->wq.rq.queue,
765 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
766 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
769 *len16 = DIV_ROUND_UP(sizeof wqe->recv +
770 wr->num_sge * sizeof(struct fw_ri_sge), 16);
774 static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
779 ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
780 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
783 *len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
784 wr->num_sge * sizeof(struct fw_ri_sge), 16);
788 static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
789 const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
792 __be64 *p = (__be64 *)fr->pbl;
794 fr->r2 = cpu_to_be32(0);
795 fr->stag = cpu_to_be32(mhp->ibmr.rkey);
797 fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
798 FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
799 FW_RI_TPTE_STAGSTATE_V(1) |
800 FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
801 FW_RI_TPTE_PDID_V(mhp->attr.pdid));
802 fr->tpte.locread_to_qpid = cpu_to_be32(
803 FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
804 FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
805 FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
806 fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
807 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
808 fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
809 fr->tpte.len_hi = cpu_to_be32(0);
810 fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
811 fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
812 fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
814 p[0] = cpu_to_be64((u64)mhp->mpl[0]);
815 p[1] = cpu_to_be64((u64)mhp->mpl[1]);
817 *len16 = DIV_ROUND_UP(sizeof(*fr), 16);
820 static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
821 const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
822 u8 *len16, bool dsgl_supported)
824 struct fw_ri_immd *imdp;
827 int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
830 if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
833 wqe->fr.qpbinde_to_dcacpu = 0;
834 wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
835 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
836 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
838 wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
839 wqe->fr.stag = cpu_to_be32(wr->key);
840 wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
841 wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
844 if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
845 struct fw_ri_dsgl *sglp;
847 for (i = 0; i < mhp->mpl_len; i++)
848 mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);
850 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
851 sglp->op = FW_RI_DATA_DSGL;
853 sglp->nsge = cpu_to_be16(1);
854 sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
855 sglp->len0 = cpu_to_be32(pbllen);
857 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
859 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
860 imdp->op = FW_RI_DATA_IMMD;
863 imdp->immdlen = cpu_to_be32(pbllen);
864 p = (__be64 *)(imdp + 1);
866 for (i = 0; i < mhp->mpl_len; i++) {
867 *p = cpu_to_be64((u64)mhp->mpl[i]);
869 if (++p == (__be64 *)&sq->queue[sq->size])
870 p = (__be64 *)sq->queue;
875 if (++p == (__be64 *)&sq->queue[sq->size])
876 p = (__be64 *)sq->queue;
878 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
884 static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
887 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
889 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
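
/*
 * QP teardown is reference counted: the final kref_put() schedules
 * free_qp_work() on the rdev free_workq so destroy_qp(), which may
 * sleep, runs in process context even if the last reference is dropped
 * from an atomic context.
 */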
static void free_qp_work(struct work_struct *work)
{
	struct c4iw_ucontext *ucontext;
	struct c4iw_qp *qhp;
	struct c4iw_dev *rhp;

	qhp = container_of(work, struct c4iw_qp, free_work);
	ucontext = qhp->ucontext;
	rhp = qhp->rhp;

	pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);

	c4iw_put_wr_wait(qhp->wr_waitp);
	kfree(qhp);
}

static void queue_qp_free(struct kref *kref)
{
	struct c4iw_qp *qhp;

	qhp = container_of(kref, struct c4iw_qp, kref);
	pr_debug("qhp %p\n", qhp);
	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	pr_debug("ib_qp %p\n", qp);
	kref_get(&to_c4iw_qp(qp)->kref);
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	pr_debug("ib_qp %p\n", qp);
	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
}

static void add_to_fc_list(struct list_head *head, struct list_head *entry)
{
	if (list_empty(entry))
		list_add_tail(entry, head);
}
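
/*
 * Kernel-QP doorbell helpers: when the device doorbell state is NORMAL
 * the SQ/RQ doorbell is rung directly; otherwise the QP is queued on
 * the db_fc_list and the pending pidx increment is accumulated so the
 * doorbell flow-control recovery logic can ring it later.
 */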
static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	xa_lock_irqsave(&qhp->rhp->qps, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_sq_db(&qhp->wq, inc, NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.sq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
	return 0;
}

static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	xa_lock_irqsave(&qhp->rhp->qps, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_rq_db(&qhp->wq, inc, NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.rq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
	return 0;
}
972 static int ib_to_fw_opcode(int ib_opcode)
977 case IB_WR_SEND_WITH_INV:
978 opcode = FW_RI_SEND_WITH_INV;
983 case IB_WR_RDMA_WRITE:
984 opcode = FW_RI_RDMA_WRITE;
986 case IB_WR_RDMA_WRITE_WITH_IMM:
987 opcode = FW_RI_WRITE_IMMEDIATE;
989 case IB_WR_RDMA_READ:
990 case IB_WR_RDMA_READ_WITH_INV:
991 opcode = FW_RI_READ_REQ;
994 opcode = FW_RI_FAST_REGISTER;
996 case IB_WR_LOCAL_INV:
997 opcode = FW_RI_LOCAL_INV;
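
/*
 * Drain support: once a QP has been flushed, posted work requests can
 * no longer reach the hardware, so complete_sq_drain_wr() manufactures
 * a software CQE with status T4_ERR_SWFLUSH and inserts it directly
 * into the send CQ's software queue, waking the completion handler if
 * the CQ was armed.
 */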
1005 static int complete_sq_drain_wr(struct c4iw_qp *qhp,
1006 const struct ib_send_wr *wr)
1008 struct t4_cqe cqe = {};
1009 struct c4iw_cq *schp;
1014 schp = to_c4iw_cq(qhp->ibqp.send_cq);
1017 opcode = ib_to_fw_opcode(wr->opcode);
1021 cqe.u.drain_cookie = wr->wr_id;
1022 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
1023 CQE_OPCODE_V(opcode) |
1027 CQE_QPID_V(qhp->wq.sq.qid));
1029 spin_lock_irqsave(&schp->lock, flag);
1030 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
1031 cq->sw_queue[cq->sw_pidx] = cqe;
1032 t4_swcq_produce(cq);
1033 spin_unlock_irqrestore(&schp->lock, flag);
1035 if (t4_clear_cq_armed(&schp->cq)) {
1036 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1037 (*schp->ibcq.comp_handler)(&schp->ibcq,
1038 schp->ibcq.cq_context);
1039 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1044 static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
1045 const struct ib_send_wr *wr,
1046 const struct ib_send_wr **bad_wr)
1051 ret = complete_sq_drain_wr(qhp, wr);
1061 static void complete_rq_drain_wr(struct c4iw_qp *qhp,
1062 const struct ib_recv_wr *wr)
1064 struct t4_cqe cqe = {};
1065 struct c4iw_cq *rchp;
1069 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1072 cqe.u.drain_cookie = wr->wr_id;
1073 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
1074 CQE_OPCODE_V(FW_RI_SEND) |
1078 CQE_QPID_V(qhp->wq.sq.qid));
1080 spin_lock_irqsave(&rchp->lock, flag);
1081 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
1082 cq->sw_queue[cq->sw_pidx] = cqe;
1083 t4_swcq_produce(cq);
1084 spin_unlock_irqrestore(&rchp->lock, flag);
1086 if (t4_clear_cq_armed(&rchp->cq)) {
1087 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1088 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1089 rchp->ibcq.cq_context);
1090 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1094 static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
1095 const struct ib_recv_wr *wr)
1098 complete_rq_drain_wr(qhp, wr);
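
/*
 * c4iw_post_send() builds firmware WRs directly in the SQ ring under
 * qhp->lock: flushed QPs get software drain completions, an eligible
 * WRITE+SEND chain takes the write_cmpl fastpath, and everything else
 * is translated per-opcode before the doorbell is rung (or deferred to
 * ring_kernel_sq_db() when doorbells are off).
 */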
1103 int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1104 const struct ib_send_wr **bad_wr)
1108 enum fw_wr_opcodes fw_opcode = 0;
1109 enum fw_ri_wr_flags fw_flags;
1110 struct c4iw_qp *qhp;
1111 struct c4iw_dev *rhp;
1112 union t4_wr *wqe = NULL;
1114 struct t4_swsqe *swsqe;
1118 qhp = to_c4iw_qp(ibqp);
1120 spin_lock_irqsave(&qhp->lock, flag);
1123 * If the qp has been flushed, then just insert a special
1126 if (qhp->wq.flushed) {
1127 spin_unlock_irqrestore(&qhp->lock, flag);
1128 err = complete_sq_drain_wrs(qhp, wr, bad_wr);
1131 num_wrs = t4_sq_avail(&qhp->wq);
1133 spin_unlock_irqrestore(&qhp->lock, flag);
	/*
	 * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is
	 * the response for small NVMe-oF READ requests. If the chain is
	 * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths
	 * and lengths meet the requirements of the fw_ri_write_cmpl_wr work
	 * request, then build and post the write_cmpl WR. If any of the tests
	 * below are not true, then we continue on with the traditional WRITE
	 * and SEND WRs.
	 */
1147 if (qhp->rhp->rdev.lldi.write_cmpl_support &&
1148 CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
1150 wr && wr->next && !wr->next->next &&
1151 wr->opcode == IB_WR_RDMA_WRITE &&
1152 wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
1153 (wr->next->opcode == IB_WR_SEND ||
1154 wr->next->opcode == IB_WR_SEND_WITH_INV) &&
1155 wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
1156 wr->next->num_sge == 1 && num_wrs >= 2) {
1157 post_write_cmpl(qhp, wr);
1158 spin_unlock_irqrestore(&qhp->lock, flag);
1168 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
1169 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
1172 if (wr->send_flags & IB_SEND_SOLICITED)
1173 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
1174 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
1175 fw_flags |= FW_RI_COMPLETION_FLAG;
1176 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
1177 switch (wr->opcode) {
1178 case IB_WR_SEND_WITH_INV:
1180 if (wr->send_flags & IB_SEND_FENCE)
1181 fw_flags |= FW_RI_READ_FENCE_FLAG;
1182 fw_opcode = FW_RI_SEND_WR;
1183 if (wr->opcode == IB_WR_SEND)
1184 swsqe->opcode = FW_RI_SEND;
1186 swsqe->opcode = FW_RI_SEND_WITH_INV;
1187 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
1189 case IB_WR_RDMA_WRITE_WITH_IMM:
1190 if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) {
1194 fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
1196 case IB_WR_RDMA_WRITE:
1197 fw_opcode = FW_RI_RDMA_WRITE_WR;
1198 swsqe->opcode = FW_RI_RDMA_WRITE;
1199 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
1201 case IB_WR_RDMA_READ:
1202 case IB_WR_RDMA_READ_WITH_INV:
1203 fw_opcode = FW_RI_RDMA_READ_WR;
1204 swsqe->opcode = FW_RI_READ_REQ;
1205 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
1206 c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey);
1207 fw_flags = FW_RI_RDMA_READ_INVALIDATE;
1211 err = build_rdma_read(wqe, wr, &len16);
1214 swsqe->read_len = wr->sg_list[0].length;
1215 if (!qhp->wq.sq.oldest_read)
1216 qhp->wq.sq.oldest_read = swsqe;
1218 case IB_WR_REG_MR: {
1219 struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
1221 swsqe->opcode = FW_RI_FAST_REGISTER;
1222 if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
1223 !mhp->attr.state && mhp->mpl_len <= 2) {
1224 fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
1225 build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
1228 fw_opcode = FW_RI_FR_NSMR_WR;
1229 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
1231 rhp->rdev.lldi.ulptx_memwrite_dsgl);
1235 mhp->attr.state = 1;
1238 case IB_WR_LOCAL_INV:
1239 if (wr->send_flags & IB_SEND_FENCE)
1240 fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
1241 fw_opcode = FW_RI_INV_LSTAG_WR;
1242 swsqe->opcode = FW_RI_LOCAL_INV;
1243 err = build_inv_stag(wqe, wr, &len16);
1244 c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey);
1247 pr_warn("%s post of type=%d TBD!\n", __func__,
1255 swsqe->idx = qhp->wq.sq.pidx;
1256 swsqe->complete = 0;
1257 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
1260 swsqe->wr_id = wr->wr_id;
1262 swsqe->sge_ts = cxgb4_read_sge_timestamp(
1263 rhp->rdev.lldi.ports[0]);
1264 swsqe->host_time = ktime_get();
1267 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
1269 pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
1270 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
1271 swsqe->opcode, swsqe->read_len);
1274 t4_sq_produce(&qhp->wq, len16);
1275 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
1277 if (!rhp->rdev.status_page->db_off) {
1278 t4_ring_sq_db(&qhp->wq, idx, wqe);
1279 spin_unlock_irqrestore(&qhp->lock, flag);
1281 spin_unlock_irqrestore(&qhp->lock, flag);
1282 ring_kernel_sq_db(qhp, idx);
1287 int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1288 const struct ib_recv_wr **bad_wr)
1291 struct c4iw_qp *qhp;
1292 union t4_recv_wr *wqe = NULL;
1298 qhp = to_c4iw_qp(ibqp);
1299 spin_lock_irqsave(&qhp->lock, flag);
1302 * If the qp has been flushed, then just insert a special
1305 if (qhp->wq.flushed) {
1306 spin_unlock_irqrestore(&qhp->lock, flag);
1307 complete_rq_drain_wrs(qhp, wr);
1310 num_wrs = t4_rq_avail(&qhp->wq);
1312 spin_unlock_irqrestore(&qhp->lock, flag);
1317 if (wr->num_sge > T4_MAX_RECV_SGE) {
1322 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
1323 qhp->wq.rq.wq_pidx *
1326 err = build_rdma_recv(qhp, wqe, wr, &len16);
1334 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
1336 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
1337 cxgb4_read_sge_timestamp(
1338 qhp->rhp->rdev.lldi.ports[0]);
1339 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
1343 wqe->recv.opcode = FW_RI_RECV_WR;
1345 wqe->recv.wrid = qhp->wq.rq.pidx;
1346 wqe->recv.r2[0] = 0;
1347 wqe->recv.r2[1] = 0;
1348 wqe->recv.r2[2] = 0;
1349 wqe->recv.len16 = len16;
1350 pr_debug("cookie 0x%llx pidx %u\n",
1351 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
1352 t4_rq_produce(&qhp->wq, len16);
1353 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
1357 if (!qhp->rhp->rdev.status_page->db_off) {
1358 t4_ring_rq_db(&qhp->wq, idx, wqe);
1359 spin_unlock_irqrestore(&qhp->lock, flag);
1361 spin_unlock_irqrestore(&qhp->lock, flag);
1362 ring_kernel_rq_db(qhp, idx);
1367 static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
1368 u64 wr_id, u8 len16)
1370 struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];
1372 pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
1373 __func__, srq->cidx, srq->pidx, srq->wq_pidx,
1374 srq->in_use, srq->ooo_count,
1375 (unsigned long long)wr_id, srq->pending_cidx,
1376 srq->pending_pidx, srq->pending_in_use);
1379 memcpy(&pwr->wqe, wqe, len16 * 16);
1380 t4_srq_produce_pending_wr(srq);
1383 int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
1384 const struct ib_recv_wr **bad_wr)
1386 union t4_recv_wr *wqe, lwqe;
1387 struct c4iw_srq *srq;
1394 srq = to_c4iw_srq(ibsrq);
1395 spin_lock_irqsave(&srq->lock, flag);
1396 num_wrs = t4_srq_avail(&srq->wq);
1398 spin_unlock_irqrestore(&srq->lock, flag);
1402 if (wr->num_sge > T4_MAX_RECV_SGE) {
1409 err = build_srq_recv(wqe, wr, &len16);
1417 wqe->recv.opcode = FW_RI_RECV_WR;
1419 wqe->recv.wrid = srq->wq.pidx;
1420 wqe->recv.r2[0] = 0;
1421 wqe->recv.r2[1] = 0;
1422 wqe->recv.r2[2] = 0;
1423 wqe->recv.len16 = len16;
1425 if (srq->wq.ooo_count ||
1426 srq->wq.pending_in_use ||
1427 srq->wq.sw_rq[srq->wq.pidx].valid) {
1428 defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
1430 srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
1431 srq->wq.sw_rq[srq->wq.pidx].valid = 1;
1432 c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
1433 pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
1434 __func__, srq->wq.cidx,
1435 srq->wq.pidx, srq->wq.wq_pidx,
1437 (unsigned long long)wr->wr_id);
1438 t4_srq_produce(&srq->wq, len16);
1439 idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
1445 t4_ring_srq_db(&srq->wq, idx, len16, wqe);
1446 spin_unlock_irqrestore(&srq->lock, flag);
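
/*
 * build_term_codes() maps the error CQE's status and opcode onto the
 * TERMINATE message layer/etype and error code defined by the iWARP
 * (RDMAP/DDP/MPA) specs; with no error CQE it falls back to a local
 * catastrophic RDMAP error.
 */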
1450 static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
1460 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1465 status = CQE_STATUS(err_cqe);
1466 opcode = CQE_OPCODE(err_cqe);
1467 rqtype = RQ_TYPE(err_cqe);
1468 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
1469 (opcode == FW_RI_SEND_WITH_SE_INV);
1470 tagged = (opcode == FW_RI_RDMA_WRITE) ||
1471 (rqtype && (opcode == FW_RI_READ_RESP));
1476 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1477 *ecode = RDMAP_CANT_INV_STAG;
1479 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1480 *ecode = RDMAP_INV_STAG;
1484 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1485 if ((opcode == FW_RI_SEND_WITH_INV) ||
1486 (opcode == FW_RI_SEND_WITH_SE_INV))
1487 *ecode = RDMAP_CANT_INV_STAG;
1489 *ecode = RDMAP_STAG_NOT_ASSOC;
1492 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1493 *ecode = RDMAP_STAG_NOT_ASSOC;
1496 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1497 *ecode = RDMAP_ACC_VIOL;
1500 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1501 *ecode = RDMAP_TO_WRAP;
1505 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1506 *ecode = DDPT_BASE_BOUNDS;
1508 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1509 *ecode = RDMAP_BASE_BOUNDS;
1512 case T4_ERR_INVALIDATE_SHARED_MR:
1513 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
1514 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1515 *ecode = RDMAP_CANT_INV_STAG;
1518 case T4_ERR_ECC_PSTAG:
1519 case T4_ERR_INTERNAL_ERR:
1520 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
1523 case T4_ERR_OUT_OF_RQE:
1524 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1525 *ecode = DDPU_INV_MSN_NOBUF;
1527 case T4_ERR_PBL_ADDR_BOUND:
1528 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1529 *ecode = DDPT_BASE_BOUNDS;
1532 *layer_type = LAYER_MPA|DDP_LLP;
1533 *ecode = MPA_CRC_ERR;
1536 *layer_type = LAYER_MPA|DDP_LLP;
1537 *ecode = MPA_MARKER_ERR;
1539 case T4_ERR_PDU_LEN_ERR:
1540 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1541 *ecode = DDPU_MSG_TOOBIG;
1543 case T4_ERR_DDP_VERSION:
1545 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1546 *ecode = DDPT_INV_VERS;
1548 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1549 *ecode = DDPU_INV_VERS;
1552 case T4_ERR_RDMA_VERSION:
1553 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1554 *ecode = RDMAP_INV_VERS;
1557 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1558 *ecode = RDMAP_INV_OPCODE;
1560 case T4_ERR_DDP_QUEUE_NUM:
1561 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1562 *ecode = DDPU_INV_QN;
1565 case T4_ERR_MSN_GAP:
1566 case T4_ERR_MSN_RANGE:
1567 case T4_ERR_IRD_OVERFLOW:
1568 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1569 *ecode = DDPU_INV_MSN_RANGE;
1572 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
1576 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1577 *ecode = DDPU_INV_MO;
1580 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1586 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
1589 struct fw_ri_wr *wqe;
1590 struct sk_buff *skb;
1591 struct terminate_message *term;
1593 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
1596 skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
1600 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1602 wqe = __skb_put_zero(skb, sizeof(*wqe));
1603 wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
1604 wqe->flowid_len16 = cpu_to_be32(
1605 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1606 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1608 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
1609 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
1610 term = (struct terminate_message *)wqe->u.terminate.termmsg;
1611 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1612 term->layer_etype = qhp->attr.layer_etype;
1613 term->ecode = qhp->attr.ecode;
1615 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
1616 c4iw_ofld_send(&qhp->rhp->rdev, skb);
1620 * Assumes qhp lock is held.
1622 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1623 struct c4iw_cq *schp)
1626 int rq_flushed = 0, sq_flushed;
1629 pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
1631 /* locking hierarchy: cqs lock first, then qp lock. */
1632 spin_lock_irqsave(&rchp->lock, flag);
1634 spin_lock(&schp->lock);
1635 spin_lock(&qhp->lock);
1637 if (qhp->wq.flushed) {
1638 spin_unlock(&qhp->lock);
1640 spin_unlock(&schp->lock);
1641 spin_unlock_irqrestore(&rchp->lock, flag);
1644 qhp->wq.flushed = 1;
1645 t4_set_wq_in_error(&qhp->wq, 0);
1647 c4iw_flush_hw_cq(rchp, qhp);
1649 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1650 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1654 c4iw_flush_hw_cq(schp, qhp);
1655 sq_flushed = c4iw_flush_sq(qhp);
1657 spin_unlock(&qhp->lock);
1659 spin_unlock(&schp->lock);
1660 spin_unlock_irqrestore(&rchp->lock, flag);
1663 if ((rq_flushed || sq_flushed) &&
1664 t4_clear_cq_armed(&rchp->cq)) {
1665 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1666 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1667 rchp->ibcq.cq_context);
1668 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1671 if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
1672 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1673 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1674 rchp->ibcq.cq_context);
1675 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1677 if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
1678 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1679 (*schp->ibcq.comp_handler)(&schp->ibcq,
1680 schp->ibcq.cq_context);
1681 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1686 static void flush_qp(struct c4iw_qp *qhp)
1688 struct c4iw_cq *rchp, *schp;
1691 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1692 schp = to_c4iw_cq(qhp->ibqp.send_cq);
1694 if (qhp->ibqp.uobject) {
1696 /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
1697 if (qhp->wq.flushed)
1700 qhp->wq.flushed = 1;
1701 t4_set_wq_in_error(&qhp->wq, 0);
1702 t4_set_cq_in_error(&rchp->cq);
1703 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1704 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
1705 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1707 t4_set_cq_in_error(&schp->cq);
1708 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1709 (*schp->ibcq.comp_handler)(&schp->ibcq,
1710 schp->ibcq.cq_context);
1711 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1715 __flush_qp(qhp, rchp, schp);
1718 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1721 struct fw_ri_wr *wqe;
1723 struct sk_buff *skb;
1725 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
1727 skb = skb_dequeue(&ep->com.ep_skb_list);
1731 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1733 wqe = __skb_put_zero(skb, sizeof(*wqe));
1734 wqe->op_compl = cpu_to_be32(
1735 FW_WR_OP_V(FW_RI_INIT_WR) |
1737 wqe->flowid_len16 = cpu_to_be32(
1738 FW_WR_FLOWID_V(ep->hwtid) |
1739 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1740 wqe->cookie = (uintptr_t)ep->com.wr_waitp;
1742 wqe->u.fini.type = FW_RI_TYPE_FINI;
1744 ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
1745 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1747 pr_debug("ret %d\n", ret);
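
/*
 * build_rtr_msg() seeds the "ready to receive" portion of the RI init
 * WR for MPA peer-to-peer mode: a dummy RDMA WRITE or a dummy READ REQ,
 * depending on the negotiated p2p_type.
 */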
1751 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1753 pr_debug("p2p_type = %d\n", p2p_type);
1754 memset(&init->u, 0, sizeof init->u);
1756 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1757 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1758 init->u.write.stag_sink = cpu_to_be32(1);
1759 init->u.write.to_sink = cpu_to_be64(1);
1760 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1761 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1762 sizeof(struct fw_ri_immd),
1765 case FW_RI_INIT_P2PTYPE_READ_REQ:
1766 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1767 init->u.read.stag_src = cpu_to_be32(1);
1768 init->u.read.to_src_lo = cpu_to_be32(1);
1769 init->u.read.stag_sink = cpu_to_be32(1);
1770 init->u.read.to_sink_lo = cpu_to_be32(1);
1771 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1776 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1778 struct fw_ri_wr *wqe;
1780 struct sk_buff *skb;
1782 pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
1783 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
1785 skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
1790 ret = alloc_ird(rhp, qhp->attr.max_ird);
1792 qhp->attr.max_ird = 0;
1796 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1798 wqe = __skb_put_zero(skb, sizeof(*wqe));
1799 wqe->op_compl = cpu_to_be32(
1800 FW_WR_OP_V(FW_RI_INIT_WR) |
1802 wqe->flowid_len16 = cpu_to_be32(
1803 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1804 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1806 wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
1808 wqe->u.init.type = FW_RI_TYPE_INIT;
1809 wqe->u.init.mpareqbit_p2ptype =
1810 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
1811 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
1812 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1813 if (qhp->attr.mpa_attr.recv_marker_enabled)
1814 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1815 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1816 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1817 if (qhp->attr.mpa_attr.crc_enabled)
1818 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1820 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1821 FW_RI_QP_RDMA_WRITE_ENABLE |
1822 FW_RI_QP_BIND_ENABLE;
1823 if (!qhp->ibqp.uobject)
1824 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1825 FW_RI_QP_STAG0_ENABLE;
1826 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1827 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1828 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1829 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1831 wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
1834 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1835 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1836 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1837 rhp->rdev.lldi.vr->rq.start);
1839 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1840 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1841 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1842 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1843 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1844 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1845 if (qhp->attr.mpa_attr.initiator)
1846 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1848 ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
1849 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1853 free_ird(rhp, qhp->attr.max_ird);
1855 pr_debug("ret %d\n", ret);
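
/*
 * c4iw_modify_qp() is the driver's QP state machine. Under qhp->mutex
 * it applies attribute changes, then walks the IDLE/RTS/CLOSING/
 * TERMINATE/ERROR transitions: RTS entry posts FW_RI_TYPE_INIT via
 * rdma_init(), orderly exits post FW_RI_TYPE_FINI via rdma_fini(), and
 * error paths flush the queues and disassociate the endpoint.
 */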
1859 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1860 enum c4iw_qp_attr_mask mask,
1861 struct c4iw_qp_attributes *attrs,
1865 struct c4iw_qp_attributes newattr = qhp->attr;
1870 struct c4iw_ep *ep = NULL;
1872 pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
1873 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1874 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1876 mutex_lock(&qhp->mutex);
1878 /* Process attr changes if in IDLE */
1879 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1880 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1884 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1885 newattr.enable_rdma_read = attrs->enable_rdma_read;
1886 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1887 newattr.enable_rdma_write = attrs->enable_rdma_write;
1888 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1889 newattr.enable_bind = attrs->enable_bind;
1890 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1891 if (attrs->max_ord > c4iw_max_read_depth) {
1895 newattr.max_ord = attrs->max_ord;
1897 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1898 if (attrs->max_ird > cur_max_read_depth(rhp)) {
1902 newattr.max_ird = attrs->max_ird;
1904 qhp->attr = newattr;
1907 if (mask & C4IW_QP_ATTR_SQ_DB) {
1908 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
1911 if (mask & C4IW_QP_ATTR_RQ_DB) {
1912 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
1916 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1918 if (qhp->attr.state == attrs->next_state)
1921 switch (qhp->attr.state) {
1922 case C4IW_QP_STATE_IDLE:
1923 switch (attrs->next_state) {
1924 case C4IW_QP_STATE_RTS:
1925 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1929 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1933 qhp->attr.mpa_attr = attrs->mpa_attr;
1934 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1935 qhp->ep = qhp->attr.llp_stream_handle;
1936 set_state(qhp, C4IW_QP_STATE_RTS);
1939 * Ref the endpoint here and deref when we
1940 * disassociate the endpoint from the QP. This
1941 * happens in CLOSING->IDLE transition or *->ERROR
1944 c4iw_get_ep(&qhp->ep->com);
1945 ret = rdma_init(rhp, qhp);
1949 case C4IW_QP_STATE_ERROR:
1950 set_state(qhp, C4IW_QP_STATE_ERROR);
1958 case C4IW_QP_STATE_RTS:
1959 switch (attrs->next_state) {
1960 case C4IW_QP_STATE_CLOSING:
1961 t4_set_wq_in_error(&qhp->wq, 0);
1962 set_state(qhp, C4IW_QP_STATE_CLOSING);
1967 c4iw_get_ep(&qhp->ep->com);
1969 ret = rdma_fini(rhp, qhp, ep);
1973 case C4IW_QP_STATE_TERMINATE:
1974 t4_set_wq_in_error(&qhp->wq, 0);
1975 set_state(qhp, C4IW_QP_STATE_TERMINATE);
1976 qhp->attr.layer_etype = attrs->layer_etype;
1977 qhp->attr.ecode = attrs->ecode;
1979 c4iw_get_ep(&ep->com);
1984 terminate = qhp->attr.send_term;
1985 ret = rdma_fini(rhp, qhp, ep);
1990 case C4IW_QP_STATE_ERROR:
1991 t4_set_wq_in_error(&qhp->wq, 0);
1992 set_state(qhp, C4IW_QP_STATE_ERROR);
1997 c4iw_get_ep(&qhp->ep->com);
2006 case C4IW_QP_STATE_CLOSING:
2009 * Allow kernel users to move to ERROR for qp draining.
2011 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
2012 C4IW_QP_STATE_ERROR)) {
2016 switch (attrs->next_state) {
2017 case C4IW_QP_STATE_IDLE:
2019 set_state(qhp, C4IW_QP_STATE_IDLE);
2020 qhp->attr.llp_stream_handle = NULL;
2021 c4iw_put_ep(&qhp->ep->com);
2023 wake_up(&qhp->wait);
2025 case C4IW_QP_STATE_ERROR:
2032 case C4IW_QP_STATE_ERROR:
2033 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
2037 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
2041 set_state(qhp, C4IW_QP_STATE_IDLE);
2043 case C4IW_QP_STATE_TERMINATE:
2051 pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
2058 pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
2061 /* disassociate the LLP connection */
2062 qhp->attr.llp_stream_handle = NULL;
2066 set_state(qhp, C4IW_QP_STATE_ERROR);
2070 wake_up(&qhp->wait);
2072 mutex_unlock(&qhp->mutex);
2075 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
2078 * If disconnect is 1, then we need to initiate a disconnect
2079 * on the EP. This can be a normal close (RTS->CLOSING) or
2080 * an abnormal close (RTS/CLOSING->ERROR).
2083 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
2085 c4iw_put_ep(&ep->com);
2089 * If free is 1, then we've disassociated the EP from the QP
2090 * and we need to dereference the EP.
2093 c4iw_put_ep(&ep->com);
2094 pr_debug("exit state %d\n", qhp->attr.state);
2098 int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
2100 struct c4iw_dev *rhp;
2101 struct c4iw_qp *qhp;
2102 struct c4iw_qp_attributes attrs;
2104 qhp = to_c4iw_qp(ib_qp);
2107 attrs.next_state = C4IW_QP_STATE_ERROR;
2108 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
2109 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2111 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
2112 wait_event(qhp->wait, !qhp->ep);
2114 xa_lock_irq(&rhp->qps);
2115 __xa_erase(&rhp->qps, qhp->wq.sq.qid);
2116 if (!list_empty(&qhp->db_fc_entry))
2117 list_del_init(&qhp->db_fc_entry);
2118 xa_unlock_irq(&rhp->qps);
2119 free_ird(rhp, qhp->attr.max_ird);
2121 c4iw_qp_rem_ref(ib_qp);
2123 pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
2127 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
2128 struct ib_udata *udata)
2130 struct c4iw_dev *rhp;
2131 struct c4iw_qp *qhp;
2132 struct c4iw_pd *php;
2133 struct c4iw_cq *schp;
2134 struct c4iw_cq *rchp;
2135 struct c4iw_create_qp_resp uresp;
2136 unsigned int sqsize, rqsize = 0;
2137 struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
2138 udata, struct c4iw_ucontext, ibucontext);
2140 struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
2141 struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
2143 pr_debug("ib_pd %p\n", pd);
2145 if (attrs->qp_type != IB_QPT_RC)
2146 return ERR_PTR(-EINVAL);
2148 php = to_c4iw_pd(pd);
2150 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
2151 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
2153 return ERR_PTR(-EINVAL);
2155 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
2156 return ERR_PTR(-EINVAL);
2159 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
2160 return ERR_PTR(-E2BIG);
2161 rqsize = attrs->cap.max_recv_wr + 1;
2166 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
2167 return ERR_PTR(-E2BIG);
2168 sqsize = attrs->cap.max_send_wr + 1;
2172 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
2174 return ERR_PTR(-ENOMEM);
2176 qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
2177 if (!qhp->wr_waitp) {
2182 qhp->wq.sq.size = sqsize;
2183 qhp->wq.sq.memsize =
2184 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2185 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
2186 qhp->wq.sq.flush_cidx = -1;
2188 qhp->wq.rq.size = rqsize;
2189 qhp->wq.rq.memsize =
2190 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2191 sizeof(*qhp->wq.rq.queue);
2195 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
2197 qhp->wq.rq.memsize =
2198 roundup(qhp->wq.rq.memsize, PAGE_SIZE);
2201 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
2202 ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2203 qhp->wr_waitp, !attrs->srq);
2205 goto err_free_wr_wait;
2207 attrs->cap.max_recv_wr = rqsize - 1;
2208 attrs->cap.max_send_wr = sqsize - 1;
2209 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
2212 qhp->attr.pd = php->pdid;
2213 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
2214 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
2215 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
2216 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
2217 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
2219 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
2220 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
2222 qhp->attr.state = C4IW_QP_STATE_IDLE;
2223 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
2224 qhp->attr.enable_rdma_read = 1;
2225 qhp->attr.enable_rdma_write = 1;
2226 qhp->attr.enable_bind = 1;
2227 qhp->attr.max_ord = 0;
2228 qhp->attr.max_ird = 0;
2229 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
2230 spin_lock_init(&qhp->lock);
2231 mutex_init(&qhp->mutex);
2232 init_waitqueue_head(&qhp->wait);
2233 kref_init(&qhp->kref);
2234 INIT_WORK(&qhp->free_work, free_qp_work);
2236 ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
2238 goto err_destroy_qp;
2240 if (udata && ucontext) {
2241 sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
2244 goto err_remove_handle;
2247 rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
2250 goto err_free_sq_key;
2253 sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
2254 if (!sq_db_key_mm) {
2256 goto err_free_rq_key;
2260 kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
2261 if (!rq_db_key_mm) {
2263 goto err_free_sq_db_key;
2266 memset(&uresp, 0, sizeof(uresp));
2267 if (t4_sq_onchip(&qhp->wq.sq)) {
2268 ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
2270 if (!ma_sync_key_mm) {
2272 goto err_free_rq_db_key;
2274 uresp.flags = C4IW_QPF_ONCHIP;
2276 if (rhp->rdev.lldi.write_w_imm_support)
2277 uresp.flags |= C4IW_QPF_WRITE_W_IMM;
2278 uresp.qid_mask = rhp->rdev.qpmask;
2279 uresp.sqid = qhp->wq.sq.qid;
2280 uresp.sq_size = qhp->wq.sq.size;
2281 uresp.sq_memsize = qhp->wq.sq.memsize;
2283 uresp.rqid = qhp->wq.rq.qid;
2284 uresp.rq_size = qhp->wq.rq.size;
2285 uresp.rq_memsize = qhp->wq.rq.memsize;
2287 spin_lock(&ucontext->mmap_lock);
2288 if (ma_sync_key_mm) {
2289 uresp.ma_sync_key = ucontext->key;
2290 ucontext->key += PAGE_SIZE;
2292 uresp.sq_key = ucontext->key;
2293 ucontext->key += PAGE_SIZE;
2295 uresp.rq_key = ucontext->key;
2296 ucontext->key += PAGE_SIZE;
2298 uresp.sq_db_gts_key = ucontext->key;
2299 ucontext->key += PAGE_SIZE;
2301 uresp.rq_db_gts_key = ucontext->key;
2302 ucontext->key += PAGE_SIZE;
2304 spin_unlock(&ucontext->mmap_lock);
2305 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
2307 goto err_free_ma_sync_key;
2308 sq_key_mm->key = uresp.sq_key;
2309 sq_key_mm->addr = qhp->wq.sq.phys_addr;
2310 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
2311 insert_mmap(ucontext, sq_key_mm);
2313 rq_key_mm->key = uresp.rq_key;
2314 rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
2315 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
2316 insert_mmap(ucontext, rq_key_mm);
2318 sq_db_key_mm->key = uresp.sq_db_gts_key;
2319 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
2320 sq_db_key_mm->len = PAGE_SIZE;
2321 insert_mmap(ucontext, sq_db_key_mm);
2323 rq_db_key_mm->key = uresp.rq_db_gts_key;
2324 rq_db_key_mm->addr =
2325 (u64)(unsigned long)qhp->wq.rq.bar2_pa;
2326 rq_db_key_mm->len = PAGE_SIZE;
2327 insert_mmap(ucontext, rq_db_key_mm);
2329 if (ma_sync_key_mm) {
2330 ma_sync_key_mm->key = uresp.ma_sync_key;
2331 ma_sync_key_mm->addr =
2332 (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
2333 PCIE_MA_SYNC_A) & PAGE_MASK;
2334 ma_sync_key_mm->len = PAGE_SIZE;
2335 insert_mmap(ucontext, ma_sync_key_mm);
2338 qhp->ucontext = ucontext;
2342 &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
2345 &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
2347 &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
2350 qhp->ibqp.qp_num = qhp->wq.sq.qid;
2352 qhp->srq = to_c4iw_srq(attrs->srq);
2353 INIT_LIST_HEAD(&qhp->db_fc_entry);
2354 pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
2355 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
2356 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
2357 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
2359 err_free_ma_sync_key:
2360 kfree(ma_sync_key_mm);
2363 kfree(rq_db_key_mm);
2365 kfree(sq_db_key_mm);
2372 xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
2374 destroy_qp(&rhp->rdev, &qhp->wq,
2375 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
2377 c4iw_put_wr_wait(qhp->wr_waitp);
2380 return ERR_PTR(ret);
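
/*
 * c4iw_ib_modify_qp() translates the IB verbs attribute mask into the
 * driver's c4iw_qp_attr_mask. The RTR state is ignored for iWARP, and
 * SQ/RQ PSN values are repurposed as doorbell index increments, which
 * is only permitted on T4 adapters.
 */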
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	pr_debug("ib_qp %p\n", ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof(attrs));
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 * Only allow this on T4 devices.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
	if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
	    (mask & (C4IW_QP_ATTR_SQ_DB | C4IW_QP_ATTR_RQ_DB)))
		return -EINVAL;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}
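/*
 * Illustrative use of the modify path above (a sketch, not driver code):
 * a kernel consumer moving a QP to ERROR only needs to set IB_QP_STATE,
 * which c4iw_ib_modify_qp() translates into C4IW_QP_ATTR_NEXT_STATE for
 * c4iw_modify_qp():
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
 */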
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
{
	struct ib_event event = {};

	event.device = &srq->rhp->ibdev;
	event.element.srq = &srq->ibsrq;
	event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	ib_dispatch_event(&event);
}
int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask srq_attr_mask,
		    struct ib_udata *udata)
{
	struct c4iw_srq *srq = to_c4iw_srq(ib_srq);

	/* XXX 0 mask == a SW interrupt for srq_limit reached... */
	if (udata && !srq_attr_mask) {
		c4iw_dispatch_srq_limit_reached_event(srq);
		return 0;
	}
	/* no support for this yet */
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;
	if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
		srq->armed = true;
		srq->srq_limit = attr->srq_limit;
	}
	return 0;
}
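/*
 * Illustrative sketch of the kernel-consumer side of c4iw_modify_srq()
 * (not code from this driver): arming the limit uses the standard verbs
 * call below, which lands in the !udata branch above and records
 * srq->srq_limit; the limit-reached notification is reported to the
 * consumer as the IB_EVENT_SRQ_LIMIT_REACHED event built by
 * c4iw_dispatch_srq_limit_reached_event().
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *
 *	ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 */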
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof(*attr));
	memset(init_attr, 0, sizeof(*init_attr));
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}
static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
			   struct c4iw_wr_wait *wr_waitp)
{
	struct c4iw_rdev *rdev = &srq->rhp->rdev;
	struct sk_buff *skb = srq->destroy_skb;
	struct t4_srq *wq = &srq->wq;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;

	wr_len = sizeof(*res_wr) + sizeof(*res);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
				      FW_RI_RES_WR_NRES_V(1) |
				      FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
	res->u.srq.op = FW_RI_RES_OP_RESET;
	res->u.srq.srqid = cpu_to_be32(srq->idx);
	res->u.srq.eqid = cpu_to_be32(wq->qid);

	c4iw_init_wr_wait(wr_waitp);
	c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

	dma_free_coherent(&rdev->lldi.pdev->dev,
			  wq->memsize, wq->queue,
			  dma_unmap_addr(wq, mapping));
	c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
	kfree(wq->sw_rq);
	c4iw_put_qpid(rdev, wq->qid, uctx);
}
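/*
 * Both free_srq_queue() above and alloc_srq_queue() below use the same
 * firmware handshake: build an FW_RI_RES_WR work request, stash the
 * wr_waitp pointer in the cookie, and let c4iw_ref_send_wait() block
 * until the firmware completion matching that cookie wakes the waiter.
 * Only the opcode differs: FW_RI_RES_OP_RESET tears the SRQ down,
 * FW_RI_RES_OP_WRITE creates it.
 */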
static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
			   struct c4iw_wr_wait *wr_waitp)
{
	struct c4iw_rdev *rdev = &srq->rhp->rdev;
	int user = (uctx != &rdev->uctx);
	struct t4_srq *wq = &srq->wq;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	struct sk_buff *skb;
	int wr_len;
	int eqsize;
	int ret = -ENOMEM;

	wq->qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->qid)
		goto err;

	if (!user) {
		wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
				    GFP_KERNEL);
		if (!wq->sw_rq)
			goto err_put_qpid;
		wq->pending_wrs = kcalloc(srq->wq.size,
					  sizeof(*srq->wq.pending_wrs),
					  GFP_KERNEL);
		if (!wq->pending_wrs)
			goto err_free_sw_rq;
	}

	wq->rqt_size = wq->size;
	wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
	if (!wq->rqt_hwaddr)
		goto err_free_pending_wrs;
	wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
			  T4_RQT_ENTRY_SHIFT;

	wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
				       &wq->dma_addr, GFP_KERNEL);
	if (!wq->queue)
		goto err_free_rqtpool;

	dma_unmap_addr_set(wq, mapping, wq->dma_addr);

	wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
				      &wq->bar2_qid,
				      user ? &wq->bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && !wq->bar2_va) {
		pr_warn(MOD "%s: srqid %u not in BAR2 range.\n",
			pci_name(rdev->lldi.pdev), wq->qid);
		ret = -EINVAL;
		goto err_free_queue;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + sizeof(*res);

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		goto err_free_queue;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
				      FW_RI_RES_WR_NRES_V(1) |
				      FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
	res->u.srq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->size * T4_RQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;
	res->u.srq.eqid = cpu_to_be32(wq->qid);
	res->u.srq.fetchszm_to_iqid =
						/* no host cidx updates */
		cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		FW_RI_RES_WR_FETCHRO_V(0));	/* relaxed_ordering */
	res->u.srq.dcaen_to_eqsize =
		cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		FW_RI_RES_WR_FBMAX_V(3) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
	res->u.srq.srqid = cpu_to_be32(srq->idx);
	res->u.srq.pdid = cpu_to_be32(srq->pdid);
	res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
	res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
					   rdev->lldi.vr->rq.start);

	c4iw_init_wr_wait(wr_waitp);

	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
	if (ret)
		goto err_free_queue;

	pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n"
		 " bar2_addr %p rqt addr 0x%x size %d\n",
		 __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
		 (u64)virt_to_phys(wq->queue), wq->bar2_va,
		 wq->rqt_hwaddr, wq->rqt_size);

	return 0;
err_free_queue:
	dma_free_coherent(&rdev->lldi.pdev->dev,
			  wq->memsize, wq->queue,
			  dma_unmap_addr(wq, mapping));
err_free_rqtpool:
	c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
err_free_pending_wrs:
	if (!user)
		kfree(wq->pending_wrs);
err_free_sw_rq:
	if (!user)
		kfree(wq->sw_rq);
err_put_qpid:
	c4iw_put_qpid(rdev, wq->qid, uctx);
err:
	return ret;
}
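/*
 * c4iw_copy_wr_to_srq() below copies a receive WR into the SRQ ring in
 * 16-byte units: len16 counts 16-byte chunks of the WR, each loop pass
 * moves two u64 words, and dst wraps back to the start of the queue once
 * it runs past srq->queue[srq->size].  The producer index itself is
 * advanced by the caller; this helper only copies the payload.
 */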
void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
{
	u64 *src, *dst;

	src = (u64 *)wqe;
	dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
	while (len16) {
		*dst++ = *src++;
		if (dst >= (u64 *)&srq->queue[srq->size])
			dst = (u64 *)srq->queue;
		*dst++ = *src++;
		if (dst >= (u64 *)&srq->queue[srq->size])
			dst = (u64 *)srq->queue;
		len16--;
	}
}

int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
		    struct ib_udata *udata)
{
	struct ib_pd *pd = ib_srq->pd;
	struct c4iw_dev *rhp;
	struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
	struct c4iw_pd *php;
	struct c4iw_create_srq_resp uresp;
	struct c4iw_ucontext *ucontext;
	struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
	int rqsize;
	int ret;
	int wr_len;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (!rhp->rdev.lldi.vr->srq.size)
		return -EINVAL;
	if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
		return -E2BIG;
	if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
		return -E2BIG;

	/*
	 * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
	 */
	rqsize = attrs->attr.max_wr + 1;
	rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));

	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					     ibucontext);

	srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!srq->wr_waitp)
		return -ENOMEM;

	srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
	if (srq->idx < 0) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!srq->destroy_skb) {
		ret = -ENOMEM;
		goto err_free_srq_idx;
	}

	srq->rhp = rhp;
	srq->pdid = php->pdid;

	srq->wq.size = rqsize;
	srq->wq.memsize =
		(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*srq->wq.queue);
	if (ucontext)
		srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);

	ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
			      &rhp->rdev.uctx, srq->wr_waitp);
	if (ret)
		goto err_free_skb;
	attrs->attr.max_wr = rqsize - 1;

	if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
		srq->flags = T4_SRQ_LIMIT_SUPPORT;

	ret = xa_insert_irq(&rhp->qps, srq->wq.qid, srq, GFP_KERNEL);
	if (ret)
		goto err_free_queue;

	if (udata) {
		srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
		if (!srq_key_mm) {
			ret = -ENOMEM;
			goto err_remove_handle;
		}
		srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
		if (!srq_db_key_mm) {
			ret = -ENOMEM;
			goto err_free_srq_key_mm;
		}
		memset(&uresp, 0, sizeof(uresp));
		uresp.flags = srq->flags;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.srqid = srq->wq.qid;
		uresp.srq_size = srq->wq.size;
		uresp.srq_memsize = srq->wq.memsize;
		uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
		spin_lock(&ucontext->mmap_lock);
		uresp.srq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.srq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (ret)
			goto err_free_srq_db_key_mm;
		srq_key_mm->key = uresp.srq_key;
		srq_key_mm->addr = virt_to_phys(srq->wq.queue);
		srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
		insert_mmap(ucontext, srq_key_mm);
		srq_db_key_mm->key = uresp.srq_db_gts_key;
		srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
		srq_db_key_mm->len = PAGE_SIZE;
		insert_mmap(ucontext, srq_db_key_mm);
	}

	pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
		 __func__, srq->wq.qid, srq->idx, srq->wq.size,
		 (unsigned long)srq->wq.memsize, attrs->attr.max_wr);

	spin_lock_init(&srq->lock);
	return 0;

err_free_srq_db_key_mm:
	kfree(srq_db_key_mm);
err_free_srq_key_mm:
	kfree(srq_key_mm);
err_remove_handle:
	xa_erase_irq(&rhp->qps, srq->wq.qid);
err_free_queue:
	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		       srq->wr_waitp);
err_free_skb:
	kfree_skb(srq->destroy_skb);
err_free_srq_idx:
	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
err_free_wr_wait:
	c4iw_put_wr_wait(srq->wr_waitp);
	return ret;
}
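/*
 * Worked example of the sizing done in c4iw_create_srq() (numbers are
 * illustrative): a request for attrs->attr.max_wr = 100 becomes
 * rqsize = 101, which roundup_pow_of_two() raises to 128 (and never below
 * 16), so the application is told attr.max_wr = 127 on return.  The queue
 * memory is sized for rqsize plus the status page entries and, for user
 * SRQs, rounded up to a page so it can be mmap()ed via srq_key.
 */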
void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_srq *srq;
	struct c4iw_ucontext *ucontext;

	srq = to_c4iw_srq(ibsrq);
	rhp = srq->rhp;

	pr_debug("%s id %d\n", __func__, srq->wq.qid);
	xa_erase_irq(&rhp->qps, srq->wq.qid);
	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					     ibucontext);
	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		       srq->wr_waitp);
	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
	c4iw_put_wr_wait(srq->wr_waitp);
}