2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/module.h>
37 static int db_delay_usecs = 1;
38 module_param(db_delay_usecs, int, 0644);
39 MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay while waiting for the db fifo to drain");
41 static int ocqp_support = 1;
42 module_param(ocqp_support, int, 0644);
43 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
45 int db_fc_threshold = 1000;
46 module_param(db_fc_threshold, int, 0644);
47 MODULE_PARM_DESC(db_fc_threshold,
48 "QP count/threshold that triggers automatic db flow control mode (default = 1000)");
51 int db_coalescing_threshold;
52 module_param(db_coalescing_threshold, int, 0644);
53 MODULE_PARM_DESC(db_coalescing_threshold,
54 "QP count/threshold that triggers disabling db coalescing (default = 0)");
57 static int max_fr_immd = T4_MAX_FR_IMMD;
58 module_param(max_fr_immd, int, 0644);
59 MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
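/*
 * See build_memreg(): the fast-register PBL is written into the WR as
 * immediate data while its padded length is <= max_fr_immd; larger PBLs
 * are passed via a DSGL (when ulptx_memwrite_dsgl and use_dsgl allow it)
 * so the hardware fetches the PBL by DMA instead.
 */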
61 static int alloc_ird(struct c4iw_dev *dev, u32 ird)
65 spin_lock_irq(&dev->lock);
66 if (ird <= dev->avail_ird)
67 dev->avail_ird -= ird;
70 spin_unlock_irq(&dev->lock);
73 dev_warn(&dev->rdev.lldi.pdev->dev,
74 "device IRD resources exhausted\n");
79 static void free_ird(struct c4iw_dev *dev, int ird)
81 spin_lock_irq(&dev->lock);
82 dev->avail_ird += ird;
83 spin_unlock_irq(&dev->lock);
86 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
89 spin_lock_irqsave(&qhp->lock, flag);
90 qhp->attr.state = state;
91 spin_unlock_irqrestore(&qhp->lock, flag);
94 static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
96 c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
99 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
101 dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
102 dma_unmap_addr(sq, mapping));
105 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
107 if (t4_sq_onchip(sq))
108 dealloc_oc_sq(rdev, sq);
110 dealloc_host_sq(rdev, sq);
113 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
115 if (!ocqp_support || !ocqp_supported(&rdev->lldi))
117 sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
120 sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
121 rdev->lldi.vr->ocq.start;
122 sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
123 rdev->lldi.vr->ocq.start);
124 sq->flags |= T4_SQ_ONCHIP;
128 static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
130 sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
131 &(sq->dma_addr), GFP_KERNEL);
134 sq->phys_addr = virt_to_phys(sq->queue);
135 dma_unmap_addr_set(sq, mapping, sq->dma_addr);
139 static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
143 ret = alloc_oc_sq(rdev, sq);
145 ret = alloc_host_sq(rdev, sq);
149 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
150 struct c4iw_dev_ucontext *uctx, int has_rq)
153 * uP clears EQ contexts when the connection exits rdma mode,
154 * so no need to post a RESET WR for these EQs.
156 dealloc_sq(rdev, &wq->sq);
158 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
161 dma_free_coherent(&rdev->lldi.pdev->dev,
162 wq->rq.memsize, wq->rq.queue,
163 dma_unmap_addr(&wq->rq, mapping));
164 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
166 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
172 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
173 * then this is a user mapping so compute the page-aligned physical address
176 void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
177 enum cxgb4_bar2_qtype qtype,
178 unsigned int *pbar2_qid, u64 *pbar2_pa)
183 ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
185 &bar2_qoffset, pbar2_qid);
190 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
192 if (is_t4(rdev->lldi.adapter_type))
195 return rdev->bar2_kva + bar2_qoffset;
198 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
199 struct t4_cq *rcq, struct t4_cq *scq,
200 struct c4iw_dev_ucontext *uctx,
201 struct c4iw_wr_wait *wr_waitp,
204 int user = (uctx != &rdev->uctx);
205 struct fw_ri_res_wr *res_wr;
206 struct fw_ri_res *res;
212 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
217 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
225 wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
229 goto free_rq_qid; /* FIXME */
233 wq->rq.sw_rq = kcalloc(wq->rq.size,
234 sizeof(*wq->rq.sw_rq),
245 * RQT must be a power of 2 and at least 16 deep.
248 roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
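/*
 * For example, a requested RQ depth of 100 yields
 * roundup_pow_of_two(max(100, 16)) = 128 RQT entries.
 */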
249 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
250 if (!wq->rq.rqt_hwaddr) {
256 ret = alloc_sq(rdev, &wq->sq, user);
259 memset(wq->sq.queue, 0, wq->sq.memsize);
260 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
263 wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
271 pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
273 (unsigned long long)virt_to_phys(wq->sq.queue),
275 (unsigned long long)virt_to_phys(wq->rq.queue));
276 memset(wq->rq.queue, 0, wq->rq.memsize);
277 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
280 wq->db = rdev->lldi.db_reg;
282 wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
284 user ? &wq->sq.bar2_pa : NULL);
286 wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
287 T4_BAR2_QTYPE_EGRESS,
289 user ? &wq->rq.bar2_pa : NULL);
292 * User mode must have bar2 access.
294 if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
295 pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
296 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
303 /* build fw_ri_res_wr */
304 wr_len = sizeof *res_wr + 2 * sizeof *res;
306 wr_len += sizeof(*res);
307 skb = alloc_skb(wr_len, GFP_KERNEL);
312 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
314 res_wr = __skb_put_zero(skb, wr_len);
315 res_wr->op_nres = cpu_to_be32(
316 FW_WR_OP_V(FW_RI_RES_WR) |
317 FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
319 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
320 res_wr->cookie = (uintptr_t)wr_waitp;
322 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
323 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
326 * eqsize is the number of 64B entries plus the status page size.
328 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
329 rdev->hw_queue.t4_eq_status_entries;
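/*
 * e.g. with sq.size = 128 this is 128 * T4_SQ_NUM_SLOTS 64B slots plus
 * the rdev->hw_queue.t4_eq_status_entries status-page slots.
 */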
331 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
332 FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
333 FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
334 FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
335 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
336 FW_RI_RES_WR_IQID_V(scq->cqid));
337 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
338 FW_RI_RES_WR_DCAEN_V(0) |
339 FW_RI_RES_WR_DCACPU_V(0) |
340 FW_RI_RES_WR_FBMIN_V(2) |
341 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
342 FW_RI_RES_WR_FBMAX_V(3)) |
343 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
344 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
345 FW_RI_RES_WR_EQSIZE_V(eqsize));
346 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
347 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
351 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
352 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
355 * eqsize is the number of 64B entries plus the status page size
357 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
358 rdev->hw_queue.t4_eq_status_entries;
359 res->u.sqrq.fetchszm_to_iqid =
360 /* no host cidx updates */
361 cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
362 /* don't keep in chip cache */
363 FW_RI_RES_WR_CPRIO_V(0) |
364 /* set by uP at ri_init time */
365 FW_RI_RES_WR_PCIECHN_V(0) |
366 FW_RI_RES_WR_IQID_V(rcq->cqid));
367 res->u.sqrq.dcaen_to_eqsize =
368 cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
369 FW_RI_RES_WR_DCACPU_V(0) |
370 FW_RI_RES_WR_FBMIN_V(2) |
371 FW_RI_RES_WR_FBMAX_V(3) |
372 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
373 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
374 FW_RI_RES_WR_EQSIZE_V(eqsize));
375 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
376 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
379 c4iw_init_wr_wait(wr_waitp);
380 ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
384 pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
385 wq->sq.qid, wq->rq.qid, wq->db,
386 wq->sq.bar2_va, wq->rq.bar2_va);
391 dma_free_coherent(&rdev->lldi.pdev->dev,
392 wq->rq.memsize, wq->rq.queue,
393 dma_unmap_addr(&wq->rq, mapping));
395 dealloc_sq(rdev, &wq->sq);
398 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
406 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
408 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
412 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
413 const struct ib_send_wr *wr, int max, u32 *plenp)
420 dstp = (u8 *)immdp->data;
421 for (i = 0; i < wr->num_sge; i++) {
422 if ((plen + wr->sg_list[i].length) > max)
424 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
425 plen += wr->sg_list[i].length;
426 rem = wr->sg_list[i].length;
428 if (dstp == (u8 *)&sq->queue[sq->size])
429 dstp = (u8 *)sq->queue;
430 if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
433 len = (u8 *)&sq->queue[sq->size] - dstp;
434 memcpy(dstp, srcp, len);
440 len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
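/*
 * Pad the immediate data to a 16-byte boundary: e.g. if
 * plen + sizeof(*immdp) = 53, the WR is padded to 64 bytes and
 * len = 11 zero bytes are written below.
 */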
442 memset(dstp, 0, len);
443 immdp->op = FW_RI_DATA_IMMD;
446 immdp->immdlen = cpu_to_be32(plen);
451 static int build_isgl(__be64 *queue_start, __be64 *queue_end,
452 struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
453 int num_sge, u32 *plenp)
458 __be64 *flitp = (__be64 *)isglp->sge;
460 for (i = 0; i < num_sge; i++) {
461 if ((plen + sg_list[i].length) < plen)
463 plen += sg_list[i].length;
464 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
466 if (++flitp == queue_end)
468 *flitp = cpu_to_be64(sg_list[i].addr);
469 if (++flitp == queue_end)
472 *flitp = (__force __be64)0;
473 isglp->op = FW_RI_DATA_ISGL;
475 isglp->nsge = cpu_to_be16(num_sge);
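/*
 * Each SGE occupies two 64-bit flits: the first packs the lkey in the
 * upper 32 bits and the byte length in the lower 32, the second holds
 * the 64-bit address; flitp wraps to queue_start when it hits queue_end.
 */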
482 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
483 const struct ib_send_wr *wr, u8 *len16)
489 if (wr->num_sge > T4_MAX_SEND_SGE)
491 switch (wr->opcode) {
493 if (wr->send_flags & IB_SEND_SOLICITED)
494 wqe->send.sendop_pkd = cpu_to_be32(
495 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
497 wqe->send.sendop_pkd = cpu_to_be32(
498 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
499 wqe->send.stag_inv = 0;
501 case IB_WR_SEND_WITH_INV:
502 if (wr->send_flags & IB_SEND_SOLICITED)
503 wqe->send.sendop_pkd = cpu_to_be32(
504 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
506 wqe->send.sendop_pkd = cpu_to_be32(
507 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
508 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
519 if (wr->send_flags & IB_SEND_INLINE) {
520 ret = build_immd(sq, wqe->send.u.immd_src, wr,
521 T4_MAX_SEND_INLINE, &plen);
524 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
527 ret = build_isgl((__be64 *)sq->queue,
528 (__be64 *)&sq->queue[sq->size],
529 wqe->send.u.isgl_src,
530 wr->sg_list, wr->num_sge, &plen);
533 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
534 wr->num_sge * sizeof(struct fw_ri_sge);
537 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
538 wqe->send.u.immd_src[0].r1 = 0;
539 wqe->send.u.immd_src[0].r2 = 0;
540 wqe->send.u.immd_src[0].immdlen = 0;
541 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
544 *len16 = DIV_ROUND_UP(size, 16);
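/* len16 is in 16-byte units, e.g. a 56-byte send WR gives DIV_ROUND_UP(56, 16) = 4. */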
545 wqe->send.plen = cpu_to_be32(plen);
549 static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
550 const struct ib_send_wr *wr, u8 *len16)
556 if (wr->num_sge > T4_MAX_SEND_SGE)
559 wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
560 wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
562 if (wr->send_flags & IB_SEND_INLINE) {
563 ret = build_immd(sq, wqe->write.u.immd_src, wr,
564 T4_MAX_WRITE_INLINE, &plen);
567 size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
570 ret = build_isgl((__be64 *)sq->queue,
571 (__be64 *)&sq->queue[sq->size],
572 wqe->write.u.isgl_src,
573 wr->sg_list, wr->num_sge, &plen);
576 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
577 wr->num_sge * sizeof(struct fw_ri_sge);
580 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
581 wqe->write.u.immd_src[0].r1 = 0;
582 wqe->write.u.immd_src[0].r2 = 0;
583 wqe->write.u.immd_src[0].immdlen = 0;
584 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
587 *len16 = DIV_ROUND_UP(size, 16);
588 wqe->write.plen = cpu_to_be32(plen);
592 static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
597 if (wr->num_sge && wr->sg_list[0].length) {
598 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
599 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
601 wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
602 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
603 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
604 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
606 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
608 wqe->read.stag_src = cpu_to_be32(2);
609 wqe->read.to_src_hi = 0;
610 wqe->read.to_src_lo = 0;
611 wqe->read.stag_sink = cpu_to_be32(2);
613 wqe->read.to_sink_hi = 0;
614 wqe->read.to_sink_lo = 0;
618 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
622 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
623 struct ib_recv_wr *wr, u8 *len16)
627 ret = build_isgl((__be64 *)qhp->wq.rq.queue,
628 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
629 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
632 *len16 = DIV_ROUND_UP(sizeof wqe->recv +
633 wr->num_sge * sizeof(struct fw_ri_sge), 16);
637 static int build_srq_recv(union t4_recv_wr *wqe, struct ib_recv_wr *wr,
642 ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
643 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
646 *len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
647 wr->num_sge * sizeof(struct fw_ri_sge), 16);
651 static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
652 const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
655 __be64 *p = (__be64 *)fr->pbl;
657 fr->r2 = cpu_to_be32(0);
658 fr->stag = cpu_to_be32(mhp->ibmr.rkey);
660 fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
661 FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
662 FW_RI_TPTE_STAGSTATE_V(1) |
663 FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
664 FW_RI_TPTE_PDID_V(mhp->attr.pdid));
665 fr->tpte.locread_to_qpid = cpu_to_be32(
666 FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
667 FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
668 FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
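/*
 * The TPT page-size field is log2(page_size) - 12: 0 for 4KB pages,
 * 4 for 64KB pages.
 */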
669 fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
670 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
671 fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
672 fr->tpte.len_hi = cpu_to_be32(0);
673 fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
674 fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
675 fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
677 p[0] = cpu_to_be64((u64)mhp->mpl[0]);
678 p[1] = cpu_to_be64((u64)mhp->mpl[1]);
680 *len16 = DIV_ROUND_UP(sizeof(*fr), 16);
683 static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
684 const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
685 u8 *len16, bool dsgl_supported)
687 struct fw_ri_immd *imdp;
690 int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
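/*
 * The PBL length is padded to a 32-byte multiple, e.g. a 5-entry MPL
 * (5 * 8 = 40 bytes) gives pbllen = 64.
 */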
693 if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
696 wqe->fr.qpbinde_to_dcacpu = 0;
697 wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
698 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
699 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
701 wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
702 wqe->fr.stag = cpu_to_be32(wr->key);
703 wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
704 wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
707 if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
708 struct fw_ri_dsgl *sglp;
710 for (i = 0; i < mhp->mpl_len; i++)
711 mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);
713 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
714 sglp->op = FW_RI_DATA_DSGL;
716 sglp->nsge = cpu_to_be16(1);
717 sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
718 sglp->len0 = cpu_to_be32(pbllen);
720 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
722 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
723 imdp->op = FW_RI_DATA_IMMD;
726 imdp->immdlen = cpu_to_be32(pbllen);
727 p = (__be64 *)(imdp + 1);
729 for (i = 0; i < mhp->mpl_len; i++) {
730 *p = cpu_to_be64((u64)mhp->mpl[i]);
732 if (++p == (__be64 *)&sq->queue[sq->size])
733 p = (__be64 *)sq->queue;
738 if (++p == (__be64 *)&sq->queue[sq->size])
739 p = (__be64 *)sq->queue;
741 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
747 static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
750 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
752 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
756 static void free_qp_work(struct work_struct *work)
758 struct c4iw_ucontext *ucontext;
760 struct c4iw_dev *rhp;
762 qhp = container_of(work, struct c4iw_qp, free_work);
763 ucontext = qhp->ucontext;
766 pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
767 destroy_qp(&rhp->rdev, &qhp->wq,
768 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
771 c4iw_put_ucontext(ucontext);
772 c4iw_put_wr_wait(qhp->wr_waitp);
776 static void queue_qp_free(struct kref *kref)
780 qhp = container_of(kref, struct c4iw_qp, kref);
781 pr_debug("qhp %p\n", qhp);
782 queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
785 void c4iw_qp_add_ref(struct ib_qp *qp)
787 pr_debug("ib_qp %p\n", qp);
788 kref_get(&to_c4iw_qp(qp)->kref);
791 void c4iw_qp_rem_ref(struct ib_qp *qp)
793 pr_debug("ib_qp %p\n", qp);
794 kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
797 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
799 if (list_empty(entry))
800 list_add_tail(entry, head);
803 static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
807 spin_lock_irqsave(&qhp->rhp->lock, flags);
808 spin_lock(&qhp->lock);
809 if (qhp->rhp->db_state == NORMAL)
810 t4_ring_sq_db(&qhp->wq, inc, NULL);
812 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
813 qhp->wq.sq.wq_pidx_inc += inc;
815 spin_unlock(&qhp->lock);
816 spin_unlock_irqrestore(&qhp->rhp->lock, flags);
820 static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
824 spin_lock_irqsave(&qhp->rhp->lock, flags);
825 spin_lock(&qhp->lock);
826 if (qhp->rhp->db_state == NORMAL)
827 t4_ring_rq_db(&qhp->wq, inc, NULL);
829 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
830 qhp->wq.rq.wq_pidx_inc += inc;
832 spin_unlock(&qhp->lock);
833 spin_unlock_irqrestore(&qhp->rhp->lock, flags);
837 static int ib_to_fw_opcode(int ib_opcode)
842 case IB_WR_SEND_WITH_INV:
843 opcode = FW_RI_SEND_WITH_INV;
848 case IB_WR_RDMA_WRITE:
849 opcode = FW_RI_RDMA_WRITE;
851 case IB_WR_RDMA_READ:
852 case IB_WR_RDMA_READ_WITH_INV:
853 opcode = FW_RI_READ_REQ;
856 opcode = FW_RI_FAST_REGISTER;
858 case IB_WR_LOCAL_INV:
859 opcode = FW_RI_LOCAL_INV;
867 static int complete_sq_drain_wr(struct c4iw_qp *qhp,
868 const struct ib_send_wr *wr)
870 struct t4_cqe cqe = {};
871 struct c4iw_cq *schp;
876 schp = to_c4iw_cq(qhp->ibqp.send_cq);
879 opcode = ib_to_fw_opcode(wr->opcode);
883 cqe.u.drain_cookie = wr->wr_id;
884 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
885 CQE_OPCODE_V(opcode) |
889 CQE_QPID_V(qhp->wq.sq.qid));
891 spin_lock_irqsave(&schp->lock, flag);
892 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
893 cq->sw_queue[cq->sw_pidx] = cqe;
895 spin_unlock_irqrestore(&schp->lock, flag);
897 if (t4_clear_cq_armed(&schp->cq)) {
898 spin_lock_irqsave(&schp->comp_handler_lock, flag);
899 (*schp->ibcq.comp_handler)(&schp->ibcq,
900 schp->ibcq.cq_context);
901 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
906 static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
907 struct ib_send_wr **bad_wr)
912 ret = complete_sq_drain_wr(qhp, wr);
922 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
924 struct t4_cqe cqe = {};
925 struct c4iw_cq *rchp;
929 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
932 cqe.u.drain_cookie = wr->wr_id;
933 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
934 CQE_OPCODE_V(FW_RI_SEND) |
938 CQE_QPID_V(qhp->wq.sq.qid));
940 spin_lock_irqsave(&rchp->lock, flag);
941 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
942 cq->sw_queue[cq->sw_pidx] = cqe;
944 spin_unlock_irqrestore(&rchp->lock, flag);
946 if (t4_clear_cq_armed(&rchp->cq)) {
947 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
948 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
949 rchp->ibcq.cq_context);
950 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
954 static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
957 complete_rq_drain_wr(qhp, wr);
962 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
963 struct ib_send_wr **bad_wr)
967 enum fw_wr_opcodes fw_opcode = 0;
968 enum fw_ri_wr_flags fw_flags;
970 union t4_wr *wqe = NULL;
972 struct t4_swsqe *swsqe;
976 qhp = to_c4iw_qp(ibqp);
977 spin_lock_irqsave(&qhp->lock, flag);
980 * If the qp has been flushed, then just insert a special
983 if (qhp->wq.flushed) {
984 spin_unlock_irqrestore(&qhp->lock, flag);
985 err = complete_sq_drain_wrs(qhp, wr, bad_wr);
988 num_wrs = t4_sq_avail(&qhp->wq);
990 spin_unlock_irqrestore(&qhp->lock, flag);
1000 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
1001 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
1004 if (wr->send_flags & IB_SEND_SOLICITED)
1005 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
1006 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
1007 fw_flags |= FW_RI_COMPLETION_FLAG;
1008 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
1009 switch (wr->opcode) {
1010 case IB_WR_SEND_WITH_INV:
1012 if (wr->send_flags & IB_SEND_FENCE)
1013 fw_flags |= FW_RI_READ_FENCE_FLAG;
1014 fw_opcode = FW_RI_SEND_WR;
1015 if (wr->opcode == IB_WR_SEND)
1016 swsqe->opcode = FW_RI_SEND;
1018 swsqe->opcode = FW_RI_SEND_WITH_INV;
1019 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
1021 case IB_WR_RDMA_WRITE:
1022 fw_opcode = FW_RI_RDMA_WRITE_WR;
1023 swsqe->opcode = FW_RI_RDMA_WRITE;
1024 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
1026 case IB_WR_RDMA_READ:
1027 case IB_WR_RDMA_READ_WITH_INV:
1028 fw_opcode = FW_RI_RDMA_READ_WR;
1029 swsqe->opcode = FW_RI_READ_REQ;
1030 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
1031 c4iw_invalidate_mr(qhp->rhp,
1032 wr->sg_list[0].lkey);
1033 fw_flags = FW_RI_RDMA_READ_INVALIDATE;
1037 err = build_rdma_read(wqe, wr, &len16);
1040 swsqe->read_len = wr->sg_list[0].length;
1041 if (!qhp->wq.sq.oldest_read)
1042 qhp->wq.sq.oldest_read = swsqe;
1044 case IB_WR_REG_MR: {
1045 struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
1047 swsqe->opcode = FW_RI_FAST_REGISTER;
1048 if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
1049 !mhp->attr.state && mhp->mpl_len <= 2) {
1050 fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
1051 build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
1054 fw_opcode = FW_RI_FR_NSMR_WR;
1055 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
1057 qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
1061 mhp->attr.state = 1;
1064 case IB_WR_LOCAL_INV:
1065 if (wr->send_flags & IB_SEND_FENCE)
1066 fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
1067 fw_opcode = FW_RI_INV_LSTAG_WR;
1068 swsqe->opcode = FW_RI_LOCAL_INV;
1069 err = build_inv_stag(wqe, wr, &len16);
1070 c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
1073 pr_warn("%s post of type=%d TBD!\n", __func__,
1081 swsqe->idx = qhp->wq.sq.pidx;
1082 swsqe->complete = 0;
1083 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
1086 swsqe->wr_id = wr->wr_id;
1088 swsqe->sge_ts = cxgb4_read_sge_timestamp(
1089 qhp->rhp->rdev.lldi.ports[0]);
1090 swsqe->host_time = ktime_get();
1093 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
1095 pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
1096 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
1097 swsqe->opcode, swsqe->read_len);
1100 t4_sq_produce(&qhp->wq, len16);
1101 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
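/*
 * idx accumulates the doorbell increment in EQ entries, e.g. with
 * 64-byte entries a WR of len16 = 3 (48 bytes) adds
 * DIV_ROUND_UP(48, T4_EQ_ENTRY_SIZE) = 1.
 */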
1103 if (!qhp->rhp->rdev.status_page->db_off) {
1104 t4_ring_sq_db(&qhp->wq, idx, wqe);
1105 spin_unlock_irqrestore(&qhp->lock, flag);
1107 spin_unlock_irqrestore(&qhp->lock, flag);
1108 ring_kernel_sq_db(qhp, idx);
1113 int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1114 struct ib_recv_wr **bad_wr)
1117 struct c4iw_qp *qhp;
1118 union t4_recv_wr *wqe = NULL;
1124 qhp = to_c4iw_qp(ibqp);
1125 spin_lock_irqsave(&qhp->lock, flag);
1128 * If the qp has been flushed, then just insert a special
1131 if (qhp->wq.flushed) {
1132 spin_unlock_irqrestore(&qhp->lock, flag);
1133 complete_rq_drain_wrs(qhp, wr);
1136 num_wrs = t4_rq_avail(&qhp->wq);
1138 spin_unlock_irqrestore(&qhp->lock, flag);
1143 if (wr->num_sge > T4_MAX_RECV_SGE) {
1148 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
1149 qhp->wq.rq.wq_pidx *
1152 err = build_rdma_recv(qhp, wqe, wr, &len16);
1160 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
1162 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
1163 cxgb4_read_sge_timestamp(
1164 qhp->rhp->rdev.lldi.ports[0]);
1165 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
1169 wqe->recv.opcode = FW_RI_RECV_WR;
1171 wqe->recv.wrid = qhp->wq.rq.pidx;
1172 wqe->recv.r2[0] = 0;
1173 wqe->recv.r2[1] = 0;
1174 wqe->recv.r2[2] = 0;
1175 wqe->recv.len16 = len16;
1176 pr_debug("cookie 0x%llx pidx %u\n",
1177 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
1178 t4_rq_produce(&qhp->wq, len16);
1179 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
1183 if (!qhp->rhp->rdev.status_page->db_off) {
1184 t4_ring_rq_db(&qhp->wq, idx, wqe);
1185 spin_unlock_irqrestore(&qhp->lock, flag);
1187 spin_unlock_irqrestore(&qhp->lock, flag);
1188 ring_kernel_rq_db(qhp, idx);
1193 static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
1194 u64 wr_id, u8 len16)
1196 struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];
1198 pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
1199 __func__, srq->cidx, srq->pidx, srq->wq_pidx,
1200 srq->in_use, srq->ooo_count,
1201 (unsigned long long)wr_id, srq->pending_cidx,
1202 srq->pending_pidx, srq->pending_in_use);
1205 memcpy(&pwr->wqe, wqe, len16 * 16);
1206 t4_srq_produce_pending_wr(srq);
1209 int c4iw_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
1210 struct ib_recv_wr **bad_wr)
1212 union t4_recv_wr *wqe, lwqe;
1213 struct c4iw_srq *srq;
1220 srq = to_c4iw_srq(ibsrq);
1221 spin_lock_irqsave(&srq->lock, flag);
1222 num_wrs = t4_srq_avail(&srq->wq);
1224 spin_unlock_irqrestore(&srq->lock, flag);
1228 if (wr->num_sge > T4_MAX_RECV_SGE) {
1235 err = build_srq_recv(wqe, wr, &len16);
1243 wqe->recv.opcode = FW_RI_RECV_WR;
1245 wqe->recv.wrid = srq->wq.pidx;
1246 wqe->recv.r2[0] = 0;
1247 wqe->recv.r2[1] = 0;
1248 wqe->recv.r2[2] = 0;
1249 wqe->recv.len16 = len16;
1251 if (srq->wq.ooo_count ||
1252 srq->wq.pending_in_use ||
1253 srq->wq.sw_rq[srq->wq.pidx].valid) {
1254 defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
1256 srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
1257 srq->wq.sw_rq[srq->wq.pidx].valid = 1;
1258 c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
1259 pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
1260 __func__, srq->wq.cidx,
1261 srq->wq.pidx, srq->wq.wq_pidx,
1263 (unsigned long long)wr->wr_id);
1264 t4_srq_produce(&srq->wq, len16);
1265 idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
1271 t4_ring_srq_db(&srq->wq, idx, len16, wqe);
1272 spin_unlock_irqrestore(&srq->lock, flag);
1276 static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
1286 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1291 status = CQE_STATUS(err_cqe);
1292 opcode = CQE_OPCODE(err_cqe);
1293 rqtype = RQ_TYPE(err_cqe);
1294 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
1295 (opcode == FW_RI_SEND_WITH_SE_INV);
1296 tagged = (opcode == FW_RI_RDMA_WRITE) ||
1297 (rqtype && (opcode == FW_RI_READ_RESP));
1302 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1303 *ecode = RDMAP_CANT_INV_STAG;
1305 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1306 *ecode = RDMAP_INV_STAG;
1310 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1311 if ((opcode == FW_RI_SEND_WITH_INV) ||
1312 (opcode == FW_RI_SEND_WITH_SE_INV))
1313 *ecode = RDMAP_CANT_INV_STAG;
1315 *ecode = RDMAP_STAG_NOT_ASSOC;
1318 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1319 *ecode = RDMAP_STAG_NOT_ASSOC;
1322 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1323 *ecode = RDMAP_ACC_VIOL;
1326 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1327 *ecode = RDMAP_TO_WRAP;
1331 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1332 *ecode = DDPT_BASE_BOUNDS;
1334 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1335 *ecode = RDMAP_BASE_BOUNDS;
1338 case T4_ERR_INVALIDATE_SHARED_MR:
1339 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
1340 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1341 *ecode = RDMAP_CANT_INV_STAG;
1344 case T4_ERR_ECC_PSTAG:
1345 case T4_ERR_INTERNAL_ERR:
1346 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
1349 case T4_ERR_OUT_OF_RQE:
1350 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1351 *ecode = DDPU_INV_MSN_NOBUF;
1353 case T4_ERR_PBL_ADDR_BOUND:
1354 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1355 *ecode = DDPT_BASE_BOUNDS;
1358 *layer_type = LAYER_MPA|DDP_LLP;
1359 *ecode = MPA_CRC_ERR;
1362 *layer_type = LAYER_MPA|DDP_LLP;
1363 *ecode = MPA_MARKER_ERR;
1365 case T4_ERR_PDU_LEN_ERR:
1366 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1367 *ecode = DDPU_MSG_TOOBIG;
1369 case T4_ERR_DDP_VERSION:
1371 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1372 *ecode = DDPT_INV_VERS;
1374 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1375 *ecode = DDPU_INV_VERS;
1378 case T4_ERR_RDMA_VERSION:
1379 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1380 *ecode = RDMAP_INV_VERS;
1383 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1384 *ecode = RDMAP_INV_OPCODE;
1386 case T4_ERR_DDP_QUEUE_NUM:
1387 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1388 *ecode = DDPU_INV_QN;
1391 case T4_ERR_MSN_GAP:
1392 case T4_ERR_MSN_RANGE:
1393 case T4_ERR_IRD_OVERFLOW:
1394 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1395 *ecode = DDPU_INV_MSN_RANGE;
1398 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
1402 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1403 *ecode = DDPU_INV_MO;
1406 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1412 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
1415 struct fw_ri_wr *wqe;
1416 struct sk_buff *skb;
1417 struct terminate_message *term;
1419 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
1422 skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
1426 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1428 wqe = __skb_put_zero(skb, sizeof(*wqe));
1429 wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
1430 wqe->flowid_len16 = cpu_to_be32(
1431 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1432 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1434 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
1435 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
1436 term = (struct terminate_message *)wqe->u.terminate.termmsg;
1437 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1438 term->layer_etype = qhp->attr.layer_etype;
1439 term->ecode = qhp->attr.ecode;
1441 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
1442 c4iw_ofld_send(&qhp->rhp->rdev, skb);
1446 * Assumes qhp lock is held.
1448 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1449 struct c4iw_cq *schp)
1452 int rq_flushed = 0, sq_flushed;
1455 pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
1457 /* locking hierarchy: cqs lock first, then qp lock. */
1458 spin_lock_irqsave(&rchp->lock, flag);
1460 spin_lock(&schp->lock);
1461 spin_lock(&qhp->lock);
1463 if (qhp->wq.flushed) {
1464 spin_unlock(&qhp->lock);
1466 spin_unlock(&schp->lock);
1467 spin_unlock_irqrestore(&rchp->lock, flag);
1470 qhp->wq.flushed = 1;
1471 t4_set_wq_in_error(&qhp->wq, 0);
1473 c4iw_flush_hw_cq(rchp, qhp);
1475 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1476 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1480 c4iw_flush_hw_cq(schp, qhp);
1481 sq_flushed = c4iw_flush_sq(qhp);
1483 spin_unlock(&qhp->lock);
1485 spin_unlock(&schp->lock);
1486 spin_unlock_irqrestore(&rchp->lock, flag);
1489 if ((rq_flushed || sq_flushed) &&
1490 t4_clear_cq_armed(&rchp->cq)) {
1491 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1492 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1493 rchp->ibcq.cq_context);
1494 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1497 if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
1498 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1499 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1500 rchp->ibcq.cq_context);
1501 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1503 if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
1504 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1505 (*schp->ibcq.comp_handler)(&schp->ibcq,
1506 schp->ibcq.cq_context);
1507 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1512 static void flush_qp(struct c4iw_qp *qhp)
1514 struct c4iw_cq *rchp, *schp;
1517 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1518 schp = to_c4iw_cq(qhp->ibqp.send_cq);
1520 if (qhp->ibqp.uobject) {
1521 t4_set_wq_in_error(&qhp->wq, 0);
1522 t4_set_cq_in_error(&rchp->cq);
1523 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1524 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
1525 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1527 t4_set_cq_in_error(&schp->cq);
1528 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1529 (*schp->ibcq.comp_handler)(&schp->ibcq,
1530 schp->ibcq.cq_context);
1531 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1535 __flush_qp(qhp, rchp, schp);
1538 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1541 struct fw_ri_wr *wqe;
1543 struct sk_buff *skb;
1545 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
1547 skb = skb_dequeue(&ep->com.ep_skb_list);
1551 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1553 wqe = __skb_put_zero(skb, sizeof(*wqe));
1554 wqe->op_compl = cpu_to_be32(
1555 FW_WR_OP_V(FW_RI_INIT_WR) |
1557 wqe->flowid_len16 = cpu_to_be32(
1558 FW_WR_FLOWID_V(ep->hwtid) |
1559 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1560 wqe->cookie = (uintptr_t)ep->com.wr_waitp;
1562 wqe->u.fini.type = FW_RI_TYPE_FINI;
1564 ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
1565 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1567 pr_debug("ret %d\n", ret);
1571 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1573 pr_debug("p2p_type = %d\n", p2p_type);
1574 memset(&init->u, 0, sizeof init->u);
1576 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1577 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1578 init->u.write.stag_sink = cpu_to_be32(1);
1579 init->u.write.to_sink = cpu_to_be64(1);
1580 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1581 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1582 sizeof(struct fw_ri_immd),
1585 case FW_RI_INIT_P2PTYPE_READ_REQ:
1586 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1587 init->u.read.stag_src = cpu_to_be32(1);
1588 init->u.read.to_src_lo = cpu_to_be32(1);
1589 init->u.read.stag_sink = cpu_to_be32(1);
1590 init->u.read.to_sink_lo = cpu_to_be32(1);
1591 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1596 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1598 struct fw_ri_wr *wqe;
1600 struct sk_buff *skb;
1602 pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
1603 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
1605 skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
1610 ret = alloc_ird(rhp, qhp->attr.max_ird);
1612 qhp->attr.max_ird = 0;
1616 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1618 wqe = __skb_put_zero(skb, sizeof(*wqe));
1619 wqe->op_compl = cpu_to_be32(
1620 FW_WR_OP_V(FW_RI_INIT_WR) |
1622 wqe->flowid_len16 = cpu_to_be32(
1623 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1624 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1626 wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
1628 wqe->u.init.type = FW_RI_TYPE_INIT;
1629 wqe->u.init.mpareqbit_p2ptype =
1630 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
1631 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
1632 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1633 if (qhp->attr.mpa_attr.recv_marker_enabled)
1634 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1635 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1636 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1637 if (qhp->attr.mpa_attr.crc_enabled)
1638 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1640 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1641 FW_RI_QP_RDMA_WRITE_ENABLE |
1642 FW_RI_QP_BIND_ENABLE;
1643 if (!qhp->ibqp.uobject)
1644 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1645 FW_RI_QP_STAG0_ENABLE;
1646 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1647 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1648 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1649 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1651 wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
1654 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1655 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1656 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1657 rhp->rdev.lldi.vr->rq.start);
1659 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1660 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1661 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1662 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1663 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1664 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1665 if (qhp->attr.mpa_attr.initiator)
1666 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1668 ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
1669 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1673 free_ird(rhp, qhp->attr.max_ird);
1675 pr_debug("ret %d\n", ret);
1679 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1680 enum c4iw_qp_attr_mask mask,
1681 struct c4iw_qp_attributes *attrs,
1685 struct c4iw_qp_attributes newattr = qhp->attr;
1690 struct c4iw_ep *ep = NULL;
1692 pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
1693 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1694 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1696 mutex_lock(&qhp->mutex);
1698 /* Process attr changes if in IDLE */
1699 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1700 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1704 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1705 newattr.enable_rdma_read = attrs->enable_rdma_read;
1706 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1707 newattr.enable_rdma_write = attrs->enable_rdma_write;
1708 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1709 newattr.enable_bind = attrs->enable_bind;
1710 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1711 if (attrs->max_ord > c4iw_max_read_depth) {
1715 newattr.max_ord = attrs->max_ord;
1717 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1718 if (attrs->max_ird > cur_max_read_depth(rhp)) {
1722 newattr.max_ird = attrs->max_ird;
1724 qhp->attr = newattr;
1727 if (mask & C4IW_QP_ATTR_SQ_DB) {
1728 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
1731 if (mask & C4IW_QP_ATTR_RQ_DB) {
1732 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
1736 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1738 if (qhp->attr.state == attrs->next_state)
1741 switch (qhp->attr.state) {
1742 case C4IW_QP_STATE_IDLE:
1743 switch (attrs->next_state) {
1744 case C4IW_QP_STATE_RTS:
1745 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1749 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1753 qhp->attr.mpa_attr = attrs->mpa_attr;
1754 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1755 qhp->ep = qhp->attr.llp_stream_handle;
1756 set_state(qhp, C4IW_QP_STATE_RTS);
1759 * Ref the endpoint here and deref when we
1760 * disassociate the endpoint from the QP. This
1761 * happens in CLOSING->IDLE transition or *->ERROR
1764 c4iw_get_ep(&qhp->ep->com);
1765 ret = rdma_init(rhp, qhp);
1769 case C4IW_QP_STATE_ERROR:
1770 set_state(qhp, C4IW_QP_STATE_ERROR);
1778 case C4IW_QP_STATE_RTS:
1779 switch (attrs->next_state) {
1780 case C4IW_QP_STATE_CLOSING:
1781 t4_set_wq_in_error(&qhp->wq, 0);
1782 set_state(qhp, C4IW_QP_STATE_CLOSING);
1787 c4iw_get_ep(&qhp->ep->com);
1789 ret = rdma_fini(rhp, qhp, ep);
1793 case C4IW_QP_STATE_TERMINATE:
1794 t4_set_wq_in_error(&qhp->wq, 0);
1795 set_state(qhp, C4IW_QP_STATE_TERMINATE);
1796 qhp->attr.layer_etype = attrs->layer_etype;
1797 qhp->attr.ecode = attrs->ecode;
1800 c4iw_get_ep(&qhp->ep->com);
1804 terminate = qhp->attr.send_term;
1805 ret = rdma_fini(rhp, qhp, ep);
1810 case C4IW_QP_STATE_ERROR:
1811 t4_set_wq_in_error(&qhp->wq, 0);
1812 set_state(qhp, C4IW_QP_STATE_ERROR);
1817 c4iw_get_ep(&qhp->ep->com);
1826 case C4IW_QP_STATE_CLOSING:
1829 * Allow kernel users to move to ERROR for qp draining.
1831 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1832 C4IW_QP_STATE_ERROR)) {
1836 switch (attrs->next_state) {
1837 case C4IW_QP_STATE_IDLE:
1839 set_state(qhp, C4IW_QP_STATE_IDLE);
1840 qhp->attr.llp_stream_handle = NULL;
1841 c4iw_put_ep(&qhp->ep->com);
1843 wake_up(&qhp->wait);
1845 case C4IW_QP_STATE_ERROR:
1852 case C4IW_QP_STATE_ERROR:
1853 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1857 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1861 set_state(qhp, C4IW_QP_STATE_IDLE);
1863 case C4IW_QP_STATE_TERMINATE:
1871 pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
1878 pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
1881 /* disassociate the LLP connection */
1882 qhp->attr.llp_stream_handle = NULL;
1886 set_state(qhp, C4IW_QP_STATE_ERROR);
1890 wake_up(&qhp->wait);
1892 mutex_unlock(&qhp->mutex);
1895 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1898 * If disconnect is 1, then we need to initiate a disconnect
1899 * on the EP. This can be a normal close (RTS->CLOSING) or
1900 * an abnormal close (RTS/CLOSING->ERROR).
1903 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
1905 c4iw_put_ep(&ep->com);
1909 * If free is 1, then we've disassociated the EP from the QP
1910 * and we need to dereference the EP.
1913 c4iw_put_ep(&ep->com);
1914 pr_debug("exit state %d\n", qhp->attr.state);
1918 int c4iw_destroy_qp(struct ib_qp *ib_qp)
1920 struct c4iw_dev *rhp;
1921 struct c4iw_qp *qhp;
1922 struct c4iw_qp_attributes attrs;
1924 qhp = to_c4iw_qp(ib_qp);
1927 attrs.next_state = C4IW_QP_STATE_ERROR;
1928 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
1929 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1931 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1932 wait_event(qhp->wait, !qhp->ep);
1934 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1936 spin_lock_irq(&rhp->lock);
1937 if (!list_empty(&qhp->db_fc_entry))
1938 list_del_init(&qhp->db_fc_entry);
1939 spin_unlock_irq(&rhp->lock);
1940 free_ird(rhp, qhp->attr.max_ird);
1942 c4iw_qp_rem_ref(ib_qp);
1944 pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
1948 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1949 struct ib_udata *udata)
1951 struct c4iw_dev *rhp;
1952 struct c4iw_qp *qhp;
1953 struct c4iw_pd *php;
1954 struct c4iw_cq *schp;
1955 struct c4iw_cq *rchp;
1956 struct c4iw_create_qp_resp uresp;
1957 unsigned int sqsize, rqsize = 0;
1958 struct c4iw_ucontext *ucontext;
1960 struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
1961 struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
1963 pr_debug("ib_pd %p\n", pd);
1965 if (attrs->qp_type != IB_QPT_RC)
1966 return ERR_PTR(-EINVAL);
1968 php = to_c4iw_pd(pd);
1970 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1971 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1973 return ERR_PTR(-EINVAL);
1975 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1976 return ERR_PTR(-EINVAL);
1979 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
1980 return ERR_PTR(-E2BIG);
1981 rqsize = attrs->cap.max_recv_wr + 1;
1986 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
1987 return ERR_PTR(-E2BIG);
1988 sqsize = attrs->cap.max_send_wr + 1;
1992 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1994 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1996 return ERR_PTR(-ENOMEM);
1998 qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
1999 if (!qhp->wr_waitp) {
2004 qhp->wq.sq.size = sqsize;
2005 qhp->wq.sq.memsize =
2006 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2007 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
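/*
 * For example, with sqsize = 128 and t4_eq_status_entries = 1 this is
 * 129 * sizeof(union t4_wr) plus 16 flits (128 bytes) of padding.
 */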
2008 qhp->wq.sq.flush_cidx = -1;
2010 qhp->wq.rq.size = rqsize;
2011 qhp->wq.rq.memsize =
2012 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2013 sizeof(*qhp->wq.rq.queue);
2017 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
2019 qhp->wq.rq.memsize =
2020 roundup(qhp->wq.rq.memsize, PAGE_SIZE);
2023 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
2024 ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2025 qhp->wr_waitp, !attrs->srq);
2027 goto err_free_wr_wait;
2029 attrs->cap.max_recv_wr = rqsize - 1;
2030 attrs->cap.max_send_wr = sqsize - 1;
2031 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
2034 qhp->attr.pd = php->pdid;
2035 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
2036 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
2037 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
2038 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
2039 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
2041 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
2042 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
2044 qhp->attr.state = C4IW_QP_STATE_IDLE;
2045 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
2046 qhp->attr.enable_rdma_read = 1;
2047 qhp->attr.enable_rdma_write = 1;
2048 qhp->attr.enable_bind = 1;
2049 qhp->attr.max_ord = 0;
2050 qhp->attr.max_ird = 0;
2051 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
2052 spin_lock_init(&qhp->lock);
2053 mutex_init(&qhp->mutex);
2054 init_waitqueue_head(&qhp->wait);
2055 kref_init(&qhp->kref);
2056 INIT_WORK(&qhp->free_work, free_qp_work);
2058 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
2060 goto err_destroy_qp;
2062 if (udata && ucontext) {
2063 sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
2066 goto err_remove_handle;
2069 rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
2072 goto err_free_sq_key;
2075 sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
2076 if (!sq_db_key_mm) {
2078 goto err_free_rq_key;
2082 kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
2083 if (!rq_db_key_mm) {
2085 goto err_free_sq_db_key;
2088 if (t4_sq_onchip(&qhp->wq.sq)) {
2089 ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
2091 if (!ma_sync_key_mm) {
2093 goto err_free_rq_db_key;
2095 uresp.flags = C4IW_QPF_ONCHIP;
2098 uresp.qid_mask = rhp->rdev.qpmask;
2099 uresp.sqid = qhp->wq.sq.qid;
2100 uresp.sq_size = qhp->wq.sq.size;
2101 uresp.sq_memsize = qhp->wq.sq.memsize;
2103 uresp.rqid = qhp->wq.rq.qid;
2104 uresp.rq_size = qhp->wq.rq.size;
2105 uresp.rq_memsize = qhp->wq.rq.memsize;
2107 spin_lock(&ucontext->mmap_lock);
2108 if (ma_sync_key_mm) {
2109 uresp.ma_sync_key = ucontext->key;
2110 ucontext->key += PAGE_SIZE;
2112 uresp.ma_sync_key = 0;
2114 uresp.sq_key = ucontext->key;
2115 ucontext->key += PAGE_SIZE;
2117 uresp.rq_key = ucontext->key;
2118 ucontext->key += PAGE_SIZE;
2120 uresp.sq_db_gts_key = ucontext->key;
2121 ucontext->key += PAGE_SIZE;
2123 uresp.rq_db_gts_key = ucontext->key;
2124 ucontext->key += PAGE_SIZE;
2126 spin_unlock(&ucontext->mmap_lock);
2127 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
2129 goto err_free_ma_sync_key;
2130 sq_key_mm->key = uresp.sq_key;
2131 sq_key_mm->addr = qhp->wq.sq.phys_addr;
2132 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
2133 insert_mmap(ucontext, sq_key_mm);
2135 rq_key_mm->key = uresp.rq_key;
2136 rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
2137 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
2138 insert_mmap(ucontext, rq_key_mm);
2140 sq_db_key_mm->key = uresp.sq_db_gts_key;
2141 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
2142 sq_db_key_mm->len = PAGE_SIZE;
2143 insert_mmap(ucontext, sq_db_key_mm);
2145 rq_db_key_mm->key = uresp.rq_db_gts_key;
2146 rq_db_key_mm->addr =
2147 (u64)(unsigned long)qhp->wq.rq.bar2_pa;
2148 rq_db_key_mm->len = PAGE_SIZE;
2149 insert_mmap(ucontext, rq_db_key_mm);
2151 if (ma_sync_key_mm) {
2152 ma_sync_key_mm->key = uresp.ma_sync_key;
2153 ma_sync_key_mm->addr =
2154 (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
2155 PCIE_MA_SYNC_A) & PAGE_MASK;
2156 ma_sync_key_mm->len = PAGE_SIZE;
2157 insert_mmap(ucontext, ma_sync_key_mm);
2160 c4iw_get_ucontext(ucontext);
2161 qhp->ucontext = ucontext;
2165 &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
2168 &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
2170 &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
2173 qhp->ibqp.qp_num = qhp->wq.sq.qid;
2175 qhp->srq = to_c4iw_srq(attrs->srq);
2176 INIT_LIST_HEAD(&qhp->db_fc_entry);
2177 pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
2178 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
2179 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
2180 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
2182 err_free_ma_sync_key:
2183 kfree(ma_sync_key_mm);
2186 kfree(rq_db_key_mm);
2188 kfree(sq_db_key_mm);
2195 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
2197 destroy_qp(&rhp->rdev, &qhp->wq,
2198 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
2200 c4iw_put_wr_wait(qhp->wr_waitp);
2203 return ERR_PTR(ret);
2206 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2207 int attr_mask, struct ib_udata *udata)
2209 struct c4iw_dev *rhp;
2210 struct c4iw_qp *qhp;
2211 enum c4iw_qp_attr_mask mask = 0;
2212 struct c4iw_qp_attributes attrs;
2214 pr_debug("ib_qp %p\n", ibqp);
2216 /* iwarp does not support the RTR state */
2217 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
2218 attr_mask &= ~IB_QP_STATE;
2220 /* Make sure we still have something left to do */
2224 memset(&attrs, 0, sizeof attrs);
2225 qhp = to_c4iw_qp(ibqp);
2228 attrs.next_state = c4iw_convert_state(attr->qp_state);
2229 attrs.enable_rdma_read = (attr->qp_access_flags &
2230 IB_ACCESS_REMOTE_READ) ? 1 : 0;
2231 attrs.enable_rdma_write = (attr->qp_access_flags &
2232 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2233 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
2236 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
2237 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
2238 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
2239 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
2240 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
2243 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
2244 * ringing the queue db when we're in DB_FULL mode.
2245 * Only allow this on T4 devices.
2247 attrs.sq_db_inc = attr->sq_psn;
2248 attrs.rq_db_inc = attr->rq_psn;
2249 mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
2250 mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
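/*
 * In DB_FULL recovery mode the caller passes its pending producer-index
 * increments in via sq_psn/rq_psn; c4iw_modify_qp() applies them through
 * ring_kernel_sq_db()/ring_kernel_rq_db().
 */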
2251 if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
2252 (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
2255 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
2258 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
2260 pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
2261 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
2264 void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
2266 struct ib_event event = {0};
2268 event.device = &srq->rhp->ibdev;
2269 event.element.srq = &srq->ibsrq;
2270 event.event = IB_EVENT_SRQ_LIMIT_REACHED;
2271 ib_dispatch_event(&event);
2274 int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
2275 enum ib_srq_attr_mask srq_attr_mask,
2276 struct ib_udata *udata)
2278 struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
2282 * XXX 0 mask == a SW interrupt for srq_limit reached...
2284 if (udata && !srq_attr_mask) {
2285 c4iw_dispatch_srq_limit_reached_event(srq);
2289 /* no support for this yet */
2290 if (srq_attr_mask & IB_SRQ_MAX_WR) {
2295 if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
2297 srq->srq_limit = attr->srq_limit;
2303 int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2304 int attr_mask, struct ib_qp_init_attr *init_attr)
2306 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
2308 memset(attr, 0, sizeof *attr);
2309 memset(init_attr, 0, sizeof *init_attr);
2310 attr->qp_state = to_ib_qp_state(qhp->attr.state);
2311 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
2312 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
2313 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
2314 init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
2315 init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
2316 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
2320 static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2321 struct c4iw_wr_wait *wr_waitp)
2323 struct c4iw_rdev *rdev = &srq->rhp->rdev;
2324 struct sk_buff *skb = srq->destroy_skb;
2325 struct t4_srq *wq = &srq->wq;
2326 struct fw_ri_res_wr *res_wr;
2327 struct fw_ri_res *res;
2330 wr_len = sizeof(*res_wr) + sizeof(*res);
2331 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
2333 res_wr = __skb_put_zero(skb, wr_len);
2335 res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
2336 FW_RI_RES_WR_NRES_V(1) |
2338 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
2339 res_wr->cookie = (uintptr_t)wr_waitp;
2341 res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
2342 res->u.srq.op = FW_RI_RES_OP_RESET;
2343 res->u.srq.srqid = cpu_to_be32(srq->idx);
2344 res->u.srq.eqid = cpu_to_be32(wq->qid);
2346 c4iw_init_wr_wait(wr_waitp);
2347 c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
2349 dma_free_coherent(&rdev->lldi.pdev->dev,
2350 wq->memsize, wq->queue,
2351 dma_unmap_addr(wq, mapping));
2352 c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
2354 c4iw_put_qpid(rdev, wq->qid, uctx);
2357 static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2358 struct c4iw_wr_wait *wr_waitp)
2360 struct c4iw_rdev *rdev = &srq->rhp->rdev;
2361 int user = (uctx != &rdev->uctx);
2362 struct t4_srq *wq = &srq->wq;
2363 struct fw_ri_res_wr *res_wr;
2364 struct fw_ri_res *res;
2365 struct sk_buff *skb;
2370 wq->qid = c4iw_get_qpid(rdev, uctx);
2375 wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
2379 wq->pending_wrs = kcalloc(srq->wq.size,
2380 sizeof(*srq->wq.pending_wrs),
2382 if (!wq->pending_wrs)
2383 goto err_free_sw_rq;
2386 wq->rqt_size = wq->size;
2387 wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
2388 if (!wq->rqt_hwaddr)
2389 goto err_free_pending_wrs;
2390 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
2393 wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
2394 wq->memsize, &wq->dma_addr,
2397 goto err_free_rqtpool;
2399 memset(wq->queue, 0, wq->memsize);
2400 dma_unmap_addr_set(wq, mapping, wq->dma_addr);
2402 wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, T4_BAR2_QTYPE_EGRESS,
2404 user ? &wq->bar2_pa : NULL);
2407 * User mode must have bar2 access.
2410 if (user && !wq->bar2_va) {
2411 pr_warn("%s: srqid %u not in BAR2 range.\n",
2412 pci_name(rdev->lldi.pdev), wq->qid);
2414 goto err_free_queue;
2417 /* build fw_ri_res_wr */
2418 wr_len = sizeof(*res_wr) + sizeof(*res);
2420 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
2422 goto err_free_queue;
2423 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
2425 res_wr = __skb_put_zero(skb, wr_len);
2427 res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
2428 FW_RI_RES_WR_NRES_V(1) |
2430 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
2431 res_wr->cookie = (uintptr_t)wr_waitp;
2433 res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
2434 res->u.srq.op = FW_RI_RES_OP_WRITE;
2437 * eqsize is the number of 64B entries plus the status page size.
2439 eqsize = wq->size * T4_RQ_NUM_SLOTS +
2440 rdev->hw_queue.t4_eq_status_entries;
2441 res->u.srq.eqid = cpu_to_be32(wq->qid);
2442 res->u.srq.fetchszm_to_iqid =
2443 /* no host cidx updates */
2444 cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
2445 FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
2446 FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
2447 FW_RI_RES_WR_FETCHRO_V(0)); /* relaxed_ordering */
2448 res->u.srq.dcaen_to_eqsize =
2449 cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
2450 FW_RI_RES_WR_DCACPU_V(0) |
2451 FW_RI_RES_WR_FBMIN_V(2) |
2452 FW_RI_RES_WR_FBMAX_V(3) |
2453 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
2454 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
2455 FW_RI_RES_WR_EQSIZE_V(eqsize));
2456 res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
2457 res->u.srq.srqid = cpu_to_be32(srq->idx);
2458 res->u.srq.pdid = cpu_to_be32(srq->pdid);
2459 res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
2460 res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
2461 rdev->lldi.vr->rq.start);
2463 c4iw_init_wr_wait(wr_waitp);
2465 ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
2467 goto err_free_queue;
2469 pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n"
2470 " bar2_addr %p rqt addr 0x%x size %d\n",
2471 __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
2472 (u64)virt_to_phys(wq->queue), wq->bar2_va,
2473 wq->rqt_hwaddr, wq->rqt_size);
2477 dma_free_coherent(&rdev->lldi.pdev->dev,
2478 wq->memsize, wq->queue,
2479 dma_unmap_addr(wq, mapping));
2481 c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
2482 err_free_pending_wrs:
2484 kfree(wq->pending_wrs);
2489 c4iw_put_qpid(rdev, wq->qid, uctx);
2494 void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
2499 dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
2502 if (dst >= (u64 *)&srq->queue[srq->size])
2503 dst = (u64 *)srq->queue;
2505 if (dst >= (u64 *)&srq->queue[srq->size])
2506 dst = (u64 *)srq->queue;
2511 struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
2512 struct ib_udata *udata)
2514 struct c4iw_dev *rhp;
2515 struct c4iw_srq *srq;
2516 struct c4iw_pd *php;
2517 struct c4iw_create_srq_resp uresp;
2518 struct c4iw_ucontext *ucontext;
2519 struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
2524 pr_debug("%s ib_pd %p\n", __func__, pd);
2526 php = to_c4iw_pd(pd);
2529 if (!rhp->rdev.lldi.vr->srq.size)
2530 return ERR_PTR(-EINVAL);
2531 if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
2532 return ERR_PTR(-E2BIG);
2533 if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
2534 return ERR_PTR(-E2BIG);
2537 * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
2539 rqsize = attrs->attr.max_wr + 1;
2540 rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
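/*
 * For example, attrs->attr.max_wr = 100 gives rqsize = 101, rounded up
 * here to 128 entries.
 */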
2542 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
2544 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
2546 return ERR_PTR(-ENOMEM);
2548 srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
2549 if (!srq->wr_waitp) {
2554 srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
2557 goto err_free_wr_wait;
2560 wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
2561 srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
2562 if (!srq->destroy_skb) {
2564 goto err_free_srq_idx;
2568 srq->pdid = php->pdid;
2570 srq->wq.size = rqsize;
2572 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2573 sizeof(*srq->wq.queue);
2575 srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);
2577 ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
2578 &rhp->rdev.uctx, srq->wr_waitp);
2581 attrs->attr.max_wr = rqsize - 1;
2583 if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
2584 srq->flags = T4_SRQ_LIMIT_SUPPORT;
2586 ret = insert_handle(rhp, &rhp->qpidr, srq, srq->wq.qid);
2588 goto err_free_queue;
2591 srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
2594 goto err_remove_handle;
2596 srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
2597 if (!srq_db_key_mm) {
2599 goto err_free_srq_key_mm;
2601 uresp.flags = srq->flags;
2602 uresp.qid_mask = rhp->rdev.qpmask;
2603 uresp.srqid = srq->wq.qid;
2604 uresp.srq_size = srq->wq.size;
2605 uresp.srq_memsize = srq->wq.memsize;
2606 uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
2607 spin_lock(&ucontext->mmap_lock);
2608 uresp.srq_key = ucontext->key;
2609 ucontext->key += PAGE_SIZE;
2610 uresp.srq_db_gts_key = ucontext->key;
2611 ucontext->key += PAGE_SIZE;
2612 spin_unlock(&ucontext->mmap_lock);
2613 ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2615 goto err_free_srq_db_key_mm;
2616 srq_key_mm->key = uresp.srq_key;
2617 srq_key_mm->addr = virt_to_phys(srq->wq.queue);
2618 srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
2619 insert_mmap(ucontext, srq_key_mm);
2620 srq_db_key_mm->key = uresp.srq_db_gts_key;
2621 srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
2622 srq_db_key_mm->len = PAGE_SIZE;
2623 insert_mmap(ucontext, srq_db_key_mm);
2626 pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
2627 __func__, srq->wq.qid, srq->idx, srq->wq.size,
2628 (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
2630 spin_lock_init(&srq->lock);
2632 err_free_srq_db_key_mm:
2633 kfree(srq_db_key_mm);
2634 err_free_srq_key_mm:
2637 remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
2639 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2643 kfree_skb(srq->destroy_skb);
2645 c4iw_free_srq_idx(&rhp->rdev, srq->idx);
2647 c4iw_put_wr_wait(srq->wr_waitp);
2650 return ERR_PTR(ret);
2653 int c4iw_destroy_srq(struct ib_srq *ibsrq)
2655 struct c4iw_dev *rhp;
2656 struct c4iw_srq *srq;
2657 struct c4iw_ucontext *ucontext;
2659 srq = to_c4iw_srq(ibsrq);
2662 pr_debug("%s id %d\n", __func__, srq->wq.qid);
2664 remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
2665 ucontext = ibsrq->uobject ?
2666 to_c4iw_ucontext(ibsrq->uobject->context) : NULL;
2667 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2669 c4iw_free_srq_idx(&rhp->rdev, srq->idx);
2670 c4iw_put_wr_wait(srq->wr_waitp);