2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: IB Verbs interpreter
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/uverbs_ioctl.h>
56 #include "qplib_res.h"
59 #include "qplib_rcfw.h"
63 #include <rdma/bnxt_re-abi.h>
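/* Helpers that translate verbs access flags to and from the
 * BNXT_QPLIB_ACCESS_* bits consumed by the qplib layer.
 */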
65 static int __from_ib_access_flags(int iflags)
69 if (iflags & IB_ACCESS_LOCAL_WRITE)
70 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
71 if (iflags & IB_ACCESS_REMOTE_READ)
72 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
73 if (iflags & IB_ACCESS_REMOTE_WRITE)
74 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
75 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
76 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
77 if (iflags & IB_ACCESS_MW_BIND)
78 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
79 if (iflags & IB_ZERO_BASED)
80 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
81 if (iflags & IB_ACCESS_ON_DEMAND)
82 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
86 static enum ib_access_flags __to_ib_access_flags(int qflags)
88 enum ib_access_flags iflags = 0;
90 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
91 iflags |= IB_ACCESS_LOCAL_WRITE;
92 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
93 iflags |= IB_ACCESS_REMOTE_WRITE;
94 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
95 iflags |= IB_ACCESS_REMOTE_READ;
96 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
97 iflags |= IB_ACCESS_REMOTE_ATOMIC;
98 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
99 iflags |= IB_ACCESS_MW_BIND;
100 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
101 iflags |= IB_ZERO_BASED;
102 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
103 iflags |= IB_ACCESS_ON_DEMAND;
107 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
108 struct bnxt_qplib_sge *sg_list, int num)
112 for (i = 0; i < num; i++) {
113 sg_list[i].addr = ib_sg_list[i].addr;
114 sg_list[i].lkey = ib_sg_list[i].lkey;
115 sg_list[i].size = ib_sg_list[i].length;
116 total += sg_list[i].size;
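/* Device attributes below are reported from the per-device copy cached
 * in rdev->dev_attr; features the device does not implement (EE, FMR,
 * multicast) are reported as zero.
 */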
122 int bnxt_re_query_device(struct ib_device *ibdev,
123 struct ib_device_attr *ib_attr,
124 struct ib_udata *udata)
126 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
127 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
129 memset(ib_attr, 0, sizeof(*ib_attr));
130 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
131 min(sizeof(dev_attr->fw_ver),
132 sizeof(ib_attr->fw_ver)));
133 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
134 (u8 *)&ib_attr->sys_image_guid);
135 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
136 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
138 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
139 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
140 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
141 ib_attr->max_qp = dev_attr->max_qp;
142 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
143 ib_attr->device_cap_flags =
144 IB_DEVICE_CURR_QP_STATE_MOD
145 | IB_DEVICE_RC_RNR_NAK_GEN
146 | IB_DEVICE_SHUTDOWN_PORT
147 | IB_DEVICE_SYS_IMAGE_GUID
148 | IB_DEVICE_LOCAL_DMA_LKEY
149 | IB_DEVICE_RESIZE_MAX_WR
150 | IB_DEVICE_PORT_ACTIVE_EVENT
151 | IB_DEVICE_N_NOTIFY_CQ
152 | IB_DEVICE_MEM_WINDOW
153 | IB_DEVICE_MEM_WINDOW_TYPE_2B
154 | IB_DEVICE_MEM_MGT_EXTENSIONS;
155 ib_attr->max_send_sge = dev_attr->max_qp_sges;
156 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
157 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
158 ib_attr->max_cq = dev_attr->max_cq;
159 ib_attr->max_cqe = dev_attr->max_cq_wqes;
160 ib_attr->max_mr = dev_attr->max_mr;
161 ib_attr->max_pd = dev_attr->max_pd;
162 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
163 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
164 ib_attr->atomic_cap = IB_ATOMIC_NONE;
165 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
167 ib_attr->max_ee_rd_atom = 0;
168 ib_attr->max_res_rd_atom = 0;
169 ib_attr->max_ee_init_rd_atom = 0;
171 ib_attr->max_rdd = 0;
172 ib_attr->max_mw = dev_attr->max_mw;
173 ib_attr->max_raw_ipv6_qp = 0;
174 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
175 ib_attr->max_mcast_grp = 0;
176 ib_attr->max_mcast_qp_attach = 0;
177 ib_attr->max_total_mcast_qp_attach = 0;
178 ib_attr->max_ah = dev_attr->max_ah;
180 ib_attr->max_fmr = 0;
181 ib_attr->max_map_per_fmr = 0;
183 ib_attr->max_srq = dev_attr->max_srq;
184 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
185 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
187 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
189 ib_attr->max_pkeys = 1;
190 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
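/* Modification requests for the system-image GUID and the node
 * description are ignored here; both are treated as read-only.
 */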
194 int bnxt_re_modify_device(struct ib_device *ibdev,
195 int device_modify_mask,
196 struct ib_device_modify *device_modify)
198 switch (device_modify_mask) {
199 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
200 /* Modifying the GUID requires modification of the GID table */
201 /* GUID should be made READ-ONLY */
203 case IB_DEVICE_MODIFY_NODE_DESC:
204 /* Node Desc should be made READ-ONLY */
213 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
214 struct ib_port_attr *port_attr)
216 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
217 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
219 memset(port_attr, 0, sizeof(*port_attr));
221 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
222 port_attr->state = IB_PORT_ACTIVE;
223 port_attr->phys_state = 5; /* link up */
225 port_attr->state = IB_PORT_DOWN;
226 port_attr->phys_state = 3; /* disabled */
228 port_attr->max_mtu = IB_MTU_4096;
229 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
230 port_attr->gid_tbl_len = dev_attr->max_sgid;
231 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
232 IB_PORT_DEVICE_MGMT_SUP |
233 IB_PORT_VENDOR_CLASS_SUP;
234 port_attr->ip_gids = true;
236 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
237 port_attr->bad_pkey_cntr = 0;
238 port_attr->qkey_viol_cntr = 0;
239 port_attr->pkey_tbl_len = dev_attr->max_pkey;
241 port_attr->sm_lid = 0;
243 port_attr->max_vl_num = 4;
244 port_attr->sm_sl = 0;
245 port_attr->subnet_timeout = 0;
246 port_attr->init_type_reply = 0;
247 port_attr->active_speed = rdev->active_speed;
248 port_attr->active_width = rdev->active_width;
253 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
254 struct ib_port_immutable *immutable)
256 struct ib_port_attr port_attr;
258 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
261 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
262 immutable->gid_tbl_len = port_attr.gid_tbl_len;
263 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
264 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
265 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
269 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
271 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
273 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
274 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
275 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
278 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
279 u16 index, u16 *pkey)
281 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
283 /* Ignore port_num */
285 memset(pkey, 0, sizeof(*pkey));
286 return bnxt_qplib_get_pkey(&rdev->qplib_res,
287 &rdev->qplib_res.pkey_tbl, index, pkey);
290 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
291 int index, union ib_gid *gid)
293 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
296 /* Ignore port_num */
297 memset(gid, 0, sizeof(*gid));
298 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
299 &rdev->qplib_res.sgid_tbl, index,
300 (struct bnxt_qplib_gid *)gid);
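/* GID add/del: each SGID table slot keeps a bnxt_re_gid_ctx whose
 * refcnt counts how many stack-level GID entries (e.g. the RoCE v1/v2
 * duplicates) share the same HW entry.
 */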
304 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
307 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
308 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
309 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
310 struct bnxt_qplib_gid *gid_to_del;
312 /* Delete the entry from the hardware */
317 if (sgid_tbl && sgid_tbl->active) {
318 if (ctx->idx >= sgid_tbl->max)
320 gid_to_del = &sgid_tbl->tbl[ctx->idx];
321 /* DEL_GID is called in WQ context (netdevice_event_work_handler)
322 * or via the ib_unregister_device path. In the former case QP1
323 * may not be destroyed yet, in which case just return as FW
324 * needs that entry to be present and will fail its deletion.
325 * We could get invoked again after QP1 is destroyed OR get an
326 * ADD_GID call with a different GID value for the same index
327 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
330 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
331 ctx->refcnt == 1 && rdev->qp1_sqp) {
332 dev_dbg(rdev_to_dev(rdev),
333 "Trying to delete GID0 while QP1 is alive\n");
338 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
340 dev_err(rdev_to_dev(rdev),
341 "Failed to remove GID: %#x", rc);
343 ctx_tbl = sgid_tbl->ctx;
344 ctx_tbl[ctx->idx] = NULL;
354 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
358 u16 vlan_id = 0xFFFF;
359 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
360 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
361 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
363 if ((attr->ndev) && is_vlan_dev(attr->ndev))
364 vlan_id = vlan_dev_vlan_id(attr->ndev);
366 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
367 rdev->qplib_res.netdev->dev_addr,
368 vlan_id, true, &tbl_idx);
369 if (rc == -EALREADY) {
370 ctx_tbl = sgid_tbl->ctx;
371 ctx_tbl[tbl_idx]->refcnt++;
372 *context = ctx_tbl[tbl_idx];
377 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
381 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
384 ctx_tbl = sgid_tbl->ctx;
387 ctx_tbl[tbl_idx] = ctx;
393 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
396 return IB_LINK_LAYER_ETHERNET;
399 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
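/* Fence support: bnxt_re_create_fence_wqe() pre-builds a type-1
 * memory-window BIND WQE against the PD's fence MR/MW pair (set up in
 * bnxt_re_create_fence_mr() below); bnxt_re_bind_fence_mw() posts a
 * copy of it with the unconditional fence flag set.
 */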
401 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
403 struct bnxt_re_fence_data *fence = &pd->fence;
404 struct ib_mr *ib_mr = &fence->mr->ib_mr;
405 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
407 memset(wqe, 0, sizeof(*wqe));
408 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
409 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
410 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
411 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
412 wqe->bind.zero_based = false;
413 wqe->bind.parent_l_key = ib_mr->lkey;
414 wqe->bind.va = (u64)(unsigned long)fence->va;
415 wqe->bind.length = fence->size;
416 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
417 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
419 /* Save the initial rkey in fence structure for now;
420 * wqe->bind.r_key will be set at (re)bind time.
422 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
425 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
427 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
429 struct ib_pd *ib_pd = qp->ib_qp.pd;
430 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
431 struct bnxt_re_fence_data *fence = &pd->fence;
432 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
433 struct bnxt_qplib_swqe wqe;
436 memcpy(&wqe, fence_wqe, sizeof(wqe));
437 wqe.bind.r_key = fence->bind_rkey;
438 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
440 dev_dbg(rdev_to_dev(qp->rdev),
441 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
442 wqe.bind.r_key, qp->qplib_qp.id, pd);
443 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
445 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
448 bnxt_qplib_post_send_db(&qp->qplib_qp);
453 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
455 struct bnxt_re_fence_data *fence = &pd->fence;
456 struct bnxt_re_dev *rdev = pd->rdev;
457 struct device *dev = &rdev->en_dev->pdev->dev;
458 struct bnxt_re_mr *mr = fence->mr;
461 bnxt_re_dealloc_mw(fence->mw);
466 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
469 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
473 if (fence->dma_addr) {
474 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
480 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
482 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
483 struct bnxt_re_fence_data *fence = &pd->fence;
484 struct bnxt_re_dev *rdev = pd->rdev;
485 struct device *dev = &rdev->en_dev->pdev->dev;
486 struct bnxt_re_mr *mr = NULL;
487 dma_addr_t dma_addr = 0;
492 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
494 rc = dma_mapping_error(dev, dma_addr);
496 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
501 fence->dma_addr = dma_addr;
504 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
511 mr->qplib_mr.pd = &pd->qplib_pd;
512 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
513 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
514 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
516 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
521 mr->ib_mr.lkey = mr->qplib_mr.lkey;
522 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
523 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
525 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
526 BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
528 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
531 mr->ib_mr.rkey = mr->qplib_mr.rkey;
533 /* Create a fence MW only for kernel consumers */
534 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
536 dev_err(rdev_to_dev(rdev),
537 "Failed to create fence-MW for PD: %p\n", pd);
543 bnxt_re_create_fence_wqe(pd);
547 bnxt_re_destroy_fence_mr(pd);
551 /* Protection Domains */
552 void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
554 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
555 struct bnxt_re_dev *rdev = pd->rdev;
557 bnxt_re_destroy_fence_mr(pd);
560 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
564 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
566 struct ib_device *ibdev = ibpd->device;
567 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
568 struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
569 udata, struct bnxt_re_ucontext, ib_uctx);
570 struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
574 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
575 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
581 struct bnxt_re_pd_resp resp;
583 if (!ucntx->dpi.dbr) {
584 /* Allocate DPI in alloc_pd to avoid failure of
585 * ibv_devinfo and family of applications when DPIs
588 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
589 &ucntx->dpi, ucntx)) {
595 resp.pdid = pd->qplib_pd.id;
596 /* Still allow mapping this DBR to the new user PD. */
597 resp.dpi = ucntx->dpi.dpi;
598 resp.dbr = (u64)ucntx->dpi.umdbr;
600 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
602 dev_err(rdev_to_dev(rdev),
603 "Failed to copy user response\n");
609 if (bnxt_re_create_fence_mr(pd))
610 dev_warn(rdev_to_dev(rdev),
611 "Failed to create Fence-MR\n");
614 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
620 /* Address Handles */
621 void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
623 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
624 struct bnxt_re_dev *rdev = ah->rdev;
626 bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
627 !(flags & RDMA_DESTROY_AH_SLEEPABLE));
630 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
635 case RDMA_NETWORK_IPV4:
636 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
638 case RDMA_NETWORK_IPV6:
639 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
642 nw_type = CMDQ_CREATE_AH_TYPE_V1;
648 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
649 u32 flags, struct ib_udata *udata)
651 struct ib_pd *ib_pd = ib_ah->pd;
652 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
653 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
654 struct bnxt_re_dev *rdev = pd->rdev;
655 const struct ib_gid_attr *sgid_attr;
656 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
660 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
661 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
666 ah->qplib_ah.pd = &pd->qplib_pd;
668 /* Supply the configuration for the HW */
669 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
670 sizeof(union ib_gid));
672 * If RoCE V2 is enabled, the stack will have two entries for
673 * each GID entry. Avoid this duplicate entry in HW by dividing
674 * the GID index by 2 for RoCE V2
676 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
677 ah->qplib_ah.host_sgid_index = grh->sgid_index;
678 ah->qplib_ah.traffic_class = grh->traffic_class;
679 ah->qplib_ah.flow_label = grh->flow_label;
680 ah->qplib_ah.hop_limit = grh->hop_limit;
681 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
683 sgid_attr = grh->sgid_attr;
684 /* Get network header type for this GID */
685 nw_type = rdma_gid_attr_network_type(sgid_attr);
686 ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
688 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
689 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
690 !(flags & RDMA_CREATE_AH_SLEEPABLE));
692 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
696 /* Write AVID to shared page. */
698 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
699 udata, struct bnxt_re_ucontext, ib_uctx);
703 spin_lock_irqsave(&uctx->sh_lock, flag);
704 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
705 *wrptr = ah->qplib_ah.id;
706 wmb(); /* make sure cache is updated. */
707 spin_unlock_irqrestore(&uctx->sh_lock, flag);
713 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
718 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
720 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
722 ah_attr->type = ib_ah->type;
723 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
724 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
725 rdma_ah_set_grh(ah_attr, NULL, 0,
726 ah->qplib_ah.host_sgid_index,
727 0, ah->qplib_ah.traffic_class);
728 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
729 rdma_ah_set_port_num(ah_attr, 1);
730 rdma_ah_set_static_rate(ah_attr, 0);
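/* CQ lock helpers: the send-CQ lock is always taken first and the
 * recv-CQ lock second (only when the two CQs differ), so the QP
 * flush/clean paths use a single, consistent lock order.
 */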
734 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
735 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
739 spin_lock_irqsave(&qp->scq->cq_lock, flags);
740 if (qp->rcq != qp->scq)
741 spin_lock(&qp->rcq->cq_lock);
743 __acquire(&qp->rcq->cq_lock);
748 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
750 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
752 if (qp->rcq != qp->scq)
753 spin_unlock(&qp->rcq->cq_lock);
755 __release(&qp->rcq->cq_lock);
756 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
760 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
762 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
763 struct bnxt_re_dev *rdev = qp->rdev;
767 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
768 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
770 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
774 if (rdma_is_kernel_res(&qp->ib_qp.res)) {
775 flags = bnxt_re_lock_cqs(qp);
776 bnxt_qplib_clean_qp(&qp->qplib_qp);
777 bnxt_re_unlock_cqs(qp, flags);
780 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
782 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
783 bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah,
786 bnxt_qplib_clean_qp(&qp->qplib_qp);
787 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
788 &rdev->qp1_sqp->qplib_qp);
790 dev_err(rdev_to_dev(rdev),
791 "Failed to destroy Shadow QP");
794 bnxt_qplib_free_qp_res(&rdev->qplib_res,
795 &rdev->qp1_sqp->qplib_qp);
796 mutex_lock(&rdev->qp_lock);
797 list_del(&rdev->qp1_sqp->list);
798 atomic_dec(&rdev->qp_count);
799 mutex_unlock(&rdev->qp_lock);
802 kfree(rdev->qp1_sqp);
803 rdev->qp1_sqp = NULL;
807 if (!IS_ERR_OR_NULL(qp->rumem))
808 ib_umem_release(qp->rumem);
809 if (!IS_ERR_OR_NULL(qp->sumem))
810 ib_umem_release(qp->sumem);
812 mutex_lock(&rdev->qp_lock);
814 atomic_dec(&rdev->qp_count);
815 mutex_unlock(&rdev->qp_lock);
820 static u8 __from_ib_qp_type(enum ib_qp_type type)
824 return CMDQ_CREATE_QP1_TYPE_GSI;
826 return CMDQ_CREATE_QP_TYPE_RC;
828 return CMDQ_CREATE_QP_TYPE_UD;
834 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
835 struct bnxt_re_qp *qp, struct ib_udata *udata)
837 struct bnxt_re_qp_req ureq;
838 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
839 struct ib_umem *umem;
840 int bytes = 0, psn_sz;
841 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
842 udata, struct bnxt_re_ucontext, ib_uctx);
844 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
847 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
848 /* Consider mapping PSN search memory only for RC QPs. */
849 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
850 psn_sz = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
851 sizeof(struct sq_psn_search_ext) :
852 sizeof(struct sq_psn_search);
853 bytes += (qplib_qp->sq.max_wqe * psn_sz);
855 bytes = PAGE_ALIGN(bytes);
856 umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
858 return PTR_ERR(umem);
861 qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
862 qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
863 qplib_qp->sq.sg_info.nmap = umem->nmap;
864 qplib_qp->qp_handle = ureq.qp_handle;
866 if (!qp->qplib_qp.srq) {
867 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
868 bytes = PAGE_ALIGN(bytes);
869 umem = ib_umem_get(udata, ureq.qprva, bytes,
870 IB_ACCESS_LOCAL_WRITE, 1);
874 qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
875 qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
876 qplib_qp->rq.sg_info.nmap = umem->nmap;
879 qplib_qp->dpi = &cntx->dpi;
882 ib_umem_release(qp->sumem);
884 memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
886 return PTR_ERR(umem);
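/* The shadow QP built below is a kernel-only UD QP, with its own AH,
 * that the driver uses to handle QP1 (GSI) traffic; it is created from
 * bnxt_re_create_qp() right after the real QP1 is created.
 */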
889 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
890 (struct bnxt_re_pd *pd,
891 struct bnxt_qplib_res *qp1_res,
892 struct bnxt_qplib_qp *qp1_qp)
894 struct bnxt_re_dev *rdev = pd->rdev;
895 struct bnxt_re_ah *ah;
899 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
904 ah->qplib_ah.pd = &pd->qplib_pd;
906 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
910 /* supply the dgid data same as sgid */
911 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
912 sizeof(union ib_gid));
913 ah->qplib_ah.sgid_index = 0;
915 ah->qplib_ah.traffic_class = 0;
916 ah->qplib_ah.flow_label = 0;
917 ah->qplib_ah.hop_limit = 1;
919 /* Have DMAC same as SMAC */
920 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
922 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
924 dev_err(rdev_to_dev(rdev),
925 "Failed to allocate HW AH for Shadow QP");
936 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
937 (struct bnxt_re_pd *pd,
938 struct bnxt_qplib_res *qp1_res,
939 struct bnxt_qplib_qp *qp1_qp)
941 struct bnxt_re_dev *rdev = pd->rdev;
942 struct bnxt_re_qp *qp;
945 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
951 /* Initialize the shadow QP structure from the QP1 values */
952 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
954 qp->qplib_qp.pd = &pd->qplib_pd;
955 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
956 qp->qplib_qp.type = IB_QPT_UD;
958 qp->qplib_qp.max_inline_data = 0;
959 qp->qplib_qp.sig_type = true;
961 /* Shadow QP SQ depth should be same as QP1 RQ depth */
962 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
963 qp->qplib_qp.sq.max_sge = 2;
964 /* Q full delta can be 1 since it is internal QP */
965 qp->qplib_qp.sq.q_full_delta = 1;
967 qp->qplib_qp.scq = qp1_qp->scq;
968 qp->qplib_qp.rcq = qp1_qp->rcq;
970 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
971 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
972 /* Q full delta can be 1 since it is internal QP */
973 qp->qplib_qp.rq.q_full_delta = 1;
975 qp->qplib_qp.mtu = qp1_qp->mtu;
977 qp->qplib_qp.sq_hdr_buf_size = 0;
978 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
979 qp->qplib_qp.dpi = &rdev->dpi_privileged;
981 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
985 rdev->sqp_id = qp->qplib_qp.id;
987 spin_lock_init(&qp->sq_lock);
988 INIT_LIST_HEAD(&qp->list);
989 mutex_lock(&rdev->qp_lock);
990 list_add_tail(&qp->list, &rdev->qp_list);
991 atomic_inc(&rdev->qp_count);
992 mutex_unlock(&rdev->qp_lock);
999 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1000 struct ib_qp_init_attr *qp_init_attr,
1001 struct ib_udata *udata)
1003 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1004 struct bnxt_re_dev *rdev = pd->rdev;
1005 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1006 struct bnxt_re_qp *qp;
1007 struct bnxt_re_cq *cq;
1008 struct bnxt_re_srq *srq;
1011 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1012 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1013 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1014 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1015 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1016 return ERR_PTR(-EINVAL);
1018 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1020 return ERR_PTR(-ENOMEM);
1023 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1024 qp->qplib_qp.pd = &pd->qplib_pd;
1025 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1026 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1028 if (qp_init_attr->qp_type == IB_QPT_GSI &&
1029 bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
1030 qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI;
1031 if (qp->qplib_qp.type == IB_QPT_MAX) {
1032 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1038 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1039 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1040 IB_SIGNAL_ALL_WR) ? true : false);
1042 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1043 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1044 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1046 if (qp_init_attr->send_cq) {
1047 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1050 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1054 qp->qplib_qp.scq = &cq->qplib_cq;
1058 if (qp_init_attr->recv_cq) {
1059 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1062 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1066 qp->qplib_qp.rcq = &cq->qplib_cq;
1070 if (qp_init_attr->srq) {
1071 srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
1074 dev_err(rdev_to_dev(rdev), "SRQ not found");
1078 qp->qplib_qp.srq = &srq->qplib_srq;
1079 qp->qplib_qp.rq.max_wqe = 0;
1081 /* Allocate 1 more than what's provided so posting max doesn't
1084 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1085 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1086 dev_attr->max_qp_wqes + 1);
1088 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1089 qp_init_attr->cap.max_recv_wr;
1091 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1092 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1093 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1096 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1098 if (qp_init_attr->qp_type == IB_QPT_GSI &&
1099 !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
1100 /* Allocate 1 more than what's provided */
1101 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1102 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1103 dev_attr->max_qp_wqes + 1);
1104 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1105 qp_init_attr->cap.max_send_wr;
1106 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1107 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1108 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1109 qp->qplib_qp.sq.max_sge++;
1110 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1111 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1113 qp->qplib_qp.rq_hdr_buf_size =
1114 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1116 qp->qplib_qp.sq_hdr_buf_size =
1117 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1118 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1119 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1121 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1124 /* Create a shadow QP to handle the QP1 traffic */
1125 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1127 if (!rdev->qp1_sqp) {
1129 dev_err(rdev_to_dev(rdev),
1130 "Failed to create Shadow QP for QP1");
1133 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1135 if (!rdev->sqp_ah) {
1136 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1137 &rdev->qp1_sqp->qplib_qp);
1139 dev_err(rdev_to_dev(rdev),
1140 "Failed to create AH entry for ShadowQP");
1145 /* Allocate 128 + 1 more than what's provided */
1146 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1147 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1148 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1149 dev_attr->max_qp_wqes +
1150 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1151 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1154 * Reserving one slot for the Phantom WQE. The application can
1155 * post one extra entry in this case; this is allowed to avoid an
1156 * unexpected Queue full condition
1159 qp->qplib_qp.sq.q_full_delta -= 1;
1161 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1162 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1164 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1168 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1171 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1173 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1178 qp->ib_qp.qp_num = qp->qplib_qp.id;
1179 spin_lock_init(&qp->sq_lock);
1180 spin_lock_init(&qp->rq_lock);
1183 struct bnxt_re_qp_resp resp;
1185 resp.qpid = qp->ib_qp.qp_num;
1187 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1189 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1193 INIT_LIST_HEAD(&qp->list);
1194 mutex_lock(&rdev->qp_lock);
1195 list_add_tail(&qp->list, &rdev->qp_list);
1196 atomic_inc(&rdev->qp_count);
1197 mutex_unlock(&rdev->qp_lock);
1201 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1205 ib_umem_release(qp->rumem);
1207 ib_umem_release(qp->sumem);
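/* Translation helpers between the ib_verbs QP state / path MTU enums
 * and the CMDQ_MODIFY_QP encodings used by the firmware command queue.
 */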
1214 static u8 __from_ib_qp_state(enum ib_qp_state state)
1218 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1220 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1222 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1224 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1226 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1228 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1231 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1235 static enum ib_qp_state __to_ib_qp_state(u8 state)
1238 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1239 return IB_QPS_RESET;
1240 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1242 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1244 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1246 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1248 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1250 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1256 static u32 __from_ib_mtu(enum ib_mtu mtu)
1260 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1262 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1264 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1266 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1268 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1270 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1274 static enum ib_mtu __to_ib_mtu(u32 mtu)
1276 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1277 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1279 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1281 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1283 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1285 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1292 /* Shared Receive Queues */
1293 void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1295 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1297 struct bnxt_re_dev *rdev = srq->rdev;
1298 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1299 struct bnxt_qplib_nq *nq = NULL;
1302 nq = qplib_srq->cq->nq;
1303 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1305 ib_umem_release(srq->umem);
1306 atomic_dec(&rdev->srq_count);
1311 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1312 struct bnxt_re_pd *pd,
1313 struct bnxt_re_srq *srq,
1314 struct ib_udata *udata)
1316 struct bnxt_re_srq_req ureq;
1317 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1318 struct ib_umem *umem;
1320 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1321 udata, struct bnxt_re_ucontext, ib_uctx);
1323 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1326 bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1327 bytes = PAGE_ALIGN(bytes);
1328 umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
1330 return PTR_ERR(umem);
1333 qplib_srq->sg_info.sglist = umem->sg_head.sgl;
1334 qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
1335 qplib_srq->sg_info.nmap = umem->nmap;
1336 qplib_srq->srq_handle = ureq.srq_handle;
1337 qplib_srq->dpi = &cntx->dpi;
1342 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1343 struct ib_srq_init_attr *srq_init_attr,
1344 struct ib_udata *udata)
1346 struct ib_pd *ib_pd = ib_srq->pd;
1347 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1348 struct bnxt_re_dev *rdev = pd->rdev;
1349 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1350 struct bnxt_re_srq *srq =
1351 container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1352 struct bnxt_qplib_nq *nq = NULL;
1355 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1356 dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
1361 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1367 srq->qplib_srq.pd = &pd->qplib_pd;
1368 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1369 /* Allocate 1 more than what's provided so posting max doesn't
1372 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1373 if (entries > dev_attr->max_srq_wqes + 1)
1374 entries = dev_attr->max_srq_wqes + 1;
1376 srq->qplib_srq.max_wqe = entries;
1377 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1378 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1379 srq->srq_limit = srq_init_attr->attr.srq_limit;
1380 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1384 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1389 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1391 dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1396 struct bnxt_re_srq_resp resp;
1398 resp.srqid = srq->qplib_srq.id;
1399 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1401 dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1402 bnxt_qplib_destroy_srq(&rdev->qplib_res,
1409 atomic_inc(&rdev->srq_count);
1415 ib_umem_release(srq->umem);
1420 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1421 enum ib_srq_attr_mask srq_attr_mask,
1422 struct ib_udata *udata)
1424 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1426 struct bnxt_re_dev *rdev = srq->rdev;
1429 switch (srq_attr_mask) {
1431 /* SRQ resize is not supported */
1434 /* Change the SRQ threshold */
1435 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1438 srq->qplib_srq.threshold = srq_attr->srq_limit;
1439 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1441 dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1444 /* On success, update the shadow */
1445 srq->srq_limit = srq_attr->srq_limit;
1446 /* No need to build and send a response back to udata */
1449 dev_err(rdev_to_dev(rdev),
1450 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1456 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1458 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1460 struct bnxt_re_srq tsrq;
1461 struct bnxt_re_dev *rdev = srq->rdev;
1464 /* Get live SRQ attr */
1465 tsrq.qplib_srq.id = srq->qplib_srq.id;
1466 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1468 dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1471 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1472 srq_attr->max_sge = srq->qplib_srq.max_sge;
1473 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1478 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1479 const struct ib_recv_wr **bad_wr)
1481 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1483 struct bnxt_qplib_swqe wqe;
1484 unsigned long flags;
1487 spin_lock_irqsave(&srq->lock, flags);
1489 /* Transcribe each ib_recv_wr to qplib_swqe */
1490 wqe.num_sge = wr->num_sge;
1491 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1492 wqe.wr_id = wr->wr_id;
1493 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1495 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1502 spin_unlock_irqrestore(&srq->lock, flags);
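/* Mirror QP1 attribute changes (state, pkey index, qkey and SQ PSN)
 * onto the shadow QP so the two stay in step.
 */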
1506 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1507 struct bnxt_re_qp *qp1_qp,
1510 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1513 if (qp_attr_mask & IB_QP_STATE) {
1514 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1515 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1517 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1518 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1519 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1522 if (qp_attr_mask & IB_QP_QKEY) {
1523 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1524 /* Use an arbitrary, driver-chosen QKEY for the shadow QP */
1525 qp->qplib_qp.qkey = 0x81818181;
1527 if (qp_attr_mask & IB_QP_SQ_PSN) {
1528 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1529 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1532 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1534 dev_err(rdev_to_dev(rdev),
1535 "Failed to modify Shadow QP for QP1");
1539 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1540 int qp_attr_mask, struct ib_udata *udata)
1542 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1543 struct bnxt_re_dev *rdev = qp->rdev;
1544 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1545 enum ib_qp_state curr_qp_state, new_qp_state;
1550 qp->qplib_qp.modify_flags = 0;
1551 if (qp_attr_mask & IB_QP_STATE) {
1552 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1553 new_qp_state = qp_attr->qp_state;
1554 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1555 ib_qp->qp_type, qp_attr_mask)) {
1556 dev_err(rdev_to_dev(rdev),
1557 "Invalid attribute mask: %#x specified ",
1559 dev_err(rdev_to_dev(rdev),
1560 "for qpn: %#x type: %#x",
1561 ib_qp->qp_num, ib_qp->qp_type);
1562 dev_err(rdev_to_dev(rdev),
1563 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1564 curr_qp_state, new_qp_state);
1567 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1568 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1571 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1572 dev_dbg(rdev_to_dev(rdev),
1573 "Move QP = %p to flush list\n",
1575 flags = bnxt_re_lock_cqs(qp);
1576 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1577 bnxt_re_unlock_cqs(qp, flags);
1580 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1581 dev_dbg(rdev_to_dev(rdev),
1582 "Move QP = %p out of flush list\n",
1584 flags = bnxt_re_lock_cqs(qp);
1585 bnxt_qplib_clean_qp(&qp->qplib_qp);
1586 bnxt_re_unlock_cqs(qp, flags);
1589 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1590 qp->qplib_qp.modify_flags |=
1591 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1592 qp->qplib_qp.en_sqd_async_notify = true;
1594 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1595 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1596 qp->qplib_qp.access =
1597 __from_ib_access_flags(qp_attr->qp_access_flags);
1598 /* LOCAL_WRITE access must be set to allow RC receive */
1599 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1600 /* Temp: Set all params on QP as of now */
1601 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1602 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
1604 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1605 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1606 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1608 if (qp_attr_mask & IB_QP_QKEY) {
1609 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1610 qp->qplib_qp.qkey = qp_attr->qkey;
1612 if (qp_attr_mask & IB_QP_AV) {
1613 const struct ib_global_route *grh =
1614 rdma_ah_read_grh(&qp_attr->ah_attr);
1615 const struct ib_gid_attr *sgid_attr;
1617 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1618 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1619 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1620 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1621 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1622 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1623 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1624 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1625 sizeof(qp->qplib_qp.ah.dgid.data));
1626 qp->qplib_qp.ah.flow_label = grh->flow_label;
1627 /* If RoCE V2 is enabled, the stack will have two entries for
1628 * each GID entry. Avoid this duplicate entry in HW by dividing
1629 * the GID index by 2 for RoCE V2
1631 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1632 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1633 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1634 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1635 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1636 ether_addr_copy(qp->qplib_qp.ah.dmac,
1637 qp_attr->ah_attr.roce.dmac);
1639 sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
1640 memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr,
1642 nw_type = rdma_gid_attr_network_type(sgid_attr);
1644 case RDMA_NETWORK_IPV4:
1645 qp->qplib_qp.nw_type =
1646 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1648 case RDMA_NETWORK_IPV6:
1649 qp->qplib_qp.nw_type =
1650 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1653 qp->qplib_qp.nw_type =
1654 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1659 if (qp_attr_mask & IB_QP_PATH_MTU) {
1660 qp->qplib_qp.modify_flags |=
1661 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1662 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1663 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1664 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1665 qp->qplib_qp.modify_flags |=
1666 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1667 qp->qplib_qp.path_mtu =
1668 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1670 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1673 if (qp_attr_mask & IB_QP_TIMEOUT) {
1674 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1675 qp->qplib_qp.timeout = qp_attr->timeout;
1677 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1678 qp->qplib_qp.modify_flags |=
1679 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1680 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1682 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1683 qp->qplib_qp.modify_flags |=
1684 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1685 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1687 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1688 qp->qplib_qp.modify_flags |=
1689 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1690 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1692 if (qp_attr_mask & IB_QP_RQ_PSN) {
1693 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1694 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1696 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1697 qp->qplib_qp.modify_flags |=
1698 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1699 /* Cap the max_rd_atomic to device max */
1700 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1701 dev_attr->max_qp_rd_atom);
1703 if (qp_attr_mask & IB_QP_SQ_PSN) {
1704 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1705 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1707 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1708 if (qp_attr->max_dest_rd_atomic >
1709 dev_attr->max_qp_init_rd_atom) {
1710 dev_err(rdev_to_dev(rdev),
1711 "max_dest_rd_atomic requested%d is > dev_max%d",
1712 qp_attr->max_dest_rd_atomic,
1713 dev_attr->max_qp_init_rd_atom);
1717 qp->qplib_qp.modify_flags |=
1718 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1719 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1721 if (qp_attr_mask & IB_QP_CAP) {
1722 qp->qplib_qp.modify_flags |=
1723 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1724 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1725 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1726 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1727 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1728 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1729 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1730 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1731 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1732 (qp_attr->cap.max_inline_data >=
1733 dev_attr->max_inline_data)) {
1734 dev_err(rdev_to_dev(rdev),
1735 "Create QP failed - max exceeded");
1738 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1739 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1740 dev_attr->max_qp_wqes + 1);
1741 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1742 qp_attr->cap.max_send_wr;
1744 * Reserving one slot for the Phantom WQE. Some applications can
1745 * post one extra entry in this case; this is allowed to avoid an
1746 * unexpected Queue full condition
1748 qp->qplib_qp.sq.q_full_delta -= 1;
1749 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1750 if (qp->qplib_qp.rq.max_wqe) {
1751 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1752 qp->qplib_qp.rq.max_wqe =
1753 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1754 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1755 qp_attr->cap.max_recv_wr;
1756 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1758 /* SRQ was used prior, just ignore the RQ caps */
1761 if (qp_attr_mask & IB_QP_DEST_QPN) {
1762 qp->qplib_qp.modify_flags |=
1763 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1764 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1766 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1768 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1771 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1772 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1776 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1777 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1779 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1780 struct bnxt_re_dev *rdev = qp->rdev;
1781 struct bnxt_qplib_qp *qplib_qp;
1784 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1788 qplib_qp->id = qp->qplib_qp.id;
1789 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1791 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1793 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1796 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1797 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1798 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1799 qp_attr->pkey_index = qplib_qp->pkey_index;
1800 qp_attr->qkey = qplib_qp->qkey;
1801 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1802 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1803 qplib_qp->ah.host_sgid_index,
1804 qplib_qp->ah.hop_limit,
1805 qplib_qp->ah.traffic_class);
1806 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1807 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1808 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1809 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1810 qp_attr->timeout = qplib_qp->timeout;
1811 qp_attr->retry_cnt = qplib_qp->retry_cnt;
1812 qp_attr->rnr_retry = qplib_qp->rnr_retry;
1813 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1814 qp_attr->rq_psn = qplib_qp->rq.psn;
1815 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1816 qp_attr->sq_psn = qplib_qp->sq.psn;
1817 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1818 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1820 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1822 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1823 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1824 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1825 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1826 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1827 qp_init_attr->cap = qp_attr->cap;
1834 /* Routine for sending QP1 packets for RoCE V1 and V2
1836 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1837 const struct ib_send_wr *wr,
1838 struct bnxt_qplib_swqe *wqe,
1841 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1843 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1844 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
1845 struct bnxt_qplib_sge sge;
1849 bool is_eth = false;
1850 bool is_vlan = false;
1851 bool is_grh = false;
1852 bool is_udp = false;
1854 u16 vlan_id = 0xFFFF;
1858 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1860 if (is_vlan_dev(sgid_attr->ndev))
1861 vlan_id = vlan_dev_vlan_id(sgid_attr->ndev);
1862 /* Get network header type for this GID */
1863 nw_type = rdma_gid_attr_network_type(sgid_attr);
1865 case RDMA_NETWORK_IPV4:
1866 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1868 case RDMA_NETWORK_IPV6:
1869 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1872 nw_type = BNXT_RE_ROCE_V1_PACKET;
1875 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1876 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1878 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
1880 ether_type = ETH_P_IP;
1883 ether_type = ETH_P_IPV6;
1887 ether_type = ETH_P_IBOE;
1892 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1894 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1895 ip_version, is_udp, 0, &qp->qp1_hdr);
1898 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1899 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1901 /* For vlan, check the sgid for vlan existence */
1904 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1906 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1907 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1910 if (is_grh || (ip_version == 6)) {
1911 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
1912 sizeof(sgid_attr->gid));
1913 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1914 sizeof(sgid_attr->gid));
1915 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1918 if (ip_version == 4) {
1919 qp->qp1_hdr.ip4.tos = 0;
1920 qp->qp1_hdr.ip4.id = 0;
1921 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1922 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1924 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
1925 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1926 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1930 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1931 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1932 qp->qp1_hdr.udp.csum = 0;
1936 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1937 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1938 qp->qp1_hdr.immediate_present = 1;
1940 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1942 if (wr->send_flags & IB_SEND_SOLICITED)
1943 qp->qp1_hdr.bth.solicited_event = 1;
1945 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1947 /* P_key for QP1 is for all members */
1948 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1949 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1950 qp->qp1_hdr.bth.ack_req = 0;
1952 qp->send_psn &= BTH_PSN_MASK;
1953 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1955 /* Use the privileged Q_Key for QP1 */
1956 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1957 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1959 /* Pack the QP1 to the transmit buffer */
1960 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1962 ib_ud_header_pack(&qp->qp1_hdr, buf);
1963 for (i = wqe->num_sge; i; i--) {
1964 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1965 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1966 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1970 * Max Header buf size for IPV6 RoCE V2 is 86,
1971 * which is the same as the QP1 SQ header buffer.
1972 * Header buf size for IPV4 RoCE V2 can be 66:
1973 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
1974 * Subtract 20 bytes from the QP1 SQ header buf size
1976 if (is_udp && ip_version == 4)
1979 * Max Header buf size for RoCE V1 is 78.
1980 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1981 * Subtract 8 bytes from QP1 SQ header buf size
1986 /* Subtract 4 bytes for non-VLAN packets */
1990 wqe->sg_list[0].addr = sge.addr;
1991 wqe->sg_list[0].lkey = sge.lkey;
1992 wqe->sg_list[0].size = sge.size;
1996 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
2002 /* The MAD layer only provides a recv SGE the size of the
2003 * ib_grh + MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
2004 * nor RoCE iCRC. The Cu+ solution must provide a buffer for the entire
2005 * receive packet (334 bytes) with no VLAN, and then copy the GRH
2006 * and the MAD datagram out to the provided SGE.
2008 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2009 const struct ib_recv_wr *wr,
2010 struct bnxt_qplib_swqe *wqe,
2013 struct bnxt_qplib_sge ref, sge;
2015 struct bnxt_re_sqp_entries *sqp_entry;
2017 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2019 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2022 /* Create 1 SGE to receive the entire
2025 /* Save the reference from ULP */
2026 ref.addr = wqe->sg_list[0].addr;
2027 ref.lkey = wqe->sg_list[0].lkey;
2028 ref.size = wqe->sg_list[0].size;
2030 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2033 wqe->sg_list[0].addr = sge.addr;
2034 wqe->sg_list[0].lkey = sge.lkey;
2035 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2036 sge.size -= wqe->sg_list[0].size;
2038 sqp_entry->sge.addr = ref.addr;
2039 sqp_entry->sge.lkey = ref.lkey;
2040 sqp_entry->sge.size = ref.size;
2041 /* Store the wrid for reporting completion */
2042 sqp_entry->wrid = wqe->wr_id;
2043 /* Change wqe->wr_id to the table index */
2044 wqe->wr_id = rq_prod_index;
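/* The bnxt_re_build_*_wqe() helpers below translate each ib_send_wr
 * opcode and its send flags into the matching bnxt_qplib_swqe fields
 * used by the qplib post path.
 */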
2048 static int is_ud_qp(struct bnxt_re_qp *qp)
2050 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2051 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2054 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2055 const struct ib_send_wr *wr,
2056 struct bnxt_qplib_swqe *wqe)
2058 struct bnxt_re_ah *ah = NULL;
2061 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2062 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2063 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2064 wqe->send.avid = ah->qplib_ah.id;
2066 switch (wr->opcode) {
2068 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2070 case IB_WR_SEND_WITH_IMM:
2071 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2072 wqe->send.imm_data = wr->ex.imm_data;
2074 case IB_WR_SEND_WITH_INV:
2075 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2076 wqe->send.inv_key = wr->ex.invalidate_rkey;
2081 if (wr->send_flags & IB_SEND_SIGNALED)
2082 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2083 if (wr->send_flags & IB_SEND_FENCE)
2084 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2085 if (wr->send_flags & IB_SEND_SOLICITED)
2086 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2087 if (wr->send_flags & IB_SEND_INLINE)
2088 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
return 0;
}
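/*
 * Editor's note: an illustrative user-space counterpart (not part of this
 * driver).  A UD send posted through libibverbs; the ah/remote_qpn/remote_qkey
 * fields below are what bnxt_re_build_send_wqe() ultimately translates into
 * wqe->send.avid/dst_qp/q_key.  All parameters are placeholders.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static int example_post_ud_send(struct ibv_qp *qp, struct ibv_ah *ah,
				uint32_t remote_qpn, uint32_t remote_qkey,
				void *buf, uint32_t len, struct ibv_mr *mr)
{
	struct ibv_sge sge = {
		.addr = (uintptr_t)buf,
		.length = len,
		.lkey = mr->lkey,
	};
	struct ibv_send_wr wr = {
		.wr_id = 1,
		.sg_list = &sge,
		.num_sge = 1,
		.opcode = IBV_WR_SEND,
		.send_flags = IBV_SEND_SIGNALED,
	};
	struct ibv_send_wr *bad_wr;

	wr.wr.ud.ah = ah;
	wr.wr.ud.remote_qpn = remote_qpn;
	wr.wr.ud.remote_qkey = remote_qkey;

	return ibv_post_send(qp, &wr, &bad_wr);
}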
2093 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2094 struct bnxt_qplib_swqe *wqe)
2096 switch (wr->opcode) {
2097 case IB_WR_RDMA_WRITE:
2098 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
break;
2100 case IB_WR_RDMA_WRITE_WITH_IMM:
2101 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2102 wqe->rdma.imm_data = wr->ex.imm_data;
break;
2104 case IB_WR_RDMA_READ:
2105 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2106 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
break;
default:
return -EINVAL;
}
2111 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2112 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2113 if (wr->send_flags & IB_SEND_SIGNALED)
2114 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2115 if (wr->send_flags & IB_SEND_FENCE)
2116 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2117 if (wr->send_flags & IB_SEND_SOLICITED)
2118 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2119 if (wr->send_flags & IB_SEND_INLINE)
2120 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
return 0;
}
2125 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2126 struct bnxt_qplib_swqe *wqe)
2128 switch (wr->opcode) {
2129 case IB_WR_ATOMIC_CMP_AND_SWP:
2130 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2131 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2132 wqe->atomic.swap_data = atomic_wr(wr)->swap;
break;
2134 case IB_WR_ATOMIC_FETCH_AND_ADD:
2135 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2136 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
break;
default:
return -EINVAL;
}
2141 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2142 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2143 if (wr->send_flags & IB_SEND_SIGNALED)
2144 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2145 if (wr->send_flags & IB_SEND_FENCE)
2146 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2147 if (wr->send_flags & IB_SEND_SOLICITED)
2148 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
return 0;
}
2152 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2153 struct bnxt_qplib_swqe *wqe)
2155 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2156 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2158 /* Need unconditional fence for local invalidate
2159 * opcode to work as expected.
*/
2161 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2163 if (wr->send_flags & IB_SEND_SIGNALED)
2164 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2165 if (wr->send_flags & IB_SEND_SOLICITED)
2166 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
return 0;
}
2171 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2172 struct bnxt_qplib_swqe *wqe)
2174 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2175 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2176 int access = wr->access;
2178 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2179 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2180 wqe->frmr.page_list = mr->pages;
2181 wqe->frmr.page_list_len = mr->npages;
2182 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2183 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2185 /* Need unconditional fence for reg_mr
2186 * opcode to function as expected.
*/
2189 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2191 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2192 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2194 if (access & IB_ACCESS_LOCAL_WRITE)
2195 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2196 if (access & IB_ACCESS_REMOTE_READ)
2197 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2198 if (access & IB_ACCESS_REMOTE_WRITE)
2199 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2200 if (access & IB_ACCESS_REMOTE_ATOMIC)
2201 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2202 if (access & IB_ACCESS_MW_BIND)
2203 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2205 wqe->frmr.l_key = wr->key;
2206 wqe->frmr.length = wr->mr->length;
2207 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2208 wqe->frmr.va = wr->mr->iova;
return 0;
}
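/*
 * Editor's note: an illustrative upper-layer sketch (not part of this driver)
 * of how a kernel ULP typically builds the IB_WR_REG_MR work request that
 * lands in bnxt_re_build_reg_wqe().  Relies only on <rdma/ib_verbs.h>, which
 * this file already includes; 'qp', 'pd', 'sgl' and 'nents' are placeholders
 * and error unwinding is trimmed for brevity.
 */
static int example_fast_reg(struct ib_qp *qp, struct ib_pd *pd,
			    struct scatterlist *sgl, int nents)
{
	struct ib_reg_wr reg_wr = {};
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Map the scatterlist into the MR's page list using PAGE_SIZE pages */
	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
	if (n < nents)
		return -EINVAL;

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}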
2212 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2213 const struct ib_send_wr *wr,
2214 struct bnxt_qplib_swqe *wqe)
{
2216 /* Copy the inline data to the data field */
u8 *in_data;
u32 sge_len;
void *sge_addr;
int i;
2221 in_data = wqe->inline_data;
2222 for (i = 0; i < wr->num_sge; i++) {
2223 sge_addr = (void *)(unsigned long)
2224 wr->sg_list[i].addr;
2225 sge_len = wr->sg_list[i].length;
2227 if ((sge_len + wqe->inline_len) >
2228 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2229 dev_err(rdev_to_dev(rdev),
2230 "Inline data size requested > supported value");
return -EINVAL;
}
2233 sge_len = wr->sg_list[i].length;
2235 memcpy(in_data, sge_addr, sge_len);
2236 in_data += wr->sg_list[i].length;
2237 wqe->inline_len += wr->sg_list[i].length;
}
2239 return wqe->inline_len;
}
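/*
 * Editor's note: an illustrative user-space counterpart (not part of this
 * driver).  Posting a small payload inline, which is what
 * bnxt_re_copy_inline_data() services on the kernel side: the payload is
 * copied into the WQE itself, so no lkey/MR is needed, but it must fit the
 * device's inline limit (max_inline_data reported at QP creation).
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static int example_post_inline_send(struct ibv_qp *qp, const void *payload,
				    uint32_t len)
{
	struct ibv_sge sge = {
		.addr = (uintptr_t)payload,
		.length = len,
		/* lkey is ignored for inline sends */
	};
	struct ibv_send_wr wr = {
		.sg_list = &sge,
		.num_sge = 1,
		.opcode = IBV_WR_SEND,
		.send_flags = IBV_SEND_INLINE | IBV_SEND_SIGNALED,
	};
	struct ibv_send_wr *bad_wr;

	return ibv_post_send(qp, &wr, &bad_wr);
}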
2242 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2243 const struct ib_send_wr *wr,
2244 struct bnxt_qplib_swqe *wqe)
{
int payload_sz = 0;
2248 if (wr->send_flags & IB_SEND_INLINE)
2249 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
else
2251 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
wqe->num_sge);
return payload_sz;
}
2257 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{
2259 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2260 qp->ib_qp.qp_type == IB_QPT_GSI ||
2261 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2262 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
int qp_attr_mask;
2264 struct ib_qp_attr qp_attr;
2266 qp_attr_mask = IB_QP_STATE;
2267 qp_attr.qp_state = IB_QPS_RTS;
2268 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2269 qp->qplib_qp.wqe_cnt = 0;
}
}
2273 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2274 struct bnxt_re_qp *qp,
2275 const struct ib_send_wr *wr)
2277 struct bnxt_qplib_swqe wqe;
2278 int rc = 0, payload_sz = 0;
2279 unsigned long flags;
2281 spin_lock_irqsave(&qp->sq_lock, flags);
2282 memset(&wqe, 0, sizeof(wqe));
2285 memset(&wqe, 0, sizeof(wqe));
2288 wqe.num_sge = wr->num_sge;
2289 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2290 dev_err(rdev_to_dev(rdev),
2291 "Limit exceeded for Send SGEs");
2296 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2297 if (payload_sz < 0) {
2301 wqe.wr_id = wr->wr_id;
2303 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2305 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2307 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2310 dev_err(rdev_to_dev(rdev),
2311 "Post send failed opcode = %#x rc = %d",
2317 bnxt_qplib_post_send_db(&qp->qplib_qp);
2318 bnxt_ud_qp_hw_stall_workaround(qp);
2319 spin_unlock_irqrestore(&qp->sq_lock, flags);
2323 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2324 const struct ib_send_wr **bad_wr)
2326 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2327 struct bnxt_qplib_swqe wqe;
2328 int rc = 0, payload_sz = 0;
2329 unsigned long flags;
2331 spin_lock_irqsave(&qp->sq_lock, flags);
while (wr) {
/* House keeping */
2334 memset(&wqe, 0, sizeof(wqe));
2337 wqe.num_sge = wr->num_sge;
2338 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2339 dev_err(rdev_to_dev(qp->rdev),
2340 "Limit exceeded for Send SGEs");
rc = -EINVAL;
goto bad;
}
2345 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2346 if (payload_sz < 0) {
rc = -EINVAL;
goto bad;
}
2350 wqe.wr_id = wr->wr_id;
2352 switch (wr->opcode) {
case IB_WR_SEND:
2354 case IB_WR_SEND_WITH_IMM:
2355 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2356 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
payload_sz);
if (rc)
goto bad;
2360 wqe.rawqp1.lflags |=
2361 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
}
2363 switch (wr->send_flags) {
2364 case IB_SEND_IP_CSUM:
2365 wqe.rawqp1.lflags |=
2366 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
break;
default:
break;
}
/* fall through */
2372 case IB_WR_SEND_WITH_INV:
2373 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
break;
2375 case IB_WR_RDMA_WRITE:
2376 case IB_WR_RDMA_WRITE_WITH_IMM:
2377 case IB_WR_RDMA_READ:
2378 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
break;
2380 case IB_WR_ATOMIC_CMP_AND_SWP:
2381 case IB_WR_ATOMIC_FETCH_AND_ADD:
2382 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
break;
2384 case IB_WR_RDMA_READ_WITH_INV:
2385 dev_err(rdev_to_dev(qp->rdev),
2386 "RDMA Read with Invalidate is not supported");
rc = -EINVAL;
goto bad;
2389 case IB_WR_LOCAL_INV:
2390 rc = bnxt_re_build_inv_wqe(wr, &wqe);
break;
case IB_WR_REG_MR:
2393 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
break;
default:
2396 /* Unsupported WRs */
2397 dev_err(rdev_to_dev(qp->rdev),
2398 "WR (%#x) is not supported", wr->opcode);
rc = -EINVAL;
goto bad;
}
if (!rc)
2403 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
if (rc) {
2406 dev_err(rdev_to_dev(qp->rdev),
2407 "post_send failed op:%#x qps = %#x rc = %d\n",
2408 wr->opcode, qp->qplib_qp.state, rc);
*bad_wr = wr;
break;
}
wr = wr->next;
}
2414 bnxt_qplib_post_send_db(&qp->qplib_qp);
2415 bnxt_ud_qp_hw_stall_workaround(qp);
2416 spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}
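/*
 * Editor's note: an illustrative user-space counterpart (not part of this
 * driver).  A signaled RDMA WRITE posted through libibverbs; remote_addr/rkey
 * correspond to the wqe->rdma.remote_va/r_key fields filled in by
 * bnxt_re_build_rdma_wqe().  All parameters are placeholders.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static int example_post_rdma_write(struct ibv_qp *qp, struct ibv_mr *mr,
				   void *local_buf, uint32_t len,
				   uint64_t remote_addr, uint32_t rkey)
{
	struct ibv_sge sge = {
		.addr = (uintptr_t)local_buf,
		.length = len,
		.lkey = mr->lkey,
	};
	struct ibv_send_wr wr = {
		.sg_list = &sge,
		.num_sge = 1,
		.opcode = IBV_WR_RDMA_WRITE,
		.send_flags = IBV_SEND_SIGNALED,
	};
	struct ibv_send_wr *bad_wr;

	wr.wr.rdma.remote_addr = remote_addr;
	wr.wr.rdma.rkey = rkey;

	return ibv_post_send(qp, &wr, &bad_wr);
}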
2421 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2422 struct bnxt_re_qp *qp,
2423 const struct ib_recv_wr *wr)
2425 struct bnxt_qplib_swqe wqe;
2428 memset(&wqe, 0, sizeof(wqe));
2431 memset(&wqe, 0, sizeof(wqe));
2434 wqe.num_sge = wr->num_sge;
2435 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2436 dev_err(rdev_to_dev(rdev),
2437 "Limit exceeded for Receive SGEs");
2441 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2442 wqe.wr_id = wr->wr_id;
2443 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2445 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2452 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2456 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2457 const struct ib_recv_wr **bad_wr)
2459 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2460 struct bnxt_qplib_swqe wqe;
2461 int rc = 0, payload_sz = 0;
2462 unsigned long flags;
u32 count = 0;
2465 spin_lock_irqsave(&qp->rq_lock, flags);
while (wr) {
/* House keeping */
2468 memset(&wqe, 0, sizeof(wqe));
2471 wqe.num_sge = wr->num_sge;
2472 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2473 dev_err(rdev_to_dev(qp->rdev),
2474 "Limit exceeded for Receive SGEs");
rc = -EINVAL;
*bad_wr = wr;
break;
}
2480 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
wr->num_sge);
2482 wqe.wr_id = wr->wr_id;
2483 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2485 if (ib_qp->qp_type == IB_QPT_GSI &&
2486 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2487 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
payload_sz);
else
2490 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
if (rc) {
*bad_wr = wr;
break;
}
2496 /* Ring DB if the RQEs posted reaches a threshold value */
2497 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2498 bnxt_qplib_post_recv_db(&qp->qplib_qp);
count = 0;
}
wr = wr->next;
}
if (count)
2506 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2508 spin_unlock_irqrestore(&qp->rq_lock, flags);
return rc;
}
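/*
 * Editor's note: an illustrative user-space counterpart (not part of this
 * driver).  Pre-posting a batch of receive buffers with ibv_post_recv(),
 * which is what reaches bnxt_re_post_recv() above.  'bufs' and 'mr' are
 * placeholders supplied by the caller.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static int example_post_recv_buffers(struct ibv_qp *qp, struct ibv_mr *mr,
				     void **bufs, uint32_t buf_len, int count)
{
	struct ibv_recv_wr *bad_wr;
	int i, rc;

	for (i = 0; i < count; i++) {
		struct ibv_sge sge = {
			.addr = (uintptr_t)bufs[i],
			.length = buf_len,
			.lkey = mr->lkey,
		};
		struct ibv_recv_wr wr = {
			.wr_id = i,
			.sg_list = &sge,
			.num_sge = 1,
		};

		rc = ibv_post_recv(qp, &wr, &bad_wr);
		if (rc)
			return rc;
	}
	return 0;
}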
2513 /* Completion Queues */
2514 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2517 struct bnxt_re_cq *cq;
2518 struct bnxt_qplib_nq *nq;
2519 struct bnxt_re_dev *rdev;
2521 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2523 nq = cq->qplib_cq.nq;
2525 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2527 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2530 if (!IS_ERR_OR_NULL(cq->umem))
2531 ib_umem_release(cq->umem);
2533 atomic_dec(&rdev->cq_count);
2541 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2542 const struct ib_cq_init_attr *attr,
2543 struct ib_udata *udata)
2545 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2546 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2547 struct bnxt_re_cq *cq = NULL;
2549 int cqe = attr->cqe;
2550 struct bnxt_qplib_nq *nq = NULL;
2551 unsigned int nq_alloc_cnt;
2553 /* Validate CQ fields */
2554 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2555 dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
2556 return ERR_PTR(-EINVAL);
2558 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2560 return ERR_PTR(-ENOMEM);
2563 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2565 entries = roundup_pow_of_two(cqe + 1);
2566 if (entries > dev_attr->max_cq_wqes + 1)
2567 entries = dev_attr->max_cq_wqes + 1;
2570 struct bnxt_re_cq_req req;
2571 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
2572 udata, struct bnxt_re_ucontext, ib_uctx);
2573 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2578 cq->umem = ib_umem_get(udata, req.cq_va,
2579 entries * sizeof(struct cq_base),
2580 IB_ACCESS_LOCAL_WRITE, 1);
2581 if (IS_ERR(cq->umem)) {
2582 rc = PTR_ERR(cq->umem);
2585 cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
2586 cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
2587 cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
2588 cq->qplib_cq.dpi = &uctx->dpi;
2590 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2591 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2598 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2601 * Allocating the NQ in a round-robin fashion. nq_alloc_cnt is
2602 * used for deriving the NQ index.
2604 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2605 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2606 cq->qplib_cq.max_wqe = entries;
2607 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2608 cq->qplib_cq.nq = nq;
2610 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2612 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2616 cq->ib_cq.cqe = entries;
2617 cq->cq_period = cq->qplib_cq.period;
2620 atomic_inc(&rdev->cq_count);
2621 spin_lock_init(&cq->cq_lock);
2624 struct bnxt_re_cq_resp resp;
2626 resp.cqid = cq->qplib_cq.id;
2627 resp.tail = cq->qplib_cq.hwq.cons;
2628 resp.phase = cq->qplib_cq.period;
2630 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2632 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2633 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2642 ib_umem_release(cq->umem);
2649 static u8 __req_to_ib_wc_status(u8 qstatus)
{
switch (qstatus) {
2652 case CQ_REQ_STATUS_OK:
2653 return IB_WC_SUCCESS;
2654 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2655 return IB_WC_BAD_RESP_ERR;
2656 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2657 return IB_WC_LOC_LEN_ERR;
2658 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2659 return IB_WC_LOC_QP_OP_ERR;
2660 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2661 return IB_WC_LOC_PROT_ERR;
2662 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2663 return IB_WC_GENERAL_ERR;
2664 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2665 return IB_WC_REM_INV_REQ_ERR;
2666 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2667 return IB_WC_REM_ACCESS_ERR;
2668 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2669 return IB_WC_REM_OP_ERR;
2670 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2671 return IB_WC_RNR_RETRY_EXC_ERR;
2672 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2673 return IB_WC_RETRY_EXC_ERR;
2674 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2675 return IB_WC_WR_FLUSH_ERR;
default:
2677 return IB_WC_GENERAL_ERR;
}
}
2682 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
{
switch (qstatus) {
2685 case CQ_RES_RAWETH_QP1_STATUS_OK:
2686 return IB_WC_SUCCESS;
2687 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2688 return IB_WC_LOC_ACCESS_ERR;
2689 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2690 return IB_WC_LOC_LEN_ERR;
2691 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2692 return IB_WC_LOC_PROT_ERR;
2693 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2694 return IB_WC_LOC_QP_OP_ERR;
2695 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2696 return IB_WC_GENERAL_ERR;
2697 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2698 return IB_WC_WR_FLUSH_ERR;
2699 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2700 return IB_WC_WR_FLUSH_ERR;
default:
2702 return IB_WC_GENERAL_ERR;
}
}
2706 static u8 __rc_to_ib_wc_status(u8 qstatus)
{
switch (qstatus) {
2709 case CQ_RES_RC_STATUS_OK:
2710 return IB_WC_SUCCESS;
2711 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2712 return IB_WC_LOC_ACCESS_ERR;
2713 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2714 return IB_WC_LOC_LEN_ERR;
2715 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2716 return IB_WC_LOC_PROT_ERR;
2717 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2718 return IB_WC_LOC_QP_OP_ERR;
2719 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2720 return IB_WC_GENERAL_ERR;
2721 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2722 return IB_WC_REM_INV_REQ_ERR;
2723 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2724 return IB_WC_WR_FLUSH_ERR;
2725 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2726 return IB_WC_WR_FLUSH_ERR;
default:
2728 return IB_WC_GENERAL_ERR;
}
}
2732 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
{
2734 switch (cqe->type) {
2735 case BNXT_QPLIB_SWQE_TYPE_SEND:
2736 wc->opcode = IB_WC_SEND;
break;
2738 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2739 wc->opcode = IB_WC_SEND;
2740 wc->wc_flags |= IB_WC_WITH_IMM;
break;
2742 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2743 wc->opcode = IB_WC_SEND;
2744 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
break;
2746 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2747 wc->opcode = IB_WC_RDMA_WRITE;
break;
2749 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2750 wc->opcode = IB_WC_RDMA_WRITE;
2751 wc->wc_flags |= IB_WC_WITH_IMM;
break;
2753 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2754 wc->opcode = IB_WC_RDMA_READ;
break;
2756 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2757 wc->opcode = IB_WC_COMP_SWAP;
break;
2759 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2760 wc->opcode = IB_WC_FETCH_ADD;
break;
2762 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2763 wc->opcode = IB_WC_LOCAL_INV;
break;
2765 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2766 wc->opcode = IB_WC_REG_MR;
break;
default:
2769 wc->opcode = IB_WC_SEND;
break;
}
2773 wc->status = __req_to_ib_wc_status(cqe->status);
2776 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2777 u16 raweth_qp1_flags2)
2779 bool is_ipv6 = false, is_ipv4 = false;
2781 /* raweth_qp1_flags Bit 9-6 indicates itype */
2782 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2783 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
return -1;
2786 if (raweth_qp1_flags2 &
2787 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
raweth_qp1_flags2 &
2789 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2790 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
2791 (raweth_qp1_flags2 &
2792 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2793 (is_ipv6 = true) : (is_ipv4 = true);
return ((is_ipv6) ?
2795 BNXT_RE_ROCEV2_IPV6_PACKET :
2796 BNXT_RE_ROCEV2_IPV4_PACKET);
}
2798 return BNXT_RE_ROCE_V1_PACKET;
}
2802 static int bnxt_re_to_ib_nw_type(int nw_type)
{
2804 u8 nw_hdr_type = 0xFF;
switch (nw_type) {
2807 case BNXT_RE_ROCE_V1_PACKET:
2808 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
break;
2810 case BNXT_RE_ROCEV2_IPV4_PACKET:
2811 nw_hdr_type = RDMA_NETWORK_IPV4;
break;
2813 case BNXT_RE_ROCEV2_IPV6_PACKET:
2814 nw_hdr_type = RDMA_NETWORK_IPV6;
break;
}
return nw_hdr_type;
}
2820 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2824 struct ethhdr *eth_hdr;
2828 tmp_buf = (u8 *)rq_hdr_buf;
2830 * If the dest MAC is not the same as the I/F MAC, this could be
2831 * a loopback address or a multicast address; check whether
2832 * it is a loopback packet.
2834 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2836 /* Check the ether type */
2837 eth_hdr = (struct ethhdr *)tmp_buf;
2838 eth_type = ntohs(eth_hdr->h_proto);
2846 struct udphdr *udp_hdr;
2848 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2849 sizeof(struct ipv6hdr));
2850 tmp_buf += sizeof(struct ethhdr) + len;
2851 udp_hdr = (struct udphdr *)tmp_buf;
2852 if (ntohs(udp_hdr->dest) ==
2865 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2866 struct bnxt_qplib_cqe *cqe)
2868 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2869 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2870 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2871 struct ib_send_wr *swr;
2872 struct ib_ud_wr udwr;
2873 struct ib_recv_wr rwr;
2877 dma_addr_t rq_hdr_buf_map;
2878 dma_addr_t shrq_hdr_buf_map;
2881 struct ib_sge s_sge[2];
2882 struct ib_sge r_sge[2];
2885 memset(&udwr, 0, sizeof(udwr));
2886 memset(&rwr, 0, sizeof(rwr));
2887 memset(&s_sge, 0, sizeof(s_sge));
2888 memset(&r_sge, 0, sizeof(r_sge));
2891 tbl_idx = cqe->wr_id;
2893 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2894 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2895 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2898 /* Shadow QP header buffer */
2899 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2901 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2903 /* Store this cqe */
2904 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2905 sqp_entry->qp1_qp = qp1_qp;
2907 /* Find packet type from the cqe */
2909 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2910 cqe->raweth_qp1_flags2);
2912 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2916 /* Adjust the offset for the user buffer and post in the rq */
2918 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2922 * QP1 loopback packet has 4 bytes of internal header before
2923 * ether header. Skip these four bytes.
2925 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2928 /* First send SGE. Skip the ether header */
2929 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2931 s_sge[0].lkey = 0xFFFFFFFF;
2932 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2933 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2935 /* Second Send SGE */
2936 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2937 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2938 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2940 s_sge[1].lkey = 0xFFFFFFFF;
2941 s_sge[1].length = 256;
2943 /* First recv SGE */
2945 r_sge[0].addr = shrq_hdr_buf_map;
2946 r_sge[0].lkey = 0xFFFFFFFF;
2947 r_sge[0].length = 40;
2949 r_sge[1].addr = sqp_entry->sge.addr + offset;
2950 r_sge[1].lkey = sqp_entry->sge.lkey;
2951 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2953 /* Create receive work request */
2955 rwr.sg_list = r_sge;
2956 rwr.wr_id = tbl_idx;
2959 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2961 dev_err(rdev_to_dev(rdev),
2962 "Failed to post Rx buffers to shadow QP");
2967 swr->sg_list = s_sge;
2968 swr->wr_id = tbl_idx;
2969 swr->opcode = IB_WR_SEND;
2972 udwr.ah = &rdev->sqp_ah->ib_ah;
2973 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2974 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2976 /* post data received in the send queue */
2977 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2982 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2983 struct bnxt_qplib_cqe *cqe)
2985 wc->opcode = IB_WC_RECV;
2986 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2987 wc->wc_flags |= IB_WC_GRH;
2990 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
2997 metadata = orig_cqe->raweth_qp1_metadata;
2998 if (orig_cqe->raweth_qp1_flags2 &
2999 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3001 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3002 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3003 if (tpid == ETH_P_8021Q) {
3005 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3007 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3008 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3016 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3017 struct bnxt_qplib_cqe *cqe)
3019 wc->opcode = IB_WC_RECV;
3020 wc->status = __rc_to_ib_wc_status(cqe->status);
3022 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3023 wc->wc_flags |= IB_WC_WITH_IMM;
3024 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3025 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3026 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3027 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3028 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3031 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3033 struct bnxt_qplib_cqe *cqe)
3035 struct bnxt_re_dev *rdev = qp->rdev;
3036 struct bnxt_re_qp *qp1_qp = NULL;
3037 struct bnxt_qplib_cqe *orig_cqe = NULL;
3038 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3044 tbl_idx = cqe->wr_id;
3046 sqp_entry = &rdev->sqp_tbl[tbl_idx];
3047 qp1_qp = sqp_entry->qp1_qp;
3048 orig_cqe = &sqp_entry->cqe;
3050 wc->wr_id = sqp_entry->wrid;
3051 wc->byte_len = orig_cqe->length;
3052 wc->qp = &qp1_qp->ib_qp;
3054 wc->ex.imm_data = orig_cqe->immdata;
3055 wc->src_qp = orig_cqe->src_qp;
3056 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3057 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3058 wc->vlan_id = vlan_id;
3060 wc->wc_flags |= IB_WC_WITH_VLAN;
3063 wc->vendor_err = orig_cqe->status;
3065 wc->opcode = IB_WC_RECV;
3066 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3067 wc->wc_flags |= IB_WC_GRH;
3069 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3070 orig_cqe->raweth_qp1_flags2);
3072 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3073 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3077 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3079 struct bnxt_qplib_cqe *cqe)
3083 wc->opcode = IB_WC_RECV;
3084 wc->status = __rc_to_ib_wc_status(cqe->status);
3086 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3087 wc->wc_flags |= IB_WC_WITH_IMM;
3088 /* report only on GSI QP for Thor */
3089 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3090 wc->wc_flags |= IB_WC_GRH;
3091 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3092 wc->wc_flags |= IB_WC_WITH_SMAC;
3093 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3094 wc->vlan_id = (cqe->cfa_meta & 0xFFF);
3095 if (wc->vlan_id < 0x1000)
3096 wc->wc_flags |= IB_WC_WITH_VLAN;
3098 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3099 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3100 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3101 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3106 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3108 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3109 unsigned long flags;
3112 spin_lock_irqsave(&qp->sq_lock, flags);
3114 rc = bnxt_re_bind_fence_mw(lib_qp);
3116 lib_qp->sq.phantom_wqe_cnt++;
3117 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3118 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3119 lib_qp->id, lib_qp->sq.hwq.prod,
3120 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3121 lib_qp->sq.phantom_wqe_cnt);
3124 spin_unlock_irqrestore(&qp->sq_lock, flags);
3128 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3130 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3131 struct bnxt_re_qp *qp;
3132 struct bnxt_qplib_cqe *cqe;
3133 int i, ncqe, budget;
3134 struct bnxt_qplib_q *sq;
3135 struct bnxt_qplib_qp *lib_qp;
3137 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3138 unsigned long flags;
3140 spin_lock_irqsave(&cq->cq_lock, flags);
3141 budget = min_t(u32, num_entries, cq->max_cql);
3142 num_entries = budget;
3144 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3150 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3153 if (sq->send_phantom) {
3154 qp = container_of(lib_qp,
3155 struct bnxt_re_qp, qplib_qp);
3156 if (send_phantom_wqe(qp) == -ENOMEM)
3157 dev_err(rdev_to_dev(cq->rdev),
3158 "Phantom failed! Scheduled to send again\n");
3160 sq->send_phantom = false;
3164 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3171 for (i = 0; i < ncqe; i++, cqe++) {
3172 /* Transcribe each qplib_wqe back to ib_wc */
3173 memset(wc, 0, sizeof(*wc));
3175 wc->wr_id = cqe->wr_id;
3176 wc->byte_len = cqe->length;
3178 ((struct bnxt_qplib_qp *)
3179 (unsigned long)(cqe->qp_handle),
3180 struct bnxt_re_qp, qplib_qp);
3182 dev_err(rdev_to_dev(cq->rdev),
3183 "POLL CQ : bad QP handle");
3186 wc->qp = &qp->ib_qp;
3187 wc->ex.imm_data = cqe->immdata;
3188 wc->src_qp = cqe->src_qp;
3189 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3191 wc->vendor_err = cqe->status;
3193 switch (cqe->opcode) {
3194 case CQ_BASE_CQE_TYPE_REQ:
3195 if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
3196 qp->rdev->qp1_sqp->qplib_qp.id) {
3197 /* Handle this completion with
3198 * the stored completion
*/
3200 memset(wc, 0, sizeof(*wc));
3203 bnxt_re_process_req_wc(wc, cqe);
3205 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3209 rc = bnxt_re_process_raw_qp_pkt_rx
3212 memset(wc, 0, sizeof(*wc));
3217 /* Errors need not be looped back.
3218 * But change the wr_id to the one
3219 * stored in the table
3221 tbl_idx = cqe->wr_id;
3222 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3223 wc->wr_id = sqp_entry->wrid;
3224 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3226 case CQ_BASE_CQE_TYPE_RES_RC:
3227 bnxt_re_process_res_rc_wc(wc, cqe);
3229 case CQ_BASE_CQE_TYPE_RES_UD:
3230 if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
3231 qp->rdev->qp1_sqp->qplib_qp.id) {
3232 /* Handle this completion with
3233 * the stored completion
*/
3238 bnxt_re_process_res_shadow_qp_wc
3243 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3246 dev_err(rdev_to_dev(cq->rdev),
3247 "POLL CQ : type 0x%x not handled",
3256 spin_unlock_irqrestore(&cq->cq_lock, flags);
3257 return num_entries - budget;
3260 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3261 enum ib_cq_notify_flags ib_cqn_flags)
3263 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3264 int type = 0, rc = 0;
3265 unsigned long flags;
3267 spin_lock_irqsave(&cq->cq_lock, flags);
3268 /* Trigger on the very next completion */
3269 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3270 type = DBC_DBC_TYPE_CQ_ARMALL;
3271 /* Trigger on the next solicited completion */
3272 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3273 type = DBC_DBC_TYPE_CQ_ARMSE;
3275 /* Poll to see if there are missed events */
3276 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3277 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3281 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3284 spin_unlock_irqrestore(&cq->cq_lock, flags);
return rc;
}
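/*
 * Editor's note: an illustrative user-space counterpart (not part of this
 * driver).  The usual arm-then-poll loop that drives
 * bnxt_re_req_notify_cq()/bnxt_re_poll_cq(): the CQ is re-armed before it is
 * drained so completions cannot slip in unnoticed between poll and arm.
 * 'ch' and 'cq' are placeholders; the loop runs until an error occurs.
 */
#include <infiniband/verbs.h>

static int example_cq_event_loop(struct ibv_comp_channel *ch, struct ibv_cq *cq)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	struct ibv_wc wc;
	int n;

	/* Request a completion event for the next completion */
	if (ibv_req_notify_cq(cq, 0))
		return -1;

	for (;;) {
		if (ibv_get_cq_event(ch, &ev_cq, &ev_ctx))
			return -1;
		ibv_ack_cq_events(ev_cq, 1);

		/* Re-arm before polling to avoid missing completions */
		if (ibv_req_notify_cq(ev_cq, 0))
			return -1;

		while ((n = ibv_poll_cq(ev_cq, 1, &wc)) > 0) {
			if (wc.status != IBV_WC_SUCCESS)
				return -1;
			/* handle wc.wr_id here */
		}
		if (n < 0)
			return -1;
	}
}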
3288 /* Memory Regions */
3289 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3291 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3292 struct bnxt_re_dev *rdev = pd->rdev;
3293 struct bnxt_re_mr *mr;
3297 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3299 return ERR_PTR(-ENOMEM);
3302 mr->qplib_mr.pd = &pd->qplib_pd;
3303 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3304 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3306 /* Allocate and register 0 as the address */
3307 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3311 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3312 mr->qplib_mr.total_size = -1; /* Infinite length */
3313 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3318 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3319 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3320 IB_ACCESS_REMOTE_ATOMIC))
3321 mr->ib_mr.rkey = mr->ib_mr.lkey;
3322 atomic_inc(&rdev->mr_count);
3327 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3333 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3335 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3336 struct bnxt_re_dev *rdev = mr->rdev;
3339 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3341 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3344 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3350 if (!IS_ERR_OR_NULL(mr->ib_umem))
3351 ib_umem_release(mr->ib_umem);
3354 atomic_dec(&rdev->mr_count);
3358 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3360 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3362 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
return -ENOMEM;
3365 mr->pages[mr->npages++] = addr;
return 0;
}
3369 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3370 unsigned int *sg_offset)
3372 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3375 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3378 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3379 u32 max_num_sg, struct ib_udata *udata)
3381 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3382 struct bnxt_re_dev *rdev = pd->rdev;
3383 struct bnxt_re_mr *mr = NULL;
3386 if (type != IB_MR_TYPE_MEM_REG) {
3387 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3388 return ERR_PTR(-EINVAL);
3390 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3391 return ERR_PTR(-EINVAL);
3393 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3395 return ERR_PTR(-ENOMEM);
3398 mr->qplib_mr.pd = &pd->qplib_pd;
3399 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3400 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3402 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3406 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3407 mr->ib_mr.rkey = mr->ib_mr.lkey;
3409 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3414 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3415 &mr->qplib_frpl, max_num_sg);
3417 dev_err(rdev_to_dev(rdev),
3418 "Failed to allocate HW FR page list");
3422 atomic_inc(&rdev->mr_count);
3428 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3434 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3435 struct ib_udata *udata)
3437 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3438 struct bnxt_re_dev *rdev = pd->rdev;
3439 struct bnxt_re_mw *mw;
3442 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3444 return ERR_PTR(-ENOMEM);
3446 mw->qplib_mw.pd = &pd->qplib_pd;
3448 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3449 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3450 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3451 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3453 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3456 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3458 atomic_inc(&rdev->mw_count);
3466 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3468 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3469 struct bnxt_re_dev *rdev = mw->rdev;
3472 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3474 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3479 atomic_dec(&rdev->mw_count);
3483 static int bnxt_re_page_size_ok(int page_shift)
{
3485 switch (page_shift) {
3486 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3487 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3488 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3489 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3490 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3491 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3492 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3493 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
return 1;
default:
return 0;
}
}
3500 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
int page_shift)
{
3503 u64 *pbl_tbl = pbl_tbl_orig;
u64 paddr;
3505 u64 page_mask = (1ULL << page_shift) - 1;
3506 struct sg_dma_page_iter sg_iter;
3508 for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
3509 paddr = sg_page_iter_dma_address(&sg_iter);
3510 if (pbl_tbl == pbl_tbl_orig)
3511 *pbl_tbl++ = paddr & ~page_mask;
3512 else if ((paddr & page_mask) == 0)
*pbl_tbl++ = paddr;
}
3515 return pbl_tbl - pbl_tbl_orig;
}
3519 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3520 u64 virt_addr, int mr_access_flags,
3521 struct ib_udata *udata)
3523 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3524 struct bnxt_re_dev *rdev = pd->rdev;
3525 struct bnxt_re_mr *mr;
3526 struct ib_umem *umem;
3527 u64 *pbl_tbl = NULL;
3528 int umem_pgs, page_shift, rc;
3530 if (length > BNXT_RE_MAX_MR_SIZE) {
3531 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
3532 length, BNXT_RE_MAX_MR_SIZE);
3533 return ERR_PTR(-ENOMEM);
3536 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3538 return ERR_PTR(-ENOMEM);
3541 mr->qplib_mr.pd = &pd->qplib_pd;
3542 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3543 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3545 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3547 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3550 /* The fixed portion of the rkey is the same as the lkey */
3551 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3553 umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
3555 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3561 mr->qplib_mr.va = virt_addr;
3562 umem_pgs = ib_umem_page_count(umem);
3564 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3568 mr->qplib_mr.total_size = length;
3570 pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
3576 page_shift = PAGE_SHIFT;
3578 if (!bnxt_re_page_size_ok(page_shift)) {
3579 dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3584 if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
3585 dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
3586 length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3590 if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
3591 page_shift = BNXT_RE_PAGE_SHIFT_2M;
3592 dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
3596 /* Map umem buf ptrs to the PBL */
3597 umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3598 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3599 umem_pgs, false, 1 << page_shift);
3601 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3607 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3608 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3609 atomic_inc(&rdev->mr_count);
3615 ib_umem_release(umem);
3617 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
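/*
 * Editor's note: an illustrative user-space counterpart (not part of this
 * driver).  Registering a user buffer with ibv_reg_mr(), which reaches the
 * kernel through bnxt_re_reg_user_mr() above.  'pd', 'buf' and 'len' are
 * placeholders.
 */
#include <infiniband/verbs.h>

static struct ibv_mr *example_register_buffer(struct ibv_pd *pd, void *buf,
					      size_t len)
{
	int access = IBV_ACCESS_LOCAL_WRITE |
		     IBV_ACCESS_REMOTE_READ |
		     IBV_ACCESS_REMOTE_WRITE;

	/* The returned mr->lkey/rkey are then used in SGEs and work requests */
	return ibv_reg_mr(pd, buf, len, access);
}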
3623 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
3625 struct ib_device *ibdev = ctx->device;
3626 struct bnxt_re_ucontext *uctx =
3627 container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
3628 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3629 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3630 struct bnxt_re_uctx_resp resp;
3631 u32 chip_met_rev_num = 0;
3634 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3635 ibdev->uverbs_abi_ver);
3637 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3638 dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
3639 BNXT_RE_ABI_VERSION);
3645 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3650 spin_lock_init(&uctx->sh_lock);
3652 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
3653 chip_met_rev_num = rdev->chip_ctx.chip_num;
3654 chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_rev & 0xFF) <<
3655 BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
3656 chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_metal & 0xFF) <<
3657 BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
3658 resp.chip_id0 = chip_met_rev_num;
3659 /* Future extension of chip info */
3661 /*Temp, Use idr_alloc instead */
3662 resp.dev_id = rdev->en_dev->pdev->devfn;
3663 resp.max_qp = rdev->qplib_ctx.qpc_count;
3664 resp.pg_size = PAGE_SIZE;
3665 resp.cqe_sz = sizeof(struct cq_base);
3666 resp.max_cqd = dev_attr->max_cq_wqes;
3669 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
3671 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3678 free_page((unsigned long)uctx->shpg);
3684 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3686 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3687 struct bnxt_re_ucontext,
3690 struct bnxt_re_dev *rdev = uctx->rdev;
3693 free_page((unsigned long)uctx->shpg);
3695 if (uctx->dpi.dbr) {
3696 /* Free DPI only if this is the first PD allocated by the
3697 * application and mark the context dpi as NULL
*/
3699 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3700 &rdev->qplib_res.dpi_tbl, &uctx->dpi);
3701 uctx->dpi.dbr = NULL;
3705 /* Helper function to mmap the virtual memory from user app */
3706 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3708 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3709 struct bnxt_re_ucontext,
3711 struct bnxt_re_dev *rdev = uctx->rdev;
3714 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3717 if (vma->vm_pgoff) {
3718 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3719 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3720 PAGE_SIZE, vma->vm_page_prot)) {
3721 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
return -EAGAIN;
}
} else {
3725 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3726 if (remap_pfn_range(vma, vma->vm_start,
3727 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3728 dev_err(rdev_to_dev(rdev),
3729 "Failed to map shared page");
return -EAGAIN;
}
}
return 0;
}