2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: IB Verbs interpreter
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44 #include <net/addrconf.h>
46 #include <rdma/ib_verbs.h>
47 #include <rdma/ib_user_verbs.h>
48 #include <rdma/ib_umem.h>
49 #include <rdma/ib_addr.h>
50 #include <rdma/ib_mad.h>
51 #include <rdma/ib_cache.h>
52 #include <rdma/uverbs_ioctl.h>
53 #include <linux/hashtable.h>
58 #include "qplib_res.h"
61 #include "qplib_rcfw.h"
66 #include <rdma/uverbs_types.h>
67 #include <rdma/uverbs_std_types.h>
69 #include <rdma/ib_user_ioctl_cmds.h>
71 #define UVERBS_MODULE_NAME bnxt_re
72 #include <rdma/uverbs_named_ioctl.h>
74 #include <rdma/bnxt_re-abi.h>
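/* Helpers to translate verbs access flags to the qplib/firmware access
 * bits and back (local/remote read-write, atomics, MW bind, zero-based
 * VA and ODP).
 */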
76 static int __from_ib_access_flags(int iflags)
80 if (iflags & IB_ACCESS_LOCAL_WRITE)
81 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
82 if (iflags & IB_ACCESS_REMOTE_READ)
83 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
84 if (iflags & IB_ACCESS_REMOTE_WRITE)
85 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
86 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
87 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
88 if (iflags & IB_ACCESS_MW_BIND)
89 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
90 if (iflags & IB_ZERO_BASED)
91 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
92 if (iflags & IB_ACCESS_ON_DEMAND)
93 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
97 static enum ib_access_flags __to_ib_access_flags(int qflags)
99 enum ib_access_flags iflags = 0;
101 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
102 iflags |= IB_ACCESS_LOCAL_WRITE;
103 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
104 iflags |= IB_ACCESS_REMOTE_WRITE;
105 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
106 iflags |= IB_ACCESS_REMOTE_READ;
107 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
108 iflags |= IB_ACCESS_REMOTE_ATOMIC;
109 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
110 iflags |= IB_ACCESS_MW_BIND;
111 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
112 iflags |= IB_ZERO_BASED;
113 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
114 iflags |= IB_ACCESS_ON_DEMAND;
118 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
119 struct bnxt_qplib_sge *sg_list, int num)
123 for (i = 0; i < num; i++) {
124 sg_list[i].addr = ib_sg_list[i].addr;
125 sg_list[i].lkey = ib_sg_list[i].lkey;
126 sg_list[i].size = ib_sg_list[i].length;
127 total += sg_list[i].size;
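/* Fill ib_device_attr from the cached qplib device attributes and the
 * PCI identifiers of the parent device; atomic capabilities are only
 * advertised when the adapter reports atomics support.
 */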
133 int bnxt_re_query_device(struct ib_device *ibdev,
134 struct ib_device_attr *ib_attr,
135 struct ib_udata *udata)
137 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
138 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
140 memset(ib_attr, 0, sizeof(*ib_attr));
141 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
142 min(sizeof(dev_attr->fw_ver),
143 sizeof(ib_attr->fw_ver)));
144 addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
145 rdev->netdev->dev_addr);
146 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
147 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
149 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
150 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
151 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
152 ib_attr->max_qp = dev_attr->max_qp;
153 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
154 ib_attr->device_cap_flags =
155 IB_DEVICE_CURR_QP_STATE_MOD
156 | IB_DEVICE_RC_RNR_NAK_GEN
157 | IB_DEVICE_SHUTDOWN_PORT
158 | IB_DEVICE_SYS_IMAGE_GUID
159 | IB_DEVICE_RESIZE_MAX_WR
160 | IB_DEVICE_PORT_ACTIVE_EVENT
161 | IB_DEVICE_N_NOTIFY_CQ
162 | IB_DEVICE_MEM_WINDOW
163 | IB_DEVICE_MEM_WINDOW_TYPE_2B
164 | IB_DEVICE_MEM_MGT_EXTENSIONS;
165 ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
166 ib_attr->max_send_sge = dev_attr->max_qp_sges;
167 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
168 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
169 ib_attr->max_cq = dev_attr->max_cq;
170 ib_attr->max_cqe = dev_attr->max_cq_wqes;
171 ib_attr->max_mr = dev_attr->max_mr;
172 ib_attr->max_pd = dev_attr->max_pd;
173 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
174 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
175 ib_attr->atomic_cap = IB_ATOMIC_NONE;
176 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
177 if (dev_attr->is_atomic) {
178 ib_attr->atomic_cap = IB_ATOMIC_GLOB;
179 ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
182 ib_attr->max_ee_rd_atom = 0;
183 ib_attr->max_res_rd_atom = 0;
184 ib_attr->max_ee_init_rd_atom = 0;
186 ib_attr->max_rdd = 0;
187 ib_attr->max_mw = dev_attr->max_mw;
188 ib_attr->max_raw_ipv6_qp = 0;
189 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
190 ib_attr->max_mcast_grp = 0;
191 ib_attr->max_mcast_qp_attach = 0;
192 ib_attr->max_total_mcast_qp_attach = 0;
193 ib_attr->max_ah = dev_attr->max_ah;
195 ib_attr->max_srq = dev_attr->max_srq;
196 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
197 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
199 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
201 ib_attr->max_pkeys = 1;
202 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
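/* Report port attributes: the logical/physical state follows the netdev
 * carrier, the MTU comes from the netdev, and the active speed and width
 * are taken from ib_get_eth_speed().
 */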
207 int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
208 struct ib_port_attr *port_attr)
210 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
211 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
214 memset(port_attr, 0, sizeof(*port_attr));
216 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
217 port_attr->state = IB_PORT_ACTIVE;
218 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
220 port_attr->state = IB_PORT_DOWN;
221 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
223 port_attr->max_mtu = IB_MTU_4096;
224 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
225 port_attr->gid_tbl_len = dev_attr->max_sgid;
226 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
227 IB_PORT_DEVICE_MGMT_SUP |
228 IB_PORT_VENDOR_CLASS_SUP;
229 port_attr->ip_gids = true;
231 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
232 port_attr->bad_pkey_cntr = 0;
233 port_attr->qkey_viol_cntr = 0;
234 port_attr->pkey_tbl_len = dev_attr->max_pkey;
236 port_attr->sm_lid = 0;
238 port_attr->max_vl_num = 4;
239 port_attr->sm_sl = 0;
240 port_attr->subnet_timeout = 0;
241 port_attr->init_type_reply = 0;
242 rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
243 &port_attr->active_width);
248 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
249 struct ib_port_immutable *immutable)
251 struct ib_port_attr port_attr;
253 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
256 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
257 immutable->gid_tbl_len = port_attr.gid_tbl_len;
258 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
259 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
260 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
264 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
266 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
268 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
269 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
270 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
273 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
274 u16 index, u16 *pkey)
279 *pkey = IB_DEFAULT_PKEY_FULL;
284 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
285 int index, union ib_gid *gid)
287 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
290 /* Ignore port_num */
291 memset(gid, 0, sizeof(*gid));
292 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
293 &rdev->qplib_res.sgid_tbl, index,
294 (struct bnxt_qplib_gid *)gid);
298 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
301 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
302 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
303 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
304 struct bnxt_qplib_gid *gid_to_del;
305 u16 vlan_id = 0xFFFF;
307 /* Delete the entry from the hardware */
312 if (sgid_tbl && sgid_tbl->active) {
313 if (ctx->idx >= sgid_tbl->max)
315 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
316 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
317 /* DEL_GID is called in WQ context (netdevice_event_work_handler)
318 * or via the ib_unregister_device path. In the former case QP1
319 * may not be destroyed yet, in which case just return as FW
320 * needs that entry to be present and will fail its deletion.
321 * We could get invoked again after QP1 is destroyed OR get an
322 * ADD_GID call with a different GID value for the same index
323 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
326 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
327 ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
328 ibdev_dbg(&rdev->ibdev,
329 "Trying to delete GID0 while QP1 is alive\n");
334 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
337 ibdev_err(&rdev->ibdev,
338 "Failed to remove GID: %#x", rc);
340 ctx_tbl = sgid_tbl->ctx;
341 ctx_tbl[ctx->idx] = NULL;
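/* Program a GID into the hardware SGID table. If the entry already
 * exists (-EALREADY) only the per-entry refcount is bumped; otherwise a
 * new driver context is allocated and remembered for later deletion.
 */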
351 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
355 u16 vlan_id = 0xFFFF;
356 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
357 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
358 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
360 rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
364 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
365 rdev->qplib_res.netdev->dev_addr,
366 vlan_id, true, &tbl_idx);
367 if (rc == -EALREADY) {
368 ctx_tbl = sgid_tbl->ctx;
369 ctx_tbl[tbl_idx]->refcnt++;
370 *context = ctx_tbl[tbl_idx];
375 ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
379 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
382 ctx_tbl = sgid_tbl->ctx;
385 ctx_tbl[tbl_idx] = ctx;
391 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
394 return IB_LINK_LAYER_ETHERNET;
397 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
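/* Fence MR/MW support for older (pre gen P5/P7) chips: a per-PD fence
 * MR is registered and a type-1 MW bind WQE is pre-built here, then
 * posted by bnxt_re_bind_fence_mw() with a freshly incremented rkey.
 */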
399 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
401 struct bnxt_re_fence_data *fence = &pd->fence;
402 struct ib_mr *ib_mr = &fence->mr->ib_mr;
403 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
404 struct bnxt_re_dev *rdev = pd->rdev;
406 if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
409 memset(wqe, 0, sizeof(*wqe));
410 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
411 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
412 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
413 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
414 wqe->bind.zero_based = false;
415 wqe->bind.parent_l_key = ib_mr->lkey;
416 wqe->bind.va = (u64)(unsigned long)fence->va;
417 wqe->bind.length = fence->size;
418 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
419 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
421 /* Save the initial rkey in fence structure for now;
422 * wqe->bind.r_key will be set at (re)bind time.
424 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
427 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
429 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
431 struct ib_pd *ib_pd = qp->ib_qp.pd;
432 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
433 struct bnxt_re_fence_data *fence = &pd->fence;
434 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
435 struct bnxt_qplib_swqe wqe;
438 memcpy(&wqe, fence_wqe, sizeof(wqe));
439 wqe.bind.r_key = fence->bind_rkey;
440 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
442 ibdev_dbg(&qp->rdev->ibdev,
443 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
444 wqe.bind.r_key, qp->qplib_qp.id, pd);
445 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
447 ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
450 bnxt_qplib_post_send_db(&qp->qplib_qp);
455 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
457 struct bnxt_re_fence_data *fence = &pd->fence;
458 struct bnxt_re_dev *rdev = pd->rdev;
459 struct device *dev = &rdev->en_dev->pdev->dev;
460 struct bnxt_re_mr *mr = fence->mr;
462 if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
466 bnxt_re_dealloc_mw(fence->mw);
471 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
474 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
478 if (fence->dma_addr) {
479 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
485 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
487 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
488 struct bnxt_re_fence_data *fence = &pd->fence;
489 struct bnxt_re_dev *rdev = pd->rdev;
490 struct device *dev = &rdev->en_dev->pdev->dev;
491 struct bnxt_re_mr *mr = NULL;
492 dma_addr_t dma_addr = 0;
496 if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
499 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
501 rc = dma_mapping_error(dev, dma_addr);
503 ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
508 fence->dma_addr = dma_addr;
511 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
518 mr->qplib_mr.pd = &pd->qplib_pd;
519 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
520 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
521 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
523 ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
528 mr->ib_mr.lkey = mr->qplib_mr.lkey;
529 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
530 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
531 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
532 BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
534 ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
537 mr->ib_mr.rkey = mr->qplib_mr.rkey;
539 /* Create a fence MW only for kernel consumers */
540 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
542 ibdev_err(&rdev->ibdev,
543 "Failed to create fence-MW for PD: %p\n", pd);
549 bnxt_re_create_fence_wqe(pd);
553 bnxt_re_destroy_fence_mr(pd);
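/* Insert an rdma_user_mmap entry for a user-mappable page. The shared
 * page is always placed at offset 0; doorbell and toggle pages get a
 * dynamically assigned offset that is returned for the uverbs response.
 */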
557 static struct bnxt_re_user_mmap_entry*
558 bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
559 enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
561 struct bnxt_re_user_mmap_entry *entry;
564 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
568 entry->mem_offset = mem_offset;
569 entry->mmap_flag = mmap_flag;
573 case BNXT_RE_MMAP_SH_PAGE:
574 ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
575 &entry->rdma_entry, PAGE_SIZE, 0);
577 case BNXT_RE_MMAP_UC_DB:
578 case BNXT_RE_MMAP_WC_DB:
579 case BNXT_RE_MMAP_DBR_BAR:
580 case BNXT_RE_MMAP_DBR_PAGE:
581 case BNXT_RE_MMAP_TOGGLE_PAGE:
582 ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
583 &entry->rdma_entry, PAGE_SIZE);
595 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
600 /* Protection Domains */
601 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
603 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
604 struct bnxt_re_dev *rdev = pd->rdev;
607 rdma_user_mmap_entry_remove(pd->pd_db_mmap);
608 pd->pd_db_mmap = NULL;
611 bnxt_re_destroy_fence_mr(pd);
613 if (pd->qplib_pd.id) {
614 if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
615 &rdev->qplib_res.pd_tbl,
617 atomic_dec(&rdev->stats.res.pd_count);
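/* Allocate a protection domain. For user contexts the UC doorbell page
 * (DPI) is allocated on first use and its mmap offset is passed back in
 * the uverbs response; the fence MR is also set up where applicable.
 */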
622 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
624 struct ib_device *ibdev = ibpd->device;
625 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
626 struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
627 udata, struct bnxt_re_ucontext, ib_uctx);
628 struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
629 struct bnxt_re_user_mmap_entry *entry = NULL;
634 if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
635 ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
641 struct bnxt_re_pd_resp resp = {};
643 if (!ucntx->dpi.dbr) {
644 /* Allocate DPI in alloc_pd to avoid failures in
645 * ibv_devinfo and the family of applications when DPIs
648 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
649 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
655 resp.pdid = pd->qplib_pd.id;
656 /* Still allow mapping this DBR to the new user PD. */
657 resp.dpi = ucntx->dpi.dpi;
659 entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
660 BNXT_RE_MMAP_UC_DB, &resp.dbr);
667 pd->pd_db_mmap = &entry->rdma_entry;
669 rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
671 rdma_user_mmap_entry_remove(pd->pd_db_mmap);
678 if (bnxt_re_create_fence_mr(pd))
679 ibdev_warn(&rdev->ibdev,
680 "Failed to create Fence-MR\n");
681 active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
682 if (active_pds > rdev->stats.res.pd_watermark)
683 rdev->stats.res.pd_watermark = active_pds;
687 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
693 /* Address Handles */
694 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
696 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
697 struct bnxt_re_dev *rdev = ah->rdev;
701 block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
702 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
703 if (BNXT_RE_CHECK_RC(rc)) {
704 if (rc == -ETIMEDOUT)
709 atomic_dec(&rdev->stats.res.ah_count);
714 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
719 case RDMA_NETWORK_IPV4:
720 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
722 case RDMA_NETWORK_IPV6:
723 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
726 nw_type = CMDQ_CREATE_AH_TYPE_V1;
732 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
733 struct ib_udata *udata)
735 struct ib_pd *ib_pd = ib_ah->pd;
736 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
737 struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
738 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
739 struct bnxt_re_dev *rdev = pd->rdev;
740 const struct ib_gid_attr *sgid_attr;
741 struct bnxt_re_gid_ctx *ctx;
742 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
747 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
748 ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
753 ah->qplib_ah.pd = &pd->qplib_pd;
755 /* Supply the configuration for the HW */
756 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
757 sizeof(union ib_gid));
758 sgid_attr = grh->sgid_attr;
759 /* Get the HW context of the GID. The reference
760 * to the GID table entry is already taken by the caller.
762 ctx = rdma_read_gid_hw_context(sgid_attr);
763 ah->qplib_ah.sgid_index = ctx->idx;
764 ah->qplib_ah.host_sgid_index = grh->sgid_index;
765 ah->qplib_ah.traffic_class = grh->traffic_class;
766 ah->qplib_ah.flow_label = grh->flow_label;
767 ah->qplib_ah.hop_limit = grh->hop_limit;
768 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
770 /* Get network header type for this GID */
771 nw_type = rdma_gid_attr_network_type(sgid_attr);
772 ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
774 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
775 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
777 RDMA_CREATE_AH_SLEEPABLE));
779 ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
783 /* Write AVID to shared page. */
785 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
786 udata, struct bnxt_re_ucontext, ib_uctx);
790 spin_lock_irqsave(&uctx->sh_lock, flag);
791 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
792 *wrptr = ah->qplib_ah.id;
793 wmb(); /* make sure cache is updated. */
794 spin_unlock_irqrestore(&uctx->sh_lock, flag);
796 active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
797 if (active_ahs > rdev->stats.res.ah_watermark)
798 rdev->stats.res.ah_watermark = active_ahs;
803 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
805 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
807 ah_attr->type = ib_ah->type;
808 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
809 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
810 rdma_ah_set_grh(ah_attr, NULL, 0,
811 ah->qplib_ah.host_sgid_index,
812 0, ah->qplib_ah.traffic_class);
813 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
814 rdma_ah_set_port_num(ah_attr, 1);
815 rdma_ah_set_static_rate(ah_attr, 0);
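/* Take both CQ locks of a QP (send CQ first, then receive CQ when they
 * differ) so that flush-list manipulation and QP cleanup are atomic with
 * respect to completion processing.
 */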
819 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
820 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
824 spin_lock_irqsave(&qp->scq->cq_lock, flags);
825 if (qp->rcq != qp->scq)
826 spin_lock(&qp->rcq->cq_lock);
828 __acquire(&qp->rcq->cq_lock);
833 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
835 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
837 if (qp->rcq != qp->scq)
838 spin_unlock(&qp->rcq->cq_lock);
840 __release(&qp->rcq->cq_lock);
841 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
844 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
846 struct bnxt_re_qp *gsi_sqp;
847 struct bnxt_re_ah *gsi_sah;
848 struct bnxt_re_dev *rdev;
852 gsi_sqp = rdev->gsi_ctx.gsi_sqp;
853 gsi_sah = rdev->gsi_ctx.gsi_sah;
855 ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
856 bnxt_qplib_destroy_ah(&rdev->qplib_res,
859 atomic_dec(&rdev->stats.res.ah_count);
860 bnxt_qplib_clean_qp(&qp->qplib_qp);
862 ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
863 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
865 ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
868 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
870 /* remove from active qp list */
871 mutex_lock(&rdev->qp_lock);
872 list_del(&gsi_sqp->list);
873 mutex_unlock(&rdev->qp_lock);
874 atomic_dec(&rdev->stats.res.qp_count);
876 kfree(rdev->gsi_ctx.sqp_tbl);
879 rdev->gsi_ctx.gsi_sqp = NULL;
880 rdev->gsi_ctx.gsi_sah = NULL;
881 rdev->gsi_ctx.sqp_tbl = NULL;
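/* Destroy a QP: tear down the HW QP, scrub its entries from the CQs,
 * release user memory, drop it from the active-QP list, and for GSI QPs
 * also destroy the shadow QP/AH. The associated NQs are then
 * synchronized so no stale notifications remain in flight.
 */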
889 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
891 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
892 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
893 struct bnxt_re_dev *rdev = qp->rdev;
894 struct bnxt_qplib_nq *scq_nq = NULL;
895 struct bnxt_qplib_nq *rcq_nq = NULL;
899 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
901 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
903 ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
907 if (rdma_is_kernel_res(&qp->ib_qp.res)) {
908 flags = bnxt_re_lock_cqs(qp);
909 bnxt_qplib_clean_qp(&qp->qplib_qp);
910 bnxt_re_unlock_cqs(qp, flags);
913 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
915 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
916 rc = bnxt_re_destroy_gsi_sqp(qp);
921 mutex_lock(&rdev->qp_lock);
923 mutex_unlock(&rdev->qp_lock);
924 atomic_dec(&rdev->stats.res.qp_count);
925 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
926 atomic_dec(&rdev->stats.res.rc_qp_count);
927 else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
928 atomic_dec(&rdev->stats.res.ud_qp_count);
930 ib_umem_release(qp->rumem);
931 ib_umem_release(qp->sumem);
933 /* Flush all the entries of notification queue associated with
936 scq_nq = qplib_qp->scq->nq;
937 rcq_nq = qplib_qp->rcq->nq;
938 bnxt_re_synchronize_nq(scq_nq);
939 if (scq_nq != rcq_nq)
940 bnxt_re_synchronize_nq(rcq_nq);
945 static u8 __from_ib_qp_type(enum ib_qp_type type)
949 return CMDQ_CREATE_QP1_TYPE_GSI;
951 return CMDQ_CREATE_QP_TYPE_RC;
953 return CMDQ_CREATE_QP_TYPE_UD;
959 static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
962 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
964 return bnxt_re_get_rwqe_size(rsge);
967 static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
969 u16 wqe_size, calc_ils;
971 wqe_size = bnxt_re_get_swqe_size(nsge);
973 calc_ils = sizeof(struct sq_send_hdr) + ilsize;
974 wqe_size = max_t(u16, calc_ils, wqe_size);
975 wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
980 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
981 struct ib_qp_init_attr *init_attr)
983 struct bnxt_qplib_dev_attr *dev_attr;
984 struct bnxt_qplib_qp *qplqp;
985 struct bnxt_re_dev *rdev;
986 struct bnxt_qplib_q *sq;
990 qplqp = &qp->qplib_qp;
992 dev_attr = &rdev->dev_attr;
994 align = sizeof(struct sq_send_hdr);
995 ilsize = ALIGN(init_attr->cap.max_inline_data, align);
997 sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
998 if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
1000 /* For gen P4 and gen P5 devices in backward-compatibility mode,
1001 * the WQE size is fixed at 128 bytes.
1003 if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
1004 qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1005 sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
1007 if (init_attr->cap.max_inline_data) {
1008 qplqp->max_inline_data = sq->wqe_size -
1009 sizeof(struct sq_send_hdr);
1010 init_attr->cap.max_inline_data = qplqp->max_inline_data;
1011 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1012 sq->max_sge = qplqp->max_inline_data /
1013 sizeof(struct sq_sge);
1019 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
1020 struct bnxt_re_qp *qp, struct ib_udata *udata)
1022 struct bnxt_qplib_qp *qplib_qp;
1023 struct bnxt_re_ucontext *cntx;
1024 struct bnxt_re_qp_req ureq;
1025 int bytes = 0, psn_sz;
1026 struct ib_umem *umem;
1029 qplib_qp = &qp->qplib_qp;
1030 cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
1032 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1035 bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
1036 /* Consider mapping PSN search memory only for RC QPs. */
1037 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1038 psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
1039 sizeof(struct sq_psn_search_ext) :
1040 sizeof(struct sq_psn_search);
1041 psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1042 qplib_qp->sq.max_wqe :
1043 ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
1044 sizeof(struct bnxt_qplib_sge));
1045 bytes += (psn_nume * psn_sz);
1048 bytes = PAGE_ALIGN(bytes);
1049 umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
1050 IB_ACCESS_LOCAL_WRITE);
1052 return PTR_ERR(umem);
1055 qplib_qp->sq.sg_info.umem = umem;
1056 qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
1057 qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
1058 qplib_qp->qp_handle = ureq.qp_handle;
1060 if (!qp->qplib_qp.srq) {
1061 bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
1062 bytes = PAGE_ALIGN(bytes);
1063 umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
1064 IB_ACCESS_LOCAL_WRITE);
1068 qplib_qp->rq.sg_info.umem = umem;
1069 qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
1070 qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
1073 qplib_qp->dpi = &cntx->dpi;
1076 ib_umem_release(qp->sumem);
1078 memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
1080 return PTR_ERR(umem);
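/* Shadow QP1 resources, used on chips that need a software-assisted GSI
 * path: the shadow AH points back at the local GID0 and SMAC, and the
 * shadow UD QP's SQ depth mirrors QP1's RQ depth.
 */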
1083 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
1084 (struct bnxt_re_pd *pd,
1085 struct bnxt_qplib_res *qp1_res,
1086 struct bnxt_qplib_qp *qp1_qp)
1088 struct bnxt_re_dev *rdev = pd->rdev;
1089 struct bnxt_re_ah *ah;
1093 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
1098 ah->qplib_ah.pd = &pd->qplib_pd;
1100 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1104 /* Supply the same data for DGID as for SGID */
1105 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1106 sizeof(union ib_gid));
1107 ah->qplib_ah.sgid_index = 0;
1109 ah->qplib_ah.traffic_class = 0;
1110 ah->qplib_ah.flow_label = 0;
1111 ah->qplib_ah.hop_limit = 1;
1112 ah->qplib_ah.sl = 0;
1113 /* Have DMAC same as SMAC */
1114 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1116 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1118 ibdev_err(&rdev->ibdev,
1119 "Failed to allocate HW AH for Shadow QP");
1122 atomic_inc(&rdev->stats.res.ah_count);
1131 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1132 (struct bnxt_re_pd *pd,
1133 struct bnxt_qplib_res *qp1_res,
1134 struct bnxt_qplib_qp *qp1_qp)
1136 struct bnxt_re_dev *rdev = pd->rdev;
1137 struct bnxt_re_qp *qp;
1140 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1146 /* Initialize the shadow QP structure from the QP1 values */
1147 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1149 qp->qplib_qp.pd = &pd->qplib_pd;
1150 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1151 qp->qplib_qp.type = IB_QPT_UD;
1153 qp->qplib_qp.max_inline_data = 0;
1154 qp->qplib_qp.sig_type = true;
1156 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1157 qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1158 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1159 qp->qplib_qp.sq.max_sge = 2;
1160 /* Q full delta can be 1 since it is an internal QP */
1161 qp->qplib_qp.sq.q_full_delta = 1;
1162 qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1163 qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1165 qp->qplib_qp.scq = qp1_qp->scq;
1166 qp->qplib_qp.rcq = qp1_qp->rcq;
1168 qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1169 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1170 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1171 /* Q full delta can be 1 since it is an internal QP */
1172 qp->qplib_qp.rq.q_full_delta = 1;
1173 qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1174 qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1176 qp->qplib_qp.mtu = qp1_qp->mtu;
1178 qp->qplib_qp.sq_hdr_buf_size = 0;
1179 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1180 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1182 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1186 spin_lock_init(&qp->sq_lock);
1187 INIT_LIST_HEAD(&qp->list);
1188 mutex_lock(&rdev->qp_lock);
1189 list_add_tail(&qp->list, &rdev->qp_list);
1190 atomic_inc(&rdev->stats.res.qp_count);
1191 mutex_unlock(&rdev->qp_lock);
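/* QP setup helpers: clamp the requested SGE counts and queue depths to
 * the device limits and fill in the qplib queue geometry; the *gsi*
 * variants further adjust those values for QP1 on older chips.
 */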
1198 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1199 struct ib_qp_init_attr *init_attr,
1200 struct bnxt_re_ucontext *uctx)
1202 struct bnxt_qplib_dev_attr *dev_attr;
1203 struct bnxt_qplib_qp *qplqp;
1204 struct bnxt_re_dev *rdev;
1205 struct bnxt_qplib_q *rq;
1209 qplqp = &qp->qplib_qp;
1211 dev_attr = &rdev->dev_attr;
1213 if (init_attr->srq) {
1214 struct bnxt_re_srq *srq;
1216 srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1217 qplqp->srq = &srq->qplib_srq;
1220 rq->max_sge = init_attr->cap.max_recv_sge;
1221 if (rq->max_sge > dev_attr->max_qp_sges)
1222 rq->max_sge = dev_attr->max_qp_sges;
1223 init_attr->cap.max_recv_sge = rq->max_sge;
1224 rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1225 dev_attr->max_qp_sges);
1226 /* Allocate 1 more than what's provided so posting max doesn't
1229 entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
1230 rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1231 rq->q_full_delta = 0;
1232 rq->sg_info.pgsize = PAGE_SIZE;
1233 rq->sg_info.pgshft = PAGE_SHIFT;
1239 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1241 struct bnxt_qplib_dev_attr *dev_attr;
1242 struct bnxt_qplib_qp *qplqp;
1243 struct bnxt_re_dev *rdev;
1246 qplqp = &qp->qplib_qp;
1247 dev_attr = &rdev->dev_attr;
1249 if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1250 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1251 if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1252 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1253 qplqp->rq.max_sge = 6;
1257 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1258 struct ib_qp_init_attr *init_attr,
1259 struct bnxt_re_ucontext *uctx)
1261 struct bnxt_qplib_dev_attr *dev_attr;
1262 struct bnxt_qplib_qp *qplqp;
1263 struct bnxt_re_dev *rdev;
1264 struct bnxt_qplib_q *sq;
1270 qplqp = &qp->qplib_qp;
1272 dev_attr = &rdev->dev_attr;
1274 sq->max_sge = init_attr->cap.max_send_sge;
1275 if (sq->max_sge > dev_attr->max_qp_sges) {
1276 sq->max_sge = dev_attr->max_qp_sges;
1277 init_attr->cap.max_send_sge = sq->max_sge;
1280 rc = bnxt_re_setup_swqe_size(qp, init_attr);
1284 entries = init_attr->cap.max_send_wr;
1285 /* Allocate 128 + 1 more than what's provided */
1286 diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1287 0 : BNXT_QPLIB_RESERVED_QP_WRS;
1288 entries = bnxt_re_init_depth(entries + diff + 1, uctx);
1289 sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1290 sq->q_full_delta = diff + 1;
1292 * Reserve one slot for the phantom WQE. The application can
1293 * post one extra entry in this case, but allow it to avoid an
1294 * unexpected queue-full condition.
1296 qplqp->sq.q_full_delta -= 1;
1297 qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1298 qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1303 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1304 struct ib_qp_init_attr *init_attr,
1305 struct bnxt_re_ucontext *uctx)
1307 struct bnxt_qplib_dev_attr *dev_attr;
1308 struct bnxt_qplib_qp *qplqp;
1309 struct bnxt_re_dev *rdev;
1313 qplqp = &qp->qplib_qp;
1314 dev_attr = &rdev->dev_attr;
1316 if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1317 entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
1318 qplqp->sq.max_wqe = min_t(u32, entries,
1319 dev_attr->max_qp_wqes + 1);
1320 qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1321 init_attr->cap.max_send_wr;
1322 qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1323 if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1324 qplqp->sq.max_sge = dev_attr->max_qp_sges;
1328 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1329 struct ib_qp_init_attr *init_attr)
1331 struct bnxt_qplib_chip_ctx *chip_ctx;
1334 chip_ctx = rdev->chip_ctx;
1336 qptype = __from_ib_qp_type(init_attr->qp_type);
1337 if (qptype == IB_QPT_MAX) {
1338 ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1339 qptype = -EOPNOTSUPP;
1343 if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
1344 init_attr->qp_type == IB_QPT_GSI)
1345 qptype = CMDQ_CREATE_QP_TYPE_GSI;
1350 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1351 struct ib_qp_init_attr *init_attr,
1352 struct ib_udata *udata)
1354 struct bnxt_qplib_dev_attr *dev_attr;
1355 struct bnxt_re_ucontext *uctx;
1356 struct bnxt_qplib_qp *qplqp;
1357 struct bnxt_re_dev *rdev;
1358 struct bnxt_re_cq *cq;
1362 qplqp = &qp->qplib_qp;
1363 dev_attr = &rdev->dev_attr;
1365 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1366 /* Setup misc params */
1367 ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1368 qplqp->pd = &pd->qplib_pd;
1369 qplqp->qp_handle = (u64)qplqp;
1370 qplqp->max_inline_data = init_attr->cap.max_inline_data;
1371 qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1372 qptype = bnxt_re_init_qp_type(rdev, init_attr);
1377 qplqp->type = (u8)qptype;
1378 qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
1380 if (init_attr->qp_type == IB_QPT_RC) {
1381 qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1382 qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1384 qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1385 qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1386 if (init_attr->create_flags) {
1387 ibdev_dbg(&rdev->ibdev,
1388 "QP create flags 0x%x not supported",
1389 init_attr->create_flags);
1394 if (init_attr->send_cq) {
1395 cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1396 qplqp->scq = &cq->qplib_cq;
1400 if (init_attr->recv_cq) {
1401 cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1402 qplqp->rcq = &cq->qplib_cq;
1407 rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
1410 if (init_attr->qp_type == IB_QPT_GSI)
1411 bnxt_re_adjust_gsi_rq_attr(qp);
1414 rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
1417 if (init_attr->qp_type == IB_QPT_GSI)
1418 bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
1420 if (udata) /* This will update DPI and qp_handle */
1421 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1426 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1427 struct bnxt_re_pd *pd)
1429 struct bnxt_re_sqp_entries *sqp_tbl;
1430 struct bnxt_re_dev *rdev;
1431 struct bnxt_re_qp *sqp;
1432 struct bnxt_re_ah *sah;
1436 /* Create a shadow QP to handle the QP1 traffic */
1437 sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1441 rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1443 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1446 ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1449 rdev->gsi_ctx.gsi_sqp = sqp;
1453 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1456 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1459 ibdev_err(&rdev->ibdev,
1460 "Failed to create AH entry for ShadowQP");
1463 rdev->gsi_ctx.gsi_sah = sah;
1471 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1472 struct ib_qp_init_attr *init_attr)
1474 struct bnxt_re_dev *rdev;
1475 struct bnxt_qplib_qp *qplqp;
1479 qplqp = &qp->qplib_qp;
1481 qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1482 qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1484 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1486 ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1490 rc = bnxt_re_create_shadow_gsi(qp, pd);
1495 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1496 struct ib_qp_init_attr *init_attr,
1497 struct bnxt_qplib_dev_attr *dev_attr)
1501 if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1502 init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1503 init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1504 init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1505 init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1506 ibdev_err(&rdev->ibdev,
1507 "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1508 init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1509 init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1510 init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1511 init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1512 init_attr->cap.max_inline_data,
1513 dev_attr->max_inline_data);
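/* Create a QP: validate the requested sizes against the device limits,
 * initialize the qplib QP from the init attributes (and user buffers
 * when udata is present), take the dedicated GSI path for QP1 on older
 * chips, and update the per-type resource counters and watermarks.
 */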
1519 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
1520 struct ib_udata *udata)
1522 struct ib_pd *ib_pd = ib_qp->pd;
1523 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1524 struct bnxt_re_dev *rdev = pd->rdev;
1525 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1526 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1530 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1537 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
1541 if (qp_init_attr->qp_type == IB_QPT_GSI &&
1542 !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
1543 rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1549 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1551 ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1555 struct bnxt_re_qp_resp resp;
1557 resp.qpid = qp->qplib_qp.id;
1559 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1561 ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1567 qp->ib_qp.qp_num = qp->qplib_qp.id;
1568 if (qp_init_attr->qp_type == IB_QPT_GSI)
1569 rdev->gsi_ctx.gsi_qp = qp;
1570 spin_lock_init(&qp->sq_lock);
1571 spin_lock_init(&qp->rq_lock);
1572 INIT_LIST_HEAD(&qp->list);
1573 mutex_lock(&rdev->qp_lock);
1574 list_add_tail(&qp->list, &rdev->qp_list);
1575 mutex_unlock(&rdev->qp_lock);
1576 active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
1577 if (active_qps > rdev->stats.res.qp_watermark)
1578 rdev->stats.res.qp_watermark = active_qps;
1579 if (qp_init_attr->qp_type == IB_QPT_RC) {
1580 active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
1581 if (active_qps > rdev->stats.res.rc_qp_watermark)
1582 rdev->stats.res.rc_qp_watermark = active_qps;
1583 } else if (qp_init_attr->qp_type == IB_QPT_UD) {
1584 active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
1585 if (active_qps > rdev->stats.res.ud_qp_watermark)
1586 rdev->stats.res.ud_qp_watermark = active_qps;
1591 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1593 ib_umem_release(qp->rumem);
1594 ib_umem_release(qp->sumem);
1599 static u8 __from_ib_qp_state(enum ib_qp_state state)
1603 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1605 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1607 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1609 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1611 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1613 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1616 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1620 static enum ib_qp_state __to_ib_qp_state(u8 state)
1623 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1624 return IB_QPS_RESET;
1625 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1627 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1629 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1631 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1633 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1635 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1641 static u32 __from_ib_mtu(enum ib_mtu mtu)
1645 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1647 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1649 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1651 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1653 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1655 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1659 static enum ib_mtu __to_ib_mtu(u32 mtu)
1661 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1662 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1664 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1666 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1668 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1670 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1677 /* Shared Receive Queues */
1678 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1680 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1682 struct bnxt_re_dev *rdev = srq->rdev;
1683 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1684 struct bnxt_qplib_nq *nq = NULL;
1687 nq = qplib_srq->cq->nq;
1688 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1689 ib_umem_release(srq->umem);
1690 atomic_dec(&rdev->stats.res.srq_count);
1696 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1697 struct bnxt_re_pd *pd,
1698 struct bnxt_re_srq *srq,
1699 struct ib_udata *udata)
1701 struct bnxt_re_srq_req ureq;
1702 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1703 struct ib_umem *umem;
1705 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1706 udata, struct bnxt_re_ucontext, ib_uctx);
1708 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1711 bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1712 bytes = PAGE_ALIGN(bytes);
1713 umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1714 IB_ACCESS_LOCAL_WRITE);
1716 return PTR_ERR(umem);
1719 qplib_srq->sg_info.umem = umem;
1720 qplib_srq->sg_info.pgsize = PAGE_SIZE;
1721 qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1722 qplib_srq->srq_handle = ureq.srq_handle;
1723 qplib_srq->dpi = &cntx->dpi;
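/* Create an SRQ: the depth is clamped to the device limit, the WQE size
 * always uses the maximum SRQ SGEs, and for user consumers the ring
 * memory is pinned from udata via bnxt_re_init_user_srq().
 */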
1728 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1729 struct ib_srq_init_attr *srq_init_attr,
1730 struct ib_udata *udata)
1732 struct bnxt_qplib_dev_attr *dev_attr;
1733 struct bnxt_qplib_nq *nq = NULL;
1734 struct bnxt_re_ucontext *uctx;
1735 struct bnxt_re_dev *rdev;
1736 struct bnxt_re_srq *srq;
1737 struct bnxt_re_pd *pd;
1738 struct ib_pd *ib_pd;
1743 pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1745 dev_attr = &rdev->dev_attr;
1746 srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1748 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1749 ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1754 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1759 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1761 srq->qplib_srq.pd = &pd->qplib_pd;
1762 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1763 /* Allocate 1 more than what's provided so posting max doesn't
1766 entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
1767 if (entries > dev_attr->max_srq_wqes + 1)
1768 entries = dev_attr->max_srq_wqes + 1;
1769 srq->qplib_srq.max_wqe = entries;
1771 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1772 /* 128 byte WQE size for SRQ, so use the max SGEs */
1773 srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1774 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1775 srq->srq_limit = srq_init_attr->attr.srq_limit;
1776 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1780 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1785 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1787 ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1792 struct bnxt_re_srq_resp resp;
1794 resp.srqid = srq->qplib_srq.id;
1795 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1797 ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1798 bnxt_qplib_destroy_srq(&rdev->qplib_res,
1805 active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
1806 if (active_srqs > rdev->stats.res.srq_watermark)
1807 rdev->stats.res.srq_watermark = active_srqs;
1808 spin_lock_init(&srq->lock);
1813 ib_umem_release(srq->umem);
1818 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1819 enum ib_srq_attr_mask srq_attr_mask,
1820 struct ib_udata *udata)
1822 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1824 struct bnxt_re_dev *rdev = srq->rdev;
1827 switch (srq_attr_mask) {
1829 /* SRQ resize is not supported */
1832 /* Change the SRQ threshold */
1833 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1836 srq->qplib_srq.threshold = srq_attr->srq_limit;
1837 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1839 ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1842 /* On success, update the shadow */
1843 srq->srq_limit = srq_attr->srq_limit;
1844 /* No need to build and send a response back to udata */
1847 ibdev_err(&rdev->ibdev,
1848 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1853 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1855 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1857 struct bnxt_re_srq tsrq;
1858 struct bnxt_re_dev *rdev = srq->rdev;
1861 /* Get live SRQ attr */
1862 tsrq.qplib_srq.id = srq->qplib_srq.id;
1863 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1865 ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1868 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1869 srq_attr->max_sge = srq->qplib_srq.max_sge;
1870 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
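/* Post receive WRs to an SRQ under the SRQ lock, transcribing each
 * ib_recv_wr into a qplib receive WQE.
 */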
1875 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1876 const struct ib_recv_wr **bad_wr)
1878 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1880 struct bnxt_qplib_swqe wqe;
1881 unsigned long flags;
1884 spin_lock_irqsave(&srq->lock, flags);
1886 /* Transcribe each ib_recv_wr to qplib_swqe */
1887 wqe.num_sge = wr->num_sge;
1888 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1889 wqe.wr_id = wr->wr_id;
1890 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1892 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1899 spin_unlock_irqrestore(&srq->lock, flags);
1903 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1904 struct bnxt_re_qp *qp1_qp,
1907 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1910 if (qp_attr_mask & IB_QP_STATE) {
1911 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1912 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1914 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1915 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1916 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1919 if (qp_attr_mask & IB_QP_QKEY) {
1920 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1921 /* Use a fixed, driver-chosen QKEY */
1922 qp->qplib_qp.qkey = 0x81818181;
1924 if (qp_attr_mask & IB_QP_SQ_PSN) {
1925 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1926 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1929 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1931 ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
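/* Modify a QP: each attribute selected in qp_attr_mask is translated
 * into the corresponding CMDQ modify mask and field. State transitions
 * are validated with ib_modify_qp_is_ok(), moving to ERR/RESET also
 * updates the flush list, and changes to QP1 are mirrored to the shadow
 * QP where one exists.
 */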
1935 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1936 int qp_attr_mask, struct ib_udata *udata)
1938 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1939 struct bnxt_re_dev *rdev = qp->rdev;
1940 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1941 enum ib_qp_state curr_qp_state, new_qp_state;
1946 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1949 qp->qplib_qp.modify_flags = 0;
1950 if (qp_attr_mask & IB_QP_STATE) {
1951 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1952 new_qp_state = qp_attr->qp_state;
1953 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1954 ib_qp->qp_type, qp_attr_mask)) {
1955 ibdev_err(&rdev->ibdev,
1956 "Invalid attribute mask: %#x specified ",
1958 ibdev_err(&rdev->ibdev,
1959 "for qpn: %#x type: %#x",
1960 ib_qp->qp_num, ib_qp->qp_type);
1961 ibdev_err(&rdev->ibdev,
1962 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1963 curr_qp_state, new_qp_state);
1966 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1967 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1970 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1971 ibdev_dbg(&rdev->ibdev,
1972 "Move QP = %p to flush list\n", qp);
1973 flags = bnxt_re_lock_cqs(qp);
1974 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1975 bnxt_re_unlock_cqs(qp, flags);
1978 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1979 ibdev_dbg(&rdev->ibdev,
1980 "Move QP = %p out of flush list\n", qp);
1981 flags = bnxt_re_lock_cqs(qp);
1982 bnxt_qplib_clean_qp(&qp->qplib_qp);
1983 bnxt_re_unlock_cqs(qp, flags);
1986 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1987 qp->qplib_qp.modify_flags |=
1988 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1989 qp->qplib_qp.en_sqd_async_notify = true;
1991 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1992 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1993 qp->qplib_qp.access =
1994 __from_ib_access_flags(qp_attr->qp_access_flags);
1995 /* LOCAL_WRITE access must be set to allow RC receive */
1996 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1997 /* Temp: Set all params on QP as of now */
1998 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1999 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
2001 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2002 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2003 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
2005 if (qp_attr_mask & IB_QP_QKEY) {
2006 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2007 qp->qplib_qp.qkey = qp_attr->qkey;
2009 if (qp_attr_mask & IB_QP_AV) {
2010 const struct ib_global_route *grh =
2011 rdma_ah_read_grh(&qp_attr->ah_attr);
2012 const struct ib_gid_attr *sgid_attr;
2013 struct bnxt_re_gid_ctx *ctx;
2015 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
2016 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
2017 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
2018 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
2019 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
2020 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
2021 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
2022 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
2023 sizeof(qp->qplib_qp.ah.dgid.data));
2024 qp->qplib_qp.ah.flow_label = grh->flow_label;
2025 sgid_attr = grh->sgid_attr;
2026 /* Get the HW context of the GID. The reference
2027 * to the GID table entry is already taken by the caller.
2029 ctx = rdma_read_gid_hw_context(sgid_attr);
2030 qp->qplib_qp.ah.sgid_index = ctx->idx;
2031 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
2032 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
2033 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
2034 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
2035 ether_addr_copy(qp->qplib_qp.ah.dmac,
2036 qp_attr->ah_attr.roce.dmac);
2038 rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
2039 &qp->qplib_qp.smac[0]);
2043 nw_type = rdma_gid_attr_network_type(sgid_attr);
2045 case RDMA_NETWORK_IPV4:
2046 qp->qplib_qp.nw_type =
2047 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
2049 case RDMA_NETWORK_IPV6:
2050 qp->qplib_qp.nw_type =
2051 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
2054 qp->qplib_qp.nw_type =
2055 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
2060 if (qp_attr_mask & IB_QP_PATH_MTU) {
2061 qp->qplib_qp.modify_flags |=
2062 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2063 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
2064 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
2065 } else if (qp_attr->qp_state == IB_QPS_RTR) {
2066 qp->qplib_qp.modify_flags |=
2067 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2068 qp->qplib_qp.path_mtu =
2069 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
2071 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
2074 if (qp_attr_mask & IB_QP_TIMEOUT) {
2075 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
2076 qp->qplib_qp.timeout = qp_attr->timeout;
2078 if (qp_attr_mask & IB_QP_RETRY_CNT) {
2079 qp->qplib_qp.modify_flags |=
2080 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
2081 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
2083 if (qp_attr_mask & IB_QP_RNR_RETRY) {
2084 qp->qplib_qp.modify_flags |=
2085 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
2086 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
2088 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
2089 qp->qplib_qp.modify_flags |=
2090 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
2091 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
2093 if (qp_attr_mask & IB_QP_RQ_PSN) {
2094 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
2095 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
2097 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2098 qp->qplib_qp.modify_flags |=
2099 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
2100 /* Cap the max_rd_atomic to device max */
2101 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
2102 dev_attr->max_qp_rd_atom);
2104 if (qp_attr_mask & IB_QP_SQ_PSN) {
2105 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2106 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
2108 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2109 if (qp_attr->max_dest_rd_atomic >
2110 dev_attr->max_qp_init_rd_atom) {
2111 ibdev_err(&rdev->ibdev,
2112 "max_dest_rd_atomic requested%d is > dev_max%d",
2113 qp_attr->max_dest_rd_atomic,
2114 dev_attr->max_qp_init_rd_atom);
2118 qp->qplib_qp.modify_flags |=
2119 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2120 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2122 if (qp_attr_mask & IB_QP_CAP) {
2123 struct bnxt_re_ucontext *uctx =
2124 rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
2126 qp->qplib_qp.modify_flags |=
2127 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2128 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2129 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2130 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2131 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2132 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2133 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2134 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2135 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2136 (qp_attr->cap.max_inline_data >=
2137 dev_attr->max_inline_data)) {
2138 ibdev_err(&rdev->ibdev,
2139 "Create QP failed - max exceeded");
2142 entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
2143 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2144 dev_attr->max_qp_wqes + 1);
2145 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2146 qp_attr->cap.max_send_wr;
2148 * Reserve one slot for the phantom WQE. Some applications can
2149 * post one extra entry in this case; allow it to avoid an
2150 * unexpected queue-full condition.
2152 qp->qplib_qp.sq.q_full_delta -= 1;
2153 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2154 if (qp->qplib_qp.rq.max_wqe) {
2155 entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
2156 qp->qplib_qp.rq.max_wqe =
2157 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2158 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2159 qp_attr->cap.max_recv_wr;
2160 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2162 /* SRQ was used prior, just ignore the RQ caps */
2165 if (qp_attr_mask & IB_QP_DEST_QPN) {
2166 qp->qplib_qp.modify_flags |=
2167 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2168 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2170 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2172 ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2175 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2176 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2180 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2181 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2183 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2184 struct bnxt_re_dev *rdev = qp->rdev;
2185 struct bnxt_qplib_qp *qplib_qp;
2188 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2192 qplib_qp->id = qp->qplib_qp.id;
2193 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2195 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2197 ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2200 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2201 qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2202 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2203 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2204 qp_attr->pkey_index = qplib_qp->pkey_index;
2205 qp_attr->qkey = qplib_qp->qkey;
2206 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2207 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2208 qplib_qp->ah.host_sgid_index,
2209 qplib_qp->ah.hop_limit,
2210 qplib_qp->ah.traffic_class);
2211 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2212 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2213 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2214 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2215 qp_attr->timeout = qplib_qp->timeout;
2216 qp_attr->retry_cnt = qplib_qp->retry_cnt;
2217 qp_attr->rnr_retry = qplib_qp->rnr_retry;
2218 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2219 qp_attr->rq_psn = qplib_qp->rq.psn;
2220 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2221 qp_attr->sq_psn = qplib_qp->sq.psn;
2222 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2223 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2225 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2227 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2228 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2229 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2230 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2231 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2232 qp_init_attr->cap = qp_attr->cap;
2239 /* Routine for sending QP1 packets for RoCE V1 and V2
2241 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2242 const struct ib_send_wr *wr,
2243 struct bnxt_qplib_swqe *wqe,
2246 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2248 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2249 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2250 struct bnxt_qplib_sge sge;
2254 bool is_eth = false;
2255 bool is_vlan = false;
2256 bool is_grh = false;
2257 bool is_udp = false;
2259 u16 vlan_id = 0xFFFF;
2263 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2265 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2269 /* Get network header type for this GID */
2270 nw_type = rdma_gid_attr_network_type(sgid_attr);
2272 case RDMA_NETWORK_IPV4:
2273 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2275 case RDMA_NETWORK_IPV6:
2276 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2279 nw_type = BNXT_RE_ROCE_V1_PACKET;
2282 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2283 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2285 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2287 ether_type = ETH_P_IP;
2290 ether_type = ETH_P_IPV6;
2294 ether_type = ETH_P_IBOE;
2299 is_vlan = vlan_id && (vlan_id < 0x1000);
2301 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2302 ip_version, is_udp, 0, &qp->qp1_hdr);
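	/*
	 * Resulting on-wire header layout (sketch):
	 *   RoCE v1:        ETH [+ VLAN] + GRH  + BTH + DETH
	 *   RoCE v2 (IPv6): ETH [+ VLAN] + IPv6 + UDP + BTH + DETH
	 *   RoCE v2 (IPv4): ETH [+ VLAN] + IPv4 + UDP + BTH + DETH
	 */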
2305 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2306 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2308 /* For vlan, check the sgid for vlan existence */
2311 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2313 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2314 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2317 if (is_grh || (ip_version == 6)) {
2318 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2319 sizeof(sgid_attr->gid));
2320 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2321 sizeof(sgid_attr->gid));
2322 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
2325 if (ip_version == 4) {
2326 qp->qp1_hdr.ip4.tos = 0;
2327 qp->qp1_hdr.ip4.id = 0;
2328 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2329 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2331 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2332 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2333 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2337 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2338 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2339 qp->qp1_hdr.udp.csum = 0;
2343 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2344 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2345 qp->qp1_hdr.immediate_present = 1;
2347 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2349 if (wr->send_flags & IB_SEND_SOLICITED)
2350 qp->qp1_hdr.bth.solicited_event = 1;
2352 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2354 /* P_key for QP1 is for all members */
2355 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2356 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2357 qp->qp1_hdr.bth.ack_req = 0;
2359 qp->send_psn &= BTH_PSN_MASK;
2360 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2362 /* Use the privileged Q_Key for QP1 */
2363 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2364 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2366 /* Pack the QP1 to the transmit buffer */
2367 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2369 ib_ud_header_pack(&qp->qp1_hdr, buf);
2370 for (i = wqe->num_sge; i; i--) {
2371 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2372 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2373 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2377 * Max Header buf size for IPV6 RoCE V2 is 86,
2378 * which is same as the QP1 SQ header buffer.
2379 * Header buf size for IPV4 RoCE V2 can be 66.
2380 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
2381 * Subtract 20 bytes from QP1 SQ header buf size
2383 if (is_udp && ip_version == 4)
2386 * Max Header buf size for RoCE V1 is 78.
2387 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2388 * Subtract 8 bytes from QP1 SQ header buf size
2393 /* Subtract 4 bytes for non vlan packets */
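	/*
	 * Worked header sizes (with VLAN), matching the notes above:
	 *   RoCE v2 IPv6: 14 + 4 + 40 + 8 + 20 = 86 bytes
	 *   RoCE v2 IPv4: 14 + 4 + 20 + 8 + 20 = 66 bytes
	 *   RoCE v1:      14 + 4 + 40 + 20     = 78 bytes
	 * Non-VLAN packets are 4 bytes shorter in each case.
	 */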
2397 wqe->sg_list[0].addr = sge.addr;
2398 wqe->sg_list[0].lkey = sge.lkey;
2399 wqe->sg_list[0].size = sge.size;
2403 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2409 /* The MAD layer only provides a recv SGE sized for the ib_grh plus the
2410 * MAD datagram; there is no room for Ethernet headers, Ethertype, BTH,
2411 * DETH or the RoCE iCRC. The Cu+ solution must provide a buffer for the
2412 * entire receive packet (334 bytes) with no VLAN and then copy the GRH
2413 * and the MAD datagram out to the provided SGE.
2415 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2416 const struct ib_recv_wr *wr,
2417 struct bnxt_qplib_swqe *wqe,
2420 struct bnxt_re_sqp_entries *sqp_entry;
2421 struct bnxt_qplib_sge ref, sge;
2422 struct bnxt_re_dev *rdev;
2427 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2429 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2432 /* Create 1 SGE to receive the entire
2435 /* Save the reference from ULP */
2436 ref.addr = wqe->sg_list[0].addr;
2437 ref.lkey = wqe->sg_list[0].lkey;
2438 ref.size = wqe->sg_list[0].size;
2440 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2443 wqe->sg_list[0].addr = sge.addr;
2444 wqe->sg_list[0].lkey = sge.lkey;
2445 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2446 sge.size -= wqe->sg_list[0].size;
2448 sqp_entry->sge.addr = ref.addr;
2449 sqp_entry->sge.lkey = ref.lkey;
2450 sqp_entry->sge.size = ref.size;
2451 /* Store the wrid for reporting completion */
2452 sqp_entry->wrid = wqe->wr_id;
2453 /* Change wqe->wr_id to the table index */
2454 wqe->wr_id = rq_prod_index;
2458 static int is_ud_qp(struct bnxt_re_qp *qp)
2460 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2461 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
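/*
 * Translate a SEND / SEND_WITH_IMM / SEND_WITH_INV ib_send_wr into a
 * qplib SWQE. For UD and GSI QPs the AH id, remote QPN and Q_Key are
 * also taken from the UD work request.
 */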
2464 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2465 const struct ib_send_wr *wr,
2466 struct bnxt_qplib_swqe *wqe)
2468 struct bnxt_re_ah *ah = NULL;
2471 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2472 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2473 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2474 wqe->send.avid = ah->qplib_ah.id;
2476 switch (wr->opcode) {
2478 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2480 case IB_WR_SEND_WITH_IMM:
2481 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2482 wqe->send.imm_data = wr->ex.imm_data;
2484 case IB_WR_SEND_WITH_INV:
2485 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2486 wqe->send.inv_key = wr->ex.invalidate_rkey;
2491 if (wr->send_flags & IB_SEND_SIGNALED)
2492 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2493 if (wr->send_flags & IB_SEND_FENCE)
2494 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2495 if (wr->send_flags & IB_SEND_SOLICITED)
2496 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2497 if (wr->send_flags & IB_SEND_INLINE)
2498 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2503 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2504 struct bnxt_qplib_swqe *wqe)
2506 switch (wr->opcode) {
2507 case IB_WR_RDMA_WRITE:
2508 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2510 case IB_WR_RDMA_WRITE_WITH_IMM:
2511 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2512 wqe->rdma.imm_data = wr->ex.imm_data;
2514 case IB_WR_RDMA_READ:
2515 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2516 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2521 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2522 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2523 if (wr->send_flags & IB_SEND_SIGNALED)
2524 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2525 if (wr->send_flags & IB_SEND_FENCE)
2526 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2527 if (wr->send_flags & IB_SEND_SOLICITED)
2528 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2529 if (wr->send_flags & IB_SEND_INLINE)
2530 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2535 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2536 struct bnxt_qplib_swqe *wqe)
2538 switch (wr->opcode) {
2539 case IB_WR_ATOMIC_CMP_AND_SWP:
2540 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2541 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2542 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2544 case IB_WR_ATOMIC_FETCH_AND_ADD:
2545 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2546 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2551 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2552 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2553 if (wr->send_flags & IB_SEND_SIGNALED)
2554 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2555 if (wr->send_flags & IB_SEND_FENCE)
2556 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2557 if (wr->send_flags & IB_SEND_SOLICITED)
2558 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2562 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2563 struct bnxt_qplib_swqe *wqe)
2565 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2566 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2568 if (wr->send_flags & IB_SEND_SIGNALED)
2569 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2570 if (wr->send_flags & IB_SEND_SOLICITED)
2571 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2576 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2577 struct bnxt_qplib_swqe *wqe)
2579 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2580 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2581 int access = wr->access;
2583 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2584 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2585 wqe->frmr.page_list = mr->pages;
2586 wqe->frmr.page_list_len = mr->npages;
2587 wqe->frmr.levels = qplib_frpl->hwq.level;
2588 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2590 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2591 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2593 if (access & IB_ACCESS_LOCAL_WRITE)
2594 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2595 if (access & IB_ACCESS_REMOTE_READ)
2596 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2597 if (access & IB_ACCESS_REMOTE_WRITE)
2598 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2599 if (access & IB_ACCESS_REMOTE_ATOMIC)
2600 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2601 if (access & IB_ACCESS_MW_BIND)
2602 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2604 wqe->frmr.l_key = wr->key;
2605 wqe->frmr.length = wr->mr->length;
2606 wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
2607 wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
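	/*
	 * Page sizes are encoded as log2 of the size in 4K units, e.g.
	 * 4K -> 0, 64K -> 4, 2M -> 9 (assuming PAGE_SHIFT_4K is 12).
	 */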
2608 wqe->frmr.va = wr->mr->iova;
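/*
 * Copy each SGE's payload into the SWQE inline buffer. The copy fails
 * once the running total would exceed BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
 * otherwise the accumulated inline length is returned.
 */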
2612 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2613 const struct ib_send_wr *wr,
2614 struct bnxt_qplib_swqe *wqe)
2616 /* Copy the inline data to the data field */
2621 in_data = wqe->inline_data;
2622 for (i = 0; i < wr->num_sge; i++) {
2623 sge_addr = (void *)(unsigned long)
2624 wr->sg_list[i].addr;
2625 sge_len = wr->sg_list[i].length;
2627 if ((sge_len + wqe->inline_len) >
2628 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2629 ibdev_err(&rdev->ibdev,
2630 "Inline data size requested > supported value");
2633 sge_len = wr->sg_list[i].length;
2635 memcpy(in_data, sge_addr, sge_len);
2636 in_data += wr->sg_list[i].length;
2637 wqe->inline_len += wr->sg_list[i].length;
2639 return wqe->inline_len;
2642 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2643 const struct ib_send_wr *wr,
2644 struct bnxt_qplib_swqe *wqe)
2648 if (wr->send_flags & IB_SEND_INLINE)
2649 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2651 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
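/*
 * Work around a HW stall on UD, GSI and raw-Ethertype QPs: once
 * BNXT_RE_UD_QP_HW_STALL WQEs have been posted, kick the QP with a
 * modify-to-RTS and reset the counter.
 */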
2657 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2659 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2660 qp->ib_qp.qp_type == IB_QPT_GSI ||
2661 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2662 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2664 struct ib_qp_attr qp_attr;
2666 qp_attr_mask = IB_QP_STATE;
2667 qp_attr.qp_state = IB_QPS_RTS;
2668 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2669 qp->qplib_qp.wqe_cnt = 0;
2673 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2674 struct bnxt_re_qp *qp,
2675 const struct ib_send_wr *wr)
2677 int rc = 0, payload_sz = 0;
2678 unsigned long flags;
2680 spin_lock_irqsave(&qp->sq_lock, flags);
2682 struct bnxt_qplib_swqe wqe = {};
2685 wqe.num_sge = wr->num_sge;
2686 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2687 ibdev_err(&rdev->ibdev,
2688 "Limit exceeded for Send SGEs");
2693 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2694 if (payload_sz < 0) {
2698 wqe.wr_id = wr->wr_id;
2700 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2702 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2704 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2707 ibdev_err(&rdev->ibdev,
2708 "Post send failed opcode = %#x rc = %d",
2714 bnxt_qplib_post_send_db(&qp->qplib_qp);
2715 bnxt_ud_qp_hw_stall_workaround(qp);
2716 spin_unlock_irqrestore(&qp->sq_lock, flags);
2720 static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
2722 /* Need an unconditional fence for non-wire memory opcodes
2723 * to work as expected.
2725 if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
2726 wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
2727 wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
2728 wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
2729 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2732 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2733 const struct ib_send_wr **bad_wr)
2735 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2736 struct bnxt_qplib_swqe wqe;
2737 int rc = 0, payload_sz = 0;
2738 unsigned long flags;
2740 spin_lock_irqsave(&qp->sq_lock, flags);
2743 memset(&wqe, 0, sizeof(wqe));
2746 wqe.num_sge = wr->num_sge;
2747 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2748 ibdev_err(&qp->rdev->ibdev,
2749 "Limit exceeded for Send SGEs");
2754 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2755 if (payload_sz < 0) {
2759 wqe.wr_id = wr->wr_id;
2761 switch (wr->opcode) {
2763 case IB_WR_SEND_WITH_IMM:
2764 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2765 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2769 wqe.rawqp1.lflags |=
2770 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2772 switch (wr->send_flags) {
2773 case IB_SEND_IP_CSUM:
2774 wqe.rawqp1.lflags |=
2775 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2781 case IB_WR_SEND_WITH_INV:
2782 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2784 case IB_WR_RDMA_WRITE:
2785 case IB_WR_RDMA_WRITE_WITH_IMM:
2786 case IB_WR_RDMA_READ:
2787 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2789 case IB_WR_ATOMIC_CMP_AND_SWP:
2790 case IB_WR_ATOMIC_FETCH_AND_ADD:
2791 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2793 case IB_WR_RDMA_READ_WITH_INV:
2794 ibdev_err(&qp->rdev->ibdev,
2795 "RDMA Read with Invalidate is not supported");
2798 case IB_WR_LOCAL_INV:
2799 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2802 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2805 /* Unsupported WRs */
2806 ibdev_err(&qp->rdev->ibdev,
2807 "WR (%#x) is not supported", wr->opcode);
2812 if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2813 bnxt_re_legacy_set_uc_fence(&wqe);
2814 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2818 ibdev_err(&qp->rdev->ibdev,
2819 "post_send failed op:%#x qps = %#x rc = %d\n",
2820 wr->opcode, qp->qplib_qp.state, rc);
2826 bnxt_qplib_post_send_db(&qp->qplib_qp);
2827 bnxt_ud_qp_hw_stall_workaround(qp);
2828 spin_unlock_irqrestore(&qp->sq_lock, flags);
2833 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2834 struct bnxt_re_qp *qp,
2835 const struct ib_recv_wr *wr)
2837 struct bnxt_qplib_swqe wqe;
2842 memset(&wqe, 0, sizeof(wqe));
2845 wqe.num_sge = wr->num_sge;
2846 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2847 ibdev_err(&rdev->ibdev,
2848 "Limit exceeded for Receive SGEs");
2852 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2853 wqe.wr_id = wr->wr_id;
2854 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2856 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2863 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2867 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2868 const struct ib_recv_wr **bad_wr)
2870 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2871 struct bnxt_qplib_swqe wqe;
2872 int rc = 0, payload_sz = 0;
2873 unsigned long flags;
2876 spin_lock_irqsave(&qp->rq_lock, flags);
2879 memset(&wqe, 0, sizeof(wqe));
2882 wqe.num_sge = wr->num_sge;
2883 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2884 ibdev_err(&qp->rdev->ibdev,
2885 "Limit exceeded for Receive SGEs");
2891 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2893 wqe.wr_id = wr->wr_id;
2894 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2896 if (ib_qp->qp_type == IB_QPT_GSI &&
2897 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2898 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2901 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2907 /* Ring the DB if the number of RQEs posted reaches a threshold */
2908 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2909 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2917 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2919 spin_unlock_irqrestore(&qp->rq_lock, flags);
2924 /* Completion Queues */
2925 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2927 struct bnxt_qplib_chip_ctx *cctx;
2928 struct bnxt_qplib_nq *nq;
2929 struct bnxt_re_dev *rdev;
2930 struct bnxt_re_cq *cq;
2932 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2934 nq = cq->qplib_cq.nq;
2935 cctx = rdev->chip_ctx;
2937 if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
2938 free_page((unsigned long)cq->uctx_cq_page);
2939 hash_del(&cq->hash_entry);
2941 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2942 ib_umem_release(cq->umem);
2944 atomic_dec(&rdev->stats.res.cq_count);
2950 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2951 struct ib_udata *udata)
2953 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2954 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2955 struct bnxt_re_ucontext *uctx =
2956 rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
2957 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2958 struct bnxt_qplib_chip_ctx *cctx;
2959 struct bnxt_qplib_nq *nq = NULL;
2960 unsigned int nq_alloc_cnt;
2961 int cqe = attr->cqe;
2968 /* Validate CQ fields */
2969 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2970 ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
2975 cctx = rdev->chip_ctx;
2976 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2978 entries = bnxt_re_init_depth(cqe + 1, uctx);
2979 if (entries > dev_attr->max_cq_wqes + 1)
2980 entries = dev_attr->max_cq_wqes + 1;
2982 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2983 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2985 struct bnxt_re_cq_req req;
2986 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2991 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2992 entries * sizeof(struct cq_base),
2993 IB_ACCESS_LOCAL_WRITE);
2994 if (IS_ERR(cq->umem)) {
2995 rc = PTR_ERR(cq->umem);
2998 cq->qplib_cq.sg_info.umem = cq->umem;
2999 cq->qplib_cq.dpi = &uctx->dpi;
3001 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
3002 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
3009 cq->qplib_cq.dpi = &rdev->dpi_privileged;
3012 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
3013 * used for getting the NQ index.
3015 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
3016 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
3017 cq->qplib_cq.max_wqe = entries;
3018 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
3019 cq->qplib_cq.nq = nq;
3021 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
3023 ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
3027 cq->ib_cq.cqe = entries;
3028 cq->cq_period = cq->qplib_cq.period;
3031 active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
3032 if (active_cqs > rdev->stats.res.cq_watermark)
3033 rdev->stats.res.cq_watermark = active_cqs;
3034 spin_lock_init(&cq->cq_lock);
3037 struct bnxt_re_cq_resp resp = {};
3039 if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
3040 hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
3041 /* Allocate a page */
3042 cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
3043 if (!cq->uctx_cq_page) {
3047 resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
3049 resp.cqid = cq->qplib_cq.id;
3050 resp.tail = cq->qplib_cq.hwq.cons;
3051 resp.phase = cq->qplib_cq.period;
3053 rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
3055 ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
3056 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3064 free_page((unsigned long)cq->uctx_cq_page);
3066 ib_umem_release(cq->umem);
3072 static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
3074 struct bnxt_re_dev *rdev = cq->rdev;
3076 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3078 cq->qplib_cq.max_wqe = cq->resize_cqe;
3079 if (cq->resize_umem) {
3080 ib_umem_release(cq->umem);
3081 cq->umem = cq->resize_umem;
3082 cq->resize_umem = NULL;
3087 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
3089 struct bnxt_qplib_sg_info sg_info = {};
3090 struct bnxt_qplib_dpi *orig_dpi = NULL;
3091 struct bnxt_qplib_dev_attr *dev_attr;
3092 struct bnxt_re_ucontext *uctx = NULL;
3093 struct bnxt_re_resize_cq_req req;
3094 struct bnxt_re_dev *rdev;
3095 struct bnxt_re_cq *cq;
3098 cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
3100 dev_attr = &rdev->dev_attr;
3101 if (!ibcq->uobject) {
3102 ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
3106 if (cq->resize_umem) {
3107 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
3112 /* Check that the requested CQ depth is within the supported range */
3113 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3114 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
3115 cq->qplib_cq.id, cqe);
3119 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3120 entries = bnxt_re_init_depth(cqe + 1, uctx);
3121 if (entries > dev_attr->max_cq_wqes + 1)
3122 entries = dev_attr->max_cq_wqes + 1;
3124 /* uverbs consumer */
3125 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3130 cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3131 entries * sizeof(struct cq_base),
3132 IB_ACCESS_LOCAL_WRITE);
3133 if (IS_ERR(cq->resize_umem)) {
3134 rc = PTR_ERR(cq->resize_umem);
3135 cq->resize_umem = NULL;
3136 ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
3140 cq->resize_cqe = entries;
3141 memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
3142 orig_dpi = cq->qplib_cq.dpi;
3144 cq->qplib_cq.sg_info.umem = cq->resize_umem;
3145 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3146 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3147 cq->qplib_cq.dpi = &uctx->dpi;
3149 rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
3151 ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
3156 cq->ib_cq.cqe = cq->resize_cqe;
3157 atomic_inc(&rdev->stats.res.resize_count);
3162 if (cq->resize_umem) {
3163 ib_umem_release(cq->resize_umem);
3164 cq->resize_umem = NULL;
3166 memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
3167 cq->qplib_cq.dpi = orig_dpi;
3172 static u8 __req_to_ib_wc_status(u8 qstatus)
3175 case CQ_REQ_STATUS_OK:
3176 return IB_WC_SUCCESS;
3177 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
3178 return IB_WC_BAD_RESP_ERR;
3179 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
3180 return IB_WC_LOC_LEN_ERR;
3181 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
3182 return IB_WC_LOC_QP_OP_ERR;
3183 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
3184 return IB_WC_LOC_PROT_ERR;
3185 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
3186 return IB_WC_GENERAL_ERR;
3187 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
3188 return IB_WC_REM_INV_REQ_ERR;
3189 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
3190 return IB_WC_REM_ACCESS_ERR;
3191 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
3192 return IB_WC_REM_OP_ERR;
3193 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
3194 return IB_WC_RNR_RETRY_EXC_ERR;
3195 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
3196 return IB_WC_RETRY_EXC_ERR;
3197 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
3198 return IB_WC_WR_FLUSH_ERR;
3200 return IB_WC_GENERAL_ERR;
3205 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
3208 case CQ_RES_RAWETH_QP1_STATUS_OK:
3209 return IB_WC_SUCCESS;
3210 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
3211 return IB_WC_LOC_ACCESS_ERR;
3212 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
3213 return IB_WC_LOC_LEN_ERR;
3214 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
3215 return IB_WC_LOC_PROT_ERR;
3216 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
3217 return IB_WC_LOC_QP_OP_ERR;
3218 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
3219 return IB_WC_GENERAL_ERR;
3220 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
3221 return IB_WC_WR_FLUSH_ERR;
3222 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
3223 return IB_WC_WR_FLUSH_ERR;
3225 return IB_WC_GENERAL_ERR;
3229 static u8 __rc_to_ib_wc_status(u8 qstatus)
3232 case CQ_RES_RC_STATUS_OK:
3233 return IB_WC_SUCCESS;
3234 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
3235 return IB_WC_LOC_ACCESS_ERR;
3236 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
3237 return IB_WC_LOC_LEN_ERR;
3238 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
3239 return IB_WC_LOC_PROT_ERR;
3240 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
3241 return IB_WC_LOC_QP_OP_ERR;
3242 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
3243 return IB_WC_GENERAL_ERR;
3244 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3245 return IB_WC_REM_INV_REQ_ERR;
3246 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3247 return IB_WC_WR_FLUSH_ERR;
3248 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3249 return IB_WC_WR_FLUSH_ERR;
3251 return IB_WC_GENERAL_ERR;
3255 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3257 switch (cqe->type) {
3258 case BNXT_QPLIB_SWQE_TYPE_SEND:
3259 wc->opcode = IB_WC_SEND;
3261 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3262 wc->opcode = IB_WC_SEND;
3263 wc->wc_flags |= IB_WC_WITH_IMM;
3265 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3266 wc->opcode = IB_WC_SEND;
3267 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3269 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3270 wc->opcode = IB_WC_RDMA_WRITE;
3272 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3273 wc->opcode = IB_WC_RDMA_WRITE;
3274 wc->wc_flags |= IB_WC_WITH_IMM;
3276 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3277 wc->opcode = IB_WC_RDMA_READ;
3279 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3280 wc->opcode = IB_WC_COMP_SWAP;
3282 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3283 wc->opcode = IB_WC_FETCH_ADD;
3285 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3286 wc->opcode = IB_WC_LOCAL_INV;
3288 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3289 wc->opcode = IB_WC_REG_MR;
3292 wc->opcode = IB_WC_SEND;
3296 wc->status = __req_to_ib_wc_status(cqe->status);
3299 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3300 u16 raweth_qp1_flags2)
3302 bool is_ipv6 = false, is_ipv4 = false;
3304 /* raweth_qp1_flags bits 9-6 indicate the itype */
3305 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3306 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3309 if (raweth_qp1_flags2 &
3310 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3312 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3313 /* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
3314 (raweth_qp1_flags2 &
3315 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3316 (is_ipv6 = true) : (is_ipv4 = true);
3318 BNXT_RE_ROCEV2_IPV6_PACKET :
3319 BNXT_RE_ROCEV2_IPV4_PACKET);
3321 return BNXT_RE_ROCE_V1_PACKET;
3325 static int bnxt_re_to_ib_nw_type(int nw_type)
3327 u8 nw_hdr_type = 0xFF;
3330 case BNXT_RE_ROCE_V1_PACKET:
3331 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3333 case BNXT_RE_ROCEV2_IPV4_PACKET:
3334 nw_hdr_type = RDMA_NETWORK_IPV4;
3336 case BNXT_RE_ROCEV2_IPV6_PACKET:
3337 nw_hdr_type = RDMA_NETWORK_IPV6;
3343 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3347 struct ethhdr *eth_hdr;
3351 tmp_buf = (u8 *)rq_hdr_buf;
3353 * If the dest MAC is not the same as the I/F MAC, this could be a
3354 * loopback or multicast address; check whether it is a
3355 * loopback packet
3357 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3359 /* Check the ether type */
3360 eth_hdr = (struct ethhdr *)tmp_buf;
3361 eth_type = ntohs(eth_hdr->h_proto);
3369 struct udphdr *udp_hdr;
3371 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3372 sizeof(struct ipv6hdr));
3373 tmp_buf += sizeof(struct ethhdr) + len;
3374 udp_hdr = (struct udphdr *)tmp_buf;
3375 if (ntohs(udp_hdr->dest) ==
3388 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3389 struct bnxt_qplib_cqe *cqe)
3391 struct bnxt_re_dev *rdev = gsi_qp->rdev;
3392 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3393 struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3394 dma_addr_t shrq_hdr_buf_map;
3395 struct ib_sge s_sge[2] = {};
3396 struct ib_sge r_sge[2] = {};
3397 struct bnxt_re_ah *gsi_sah;
3398 struct ib_recv_wr rwr = {};
3399 dma_addr_t rq_hdr_buf_map;
3400 struct ib_ud_wr udwr = {};
3401 struct ib_send_wr *swr;
3410 tbl_idx = cqe->wr_id;
3412 rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3413 (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3414 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3417 /* Shadow QP header buffer */
3418 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3420 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3422 /* Store this cqe */
3423 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3424 sqp_entry->qp1_qp = gsi_qp;
3426 /* Find packet type from the cqe */
3428 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3429 cqe->raweth_qp1_flags2);
3431 ibdev_err(&rdev->ibdev, "Invalid packet\n");
3435 /* Adjust the offset for the user buffer and post in the rq */
3437 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3441 * A QP1 loopback packet has 4 bytes of internal header before the
3442 * ether header. Skip these four bytes.
3444 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3447 /* First send SGE. Skip the ether header */
3448 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3450 s_sge[0].lkey = 0xFFFFFFFF;
3451 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3452 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3454 /* Second Send SGE */
3455 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3456 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3457 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3459 s_sge[1].lkey = 0xFFFFFFFF;
3460 s_sge[1].length = 256;
3462 /* First recv SGE */
3464 r_sge[0].addr = shrq_hdr_buf_map;
3465 r_sge[0].lkey = 0xFFFFFFFF;
3466 r_sge[0].length = 40;
3468 r_sge[1].addr = sqp_entry->sge.addr + offset;
3469 r_sge[1].lkey = sqp_entry->sge.lkey;
3470 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3472 /* Create receive work request */
3474 rwr.sg_list = r_sge;
3475 rwr.wr_id = tbl_idx;
3478 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3480 ibdev_err(&rdev->ibdev,
3481 "Failed to post Rx buffers to shadow QP");
3486 swr->sg_list = s_sge;
3487 swr->wr_id = tbl_idx;
3488 swr->opcode = IB_WR_SEND;
3490 gsi_sah = rdev->gsi_ctx.gsi_sah;
3491 udwr.ah = &gsi_sah->ib_ah;
3492 udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3493 udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3495 /* post data received in the send queue */
3496 return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3499 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3500 struct bnxt_qplib_cqe *cqe)
3502 wc->opcode = IB_WC_RECV;
3503 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3504 wc->wc_flags |= IB_WC_GRH;
3507 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3511 * Check if the vlan is configured in the host. If not configured, it
3512 * can be a transparent VLAN. So don't report the vlan id.
3514 if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3515 htons(ETH_P_8021Q), vlan_id))
3520 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3527 metadata = orig_cqe->raweth_qp1_metadata;
3528 if (orig_cqe->raweth_qp1_flags2 &
3529 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3531 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3532 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3533 if (tpid == ETH_P_8021Q) {
3535 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3537 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3538 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3546 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3547 struct bnxt_qplib_cqe *cqe)
3549 wc->opcode = IB_WC_RECV;
3550 wc->status = __rc_to_ib_wc_status(cqe->status);
3552 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3553 wc->wc_flags |= IB_WC_WITH_IMM;
3554 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3555 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3556 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3557 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3558 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3561 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3563 struct bnxt_qplib_cqe *cqe)
3565 struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3566 struct bnxt_re_qp *gsi_qp = NULL;
3567 struct bnxt_qplib_cqe *orig_cqe = NULL;
3568 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3574 tbl_idx = cqe->wr_id;
3576 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3577 gsi_qp = sqp_entry->qp1_qp;
3578 orig_cqe = &sqp_entry->cqe;
3580 wc->wr_id = sqp_entry->wrid;
3581 wc->byte_len = orig_cqe->length;
3582 wc->qp = &gsi_qp->ib_qp;
3584 wc->ex.imm_data = orig_cqe->immdata;
3585 wc->src_qp = orig_cqe->src_qp;
3586 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3587 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3588 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3589 wc->vlan_id = vlan_id;
3591 wc->wc_flags |= IB_WC_WITH_VLAN;
3595 wc->vendor_err = orig_cqe->status;
3597 wc->opcode = IB_WC_RECV;
3598 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3599 wc->wc_flags |= IB_WC_GRH;
3601 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3602 orig_cqe->raweth_qp1_flags2);
3604 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3605 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3609 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3611 struct bnxt_qplib_cqe *cqe)
3613 struct bnxt_re_dev *rdev;
3618 wc->opcode = IB_WC_RECV;
3619 wc->status = __rc_to_ib_wc_status(cqe->status);
3621 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3622 wc->wc_flags |= IB_WC_WITH_IMM;
3623 /* report only on GSI QP for Thor */
3624 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3625 wc->wc_flags |= IB_WC_GRH;
3626 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3627 wc->wc_flags |= IB_WC_WITH_SMAC;
3628 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3629 vlan_id = (cqe->cfa_meta & 0xFFF);
3631 /* Mark only if vlan_id is non-zero */
3632 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3633 wc->vlan_id = vlan_id;
3634 wc->wc_flags |= IB_WC_WITH_VLAN;
3636 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3637 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3638 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3639 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
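/*
 * Post the fence bind-MW work request (counted as a phantom WQE) on the
 * SQ under sq_lock; on success, phantom_wqe_cnt is bumped for debug
 * accounting.
 */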
3644 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3646 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3647 unsigned long flags;
3650 spin_lock_irqsave(&qp->sq_lock, flags);
3652 rc = bnxt_re_bind_fence_mw(lib_qp);
3654 lib_qp->sq.phantom_wqe_cnt++;
3655 ibdev_dbg(&qp->rdev->ibdev,
3656 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3657 lib_qp->id, lib_qp->sq.hwq.prod,
3658 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3659 lib_qp->sq.phantom_wqe_cnt);
3662 spin_unlock_irqrestore(&qp->sq_lock, flags);
3666 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3668 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3669 struct bnxt_re_qp *qp, *sh_qp;
3670 struct bnxt_qplib_cqe *cqe;
3671 int i, ncqe, budget;
3672 struct bnxt_qplib_q *sq;
3673 struct bnxt_qplib_qp *lib_qp;
3675 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3676 unsigned long flags;
3678 /* User CQ; the only processing we do is to
3679 * complete any pending CQ resize operation.
3682 if (cq->resize_umem)
3683 bnxt_re_resize_cq_complete(cq);
3687 spin_lock_irqsave(&cq->cq_lock, flags);
3688 budget = min_t(u32, num_entries, cq->max_cql);
3689 num_entries = budget;
3691 ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3697 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3700 if (sq->send_phantom) {
3701 qp = container_of(lib_qp,
3702 struct bnxt_re_qp, qplib_qp);
3703 if (send_phantom_wqe(qp) == -ENOMEM)
3704 ibdev_err(&cq->rdev->ibdev,
3705 "Phantom failed! Scheduled to send again\n");
3707 sq->send_phantom = false;
3711 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3718 for (i = 0; i < ncqe; i++, cqe++) {
3719 /* Transcribe each qplib_cqe back to an ib_wc */
3720 memset(wc, 0, sizeof(*wc));
3722 wc->wr_id = cqe->wr_id;
3723 wc->byte_len = cqe->length;
3725 ((struct bnxt_qplib_qp *)
3726 (unsigned long)(cqe->qp_handle),
3727 struct bnxt_re_qp, qplib_qp);
3728 wc->qp = &qp->ib_qp;
3729 wc->ex.imm_data = cqe->immdata;
3730 wc->src_qp = cqe->src_qp;
3731 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3733 wc->vendor_err = cqe->status;
3735 switch (cqe->opcode) {
3736 case CQ_BASE_CQE_TYPE_REQ:
3737 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3739 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3740 /* Handle this completion with
3741 * the stored completion
3743 memset(wc, 0, sizeof(*wc));
3746 bnxt_re_process_req_wc(wc, cqe);
3748 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3752 rc = bnxt_re_process_raw_qp_pkt_rx
3755 memset(wc, 0, sizeof(*wc));
3760 /* Errors need not be looped back.
3761 * But change the wr_id to the one
3762 * stored in the table
3764 tbl_idx = cqe->wr_id;
3765 sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3766 wc->wr_id = sqp_entry->wrid;
3767 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3769 case CQ_BASE_CQE_TYPE_RES_RC:
3770 bnxt_re_process_res_rc_wc(wc, cqe);
3772 case CQ_BASE_CQE_TYPE_RES_UD:
3773 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3775 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3776 /* Handle this completion with
3777 * the stored completion
3782 bnxt_re_process_res_shadow_qp_wc
3787 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3790 ibdev_err(&cq->rdev->ibdev,
3791 "POLL CQ : type 0x%x not handled",
3800 spin_unlock_irqrestore(&cq->cq_lock, flags);
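	/*
	 * budget counts the wc slots still unused at this point, so the
	 * difference below is the number of completions returned.
	 */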
3801 return num_entries - budget;
3804 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3805 enum ib_cq_notify_flags ib_cqn_flags)
3807 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3808 int type = 0, rc = 0;
3809 unsigned long flags;
3811 spin_lock_irqsave(&cq->cq_lock, flags);
3812 /* Trigger on the very next completion */
3813 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3814 type = DBC_DBC_TYPE_CQ_ARMALL;
3815 /* Trigger on the next solicited completion */
3816 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3817 type = DBC_DBC_TYPE_CQ_ARMSE;
3819 /* Poll to see if there are missed events */
3820 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3821 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3825 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3828 spin_unlock_irqrestore(&cq->cq_lock, flags);
3832 /* Memory Regions */
3833 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3835 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3836 struct bnxt_re_dev *rdev = pd->rdev;
3837 struct bnxt_re_mr *mr;
3841 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3843 return ERR_PTR(-ENOMEM);
3846 mr->qplib_mr.pd = &pd->qplib_pd;
3847 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3848 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3850 /* Allocate and register 0 as the address */
3851 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3855 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3856 mr->qplib_mr.total_size = -1; /* Infinite length */
3857 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
3862 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3863 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3864 IB_ACCESS_REMOTE_ATOMIC))
3865 mr->ib_mr.rkey = mr->ib_mr.lkey;
3866 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
3867 if (active_mrs > rdev->stats.res.mr_watermark)
3868 rdev->stats.res.mr_watermark = active_mrs;
3873 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3879 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3881 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3882 struct bnxt_re_dev *rdev = mr->rdev;
3885 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3887 ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3892 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3898 ib_umem_release(mr->ib_umem);
3901 atomic_dec(&rdev->stats.res.mr_count);
3905 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3907 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3909 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3912 mr->pages[mr->npages++] = addr;
3916 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3917 unsigned int *sg_offset)
3919 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3922 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3925 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3928 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3929 struct bnxt_re_dev *rdev = pd->rdev;
3930 struct bnxt_re_mr *mr = NULL;
3934 if (type != IB_MR_TYPE_MEM_REG) {
3935 ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
3936 return ERR_PTR(-EINVAL);
3938 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3939 return ERR_PTR(-EINVAL);
3941 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3943 return ERR_PTR(-ENOMEM);
3946 mr->qplib_mr.pd = &pd->qplib_pd;
3947 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3948 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3950 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3954 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3955 mr->ib_mr.rkey = mr->ib_mr.lkey;
3957 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3962 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3963 &mr->qplib_frpl, max_num_sg);
3965 ibdev_err(&rdev->ibdev,
3966 "Failed to allocate HW FR page list");
3970 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
3971 if (active_mrs > rdev->stats.res.mr_watermark)
3972 rdev->stats.res.mr_watermark = active_mrs;
3978 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3984 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3985 struct ib_udata *udata)
3987 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3988 struct bnxt_re_dev *rdev = pd->rdev;
3989 struct bnxt_re_mw *mw;
3993 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3995 return ERR_PTR(-ENOMEM);
3997 mw->qplib_mw.pd = &pd->qplib_pd;
3999 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
4000 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
4001 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
4002 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
4004 ibdev_err(&rdev->ibdev, "Allocate MW failed!");
4007 mw->ib_mw.rkey = mw->qplib_mw.rkey;
4009 active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
4010 if (active_mws > rdev->stats.res.mw_watermark)
4011 rdev->stats.res.mw_watermark = active_mws;
4019 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
4021 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
4022 struct bnxt_re_dev *rdev = mw->rdev;
4025 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
4027 ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
4032 atomic_dec(&rdev->stats.res.mw_count);
4036 static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
4037 int mr_access_flags, struct ib_umem *umem)
4039 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4040 struct bnxt_re_dev *rdev = pd->rdev;
4041 unsigned long page_size;
4042 struct bnxt_re_mr *mr;
4046 if (length > BNXT_RE_MAX_MR_SIZE) {
4047 ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
4048 length, BNXT_RE_MAX_MR_SIZE);
4049 return ERR_PTR(-ENOMEM);
4052 page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
4054 ibdev_err(&rdev->ibdev, "umem page size unsupported!");
4055 return ERR_PTR(-EINVAL);
4058 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4060 return ERR_PTR(-ENOMEM);
4063 mr->qplib_mr.pd = &pd->qplib_pd;
4064 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
4065 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
4067 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4069 ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
4073 /* The fixed portion of the rkey is the same as the lkey */
4074 mr->ib_mr.rkey = mr->qplib_mr.rkey;
4076 mr->qplib_mr.va = virt_addr;
4077 mr->qplib_mr.total_size = length;
4079 umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
4080 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
4081 umem_pgs, page_size);
4083 ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
4088 mr->ib_mr.lkey = mr->qplib_mr.lkey;
4089 mr->ib_mr.rkey = mr->qplib_mr.lkey;
4090 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4091 if (active_mrs > rdev->stats.res.mr_watermark)
4092 rdev->stats.res.mr_watermark = active_mrs;
4097 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4103 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
4104 u64 virt_addr, int mr_access_flags,
4105 struct ib_udata *udata)
4107 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4108 struct bnxt_re_dev *rdev = pd->rdev;
4109 struct ib_umem *umem;
4110 struct ib_mr *ib_mr;
4112 umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
4114 return ERR_CAST(umem);
4116 ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4118 ib_umem_release(umem);
4122 struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
4123 u64 length, u64 virt_addr, int fd,
4124 int mr_access_flags, struct ib_udata *udata)
4126 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4127 struct bnxt_re_dev *rdev = pd->rdev;
4128 struct ib_umem_dmabuf *umem_dmabuf;
4129 struct ib_umem *umem;
4130 struct ib_mr *ib_mr;
4132 umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
4133 fd, mr_access_flags);
4134 if (IS_ERR(umem_dmabuf))
4135 return ERR_CAST(umem_dmabuf);
4137 umem = &umem_dmabuf->umem;
4139 ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4141 ib_umem_release(umem);
4145 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
4147 struct ib_device *ibdev = ctx->device;
4148 struct bnxt_re_ucontext *uctx =
4149 container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
4150 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
4151 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
4152 struct bnxt_re_user_mmap_entry *entry;
4153 struct bnxt_re_uctx_resp resp = {};
4154 struct bnxt_re_uctx_req ureq = {};
4155 u32 chip_met_rev_num = 0;
4158 ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
4160 if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
4161 ibdev_dbg(ibdev, " is different from the device %d ",
4162 BNXT_RE_ABI_VERSION);
4168 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
4173 spin_lock_init(&uctx->sh_lock);
4175 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
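	/*
	 * chip_id0 packs the chip identity for user space: chip_num in the
	 * low bits with chip_rev and chip_metal above it, each masked to
	 * 8 bits and placed at the BNXT_RE_CHIP_ID0_* shifts.
	 */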
4176 chip_met_rev_num = rdev->chip_ctx->chip_num;
4177 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
4178 BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
4179 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
4180 BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
4181 resp.chip_id0 = chip_met_rev_num;
4182 /* Temp, use xa_alloc instead */
4183 resp.dev_id = rdev->en_dev->pdev->devfn;
4184 resp.max_qp = rdev->qplib_ctx.qpc_count;
4185 resp.pg_size = PAGE_SIZE;
4186 resp.cqe_sz = sizeof(struct cq_base);
4187 resp.max_cqd = dev_attr->max_cq_wqes;
4189 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
4190 resp.mode = rdev->chip_ctx->modes.wqe_mode;
4192 if (rdev->chip_ctx->modes.db_push)
4193 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
4195 entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
4200 uctx->shpage_mmap = &entry->rdma_entry;
4201 if (rdev->pacing.dbr_pacing)
4202 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
4204 if (udata->inlen >= sizeof(ureq)) {
4205 rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
4208 if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
4209 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
4210 uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
4214 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
4216 ibdev_err(ibdev, "Failed to copy user context");
4223 free_page((unsigned long)uctx->shpg);
4229 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
4231 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4232 struct bnxt_re_ucontext,
4235 struct bnxt_re_dev *rdev = uctx->rdev;
4237 rdma_user_mmap_entry_remove(uctx->shpage_mmap);
4238 uctx->shpage_mmap = NULL;
4240 free_page((unsigned long)uctx->shpg);
4242 if (uctx->dpi.dbr) {
4243 /* Free the DPI if one was allocated (it is set up when the application
4244 * creates its first PD) and mark the context dpi as NULL
4246 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
4247 uctx->dpi.dbr = NULL;
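/*
 * Look up a driver CQ by its HW CQ id in the per-device cq_hash; used by
 * the GET_TOGGLE_MEM ioctl path to resolve the CQ whose toggle page is
 * being mapped.
 */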
4251 static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
4253 struct bnxt_re_cq *cq = NULL, *tmp_cq;
4255 hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
4256 if (tmp_cq->qplib_cq.id == cq_id) {
4264 /* Helper function to mmap the virtual memory from user app */
4265 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
4267 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4268 struct bnxt_re_ucontext,
4270 struct bnxt_re_user_mmap_entry *bnxt_entry;
4271 struct rdma_user_mmap_entry *rdma_entry;
4275 rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
4279 bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4282 switch (bnxt_entry->mmap_flag) {
4283 case BNXT_RE_MMAP_WC_DB:
4284 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4285 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4286 pgprot_writecombine(vma->vm_page_prot),
4289 case BNXT_RE_MMAP_UC_DB:
4290 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4291 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4292 pgprot_noncached(vma->vm_page_prot),
4295 case BNXT_RE_MMAP_SH_PAGE:
4296 ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
4298 case BNXT_RE_MMAP_DBR_BAR:
4299 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4300 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4301 pgprot_noncached(vma->vm_page_prot),
4304 case BNXT_RE_MMAP_DBR_PAGE:
4305 case BNXT_RE_MMAP_TOGGLE_PAGE:
4306 /* Driver doesn't expect write access for user space */
4307 if (vma->vm_flags & VM_WRITE)
4309 ret = vm_insert_page(vma, vma->vm_start,
4310 virt_to_page((void *)bnxt_entry->mem_offset));
4317 rdma_user_mmap_entry_put(rdma_entry);
4321 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
4323 struct bnxt_re_user_mmap_entry *bnxt_entry;
4325 bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4331 static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
4333 struct bnxt_re_ucontext *uctx;
4335 uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4336 bnxt_re_pacing_alert(uctx->rdev);
4340 static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
4342 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4343 enum bnxt_re_alloc_page_type alloc_type;
4344 struct bnxt_re_user_mmap_entry *entry;
4345 enum bnxt_re_mmap_flag mmap_flag;
4346 struct bnxt_qplib_chip_ctx *cctx;
4347 struct bnxt_re_ucontext *uctx;
4348 struct bnxt_re_dev *rdev;
4355 uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4357 return PTR_ERR(uctx);
4359 err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
4364 cctx = rdev->chip_ctx;
4366 switch (alloc_type) {
4367 case BNXT_RE_ALLOC_WC_PAGE:
4368 if (cctx->modes.db_push) {
4369 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
4370 uctx, BNXT_QPLIB_DPI_TYPE_WC))
4373 dpi = uctx->wcdpi.dpi;
4374 addr = (u64)uctx->wcdpi.umdbr;
4375 mmap_flag = BNXT_RE_MMAP_WC_DB;
4381 case BNXT_RE_ALLOC_DBR_BAR_PAGE:
4383 addr = (u64)rdev->pacing.dbr_bar_addr;
4384 mmap_flag = BNXT_RE_MMAP_DBR_BAR;
4387 case BNXT_RE_ALLOC_DBR_PAGE:
4389 addr = (u64)rdev->pacing.dbr_page;
4390 mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
4397 entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
4401 uobj->object = entry;
4402 uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4403 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4404 &mmap_offset, sizeof(mmap_offset));
4408 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4409 &length, sizeof(length));
4413 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
4414 &dpi, sizeof(dpi));
static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
				  enum rdma_remove_reason why,
				  struct uverbs_attr_bundle *attrs)
{
	struct bnxt_re_user_mmap_entry *entry = uobject->object;
	struct bnxt_re_ucontext *uctx = entry->uctx;

	switch (entry->mmap_flag) {
	case BNXT_RE_MMAP_WC_DB:
		if (uctx && uctx->wcdpi.dbr) {
			struct bnxt_re_dev *rdev = uctx->rdev;

			bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
			uctx->wcdpi.dbr = NULL;
		}
		break;
	case BNXT_RE_MMAP_DBR_BAR:
	case BNXT_RE_MMAP_DBR_PAGE:
		break;
	default:
		goto exit;
	}
	rdma_user_mmap_entry_remove(&entry->rdma_entry);
exit:
	return 0;
}

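/* uverbs attribute and method declarations for the ALLOC_PAGE and NOTIFY_DRV objects */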
DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
			    UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
					    BNXT_RE_OBJECT_ALLOC_PAGE,
					    UVERBS_ACCESS_NEW,
					    UA_MANDATORY),
			    UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
						 enum bnxt_re_alloc_page_type,
						 UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
						UVERBS_ATTR_TYPE(u64),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
				    UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
						    BNXT_RE_OBJECT_ALLOC_PAGE,
						    UVERBS_ACCESS_DESTROY,
						    UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
			    UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
			    &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
			    &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));

DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);

DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
			      &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));

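/* Toggle memory: map per-resource (e.g. CQ) toggle pages to user space */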
static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
	enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
	enum bnxt_re_get_toggle_mem_type res_type;
	struct bnxt_re_user_mmap_entry *entry;
	struct bnxt_re_ucontext *uctx;
	struct ib_ucontext *ib_uctx;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_cq *cq;
	u64 mem_offset;
	u64 addr = 0;
	u32 length;
	u32 offset;
	u32 cq_id;
	int err;

	ib_uctx = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ib_uctx))
		return PTR_ERR(ib_uctx);

	err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE);
	if (err)
		return err;

	uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
	rdev = uctx->rdev;

	switch (res_type) {
	case BNXT_RE_CQ_TOGGLE_MEM:
		err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
		if (err)
			return err;

		cq = bnxt_re_search_for_cq(rdev, cq_id);
		if (!cq)
			return -EINVAL;

		length = PAGE_SIZE;
		addr = (u64)cq->uctx_cq_page;
		mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
		offset = 0;
		break;
	case BNXT_RE_SRQ_TOGGLE_MEM:
		break;
	default:
		return -EOPNOTSUPP;
	}

	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
	if (!entry)
		return -ENOMEM;

	uobj->object = entry;
	uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
			     &mem_offset, sizeof(mem_offset));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
			     &length, sizeof(length));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
			     &offset, sizeof(offset));
	if (err)
		return err;

	return 0;
}

static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
				      enum rdma_remove_reason why,
				      struct uverbs_attr_bundle *attrs)
{
	struct bnxt_re_user_mmap_entry *entry = uobject->object;

	rdma_user_mmap_entry_remove(&entry->rdma_entry);
	return 0;
}

DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
			    UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
					    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
					    UVERBS_ACCESS_NEW,
					    UA_MANDATORY),
			    UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
						 enum bnxt_re_get_toggle_mem_type,
						 UA_MANDATORY),
			    UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
					       UVERBS_ATTR_TYPE(u32),
					       UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
						UVERBS_ATTR_TYPE(u64),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
				    UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
						    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
						    UVERBS_ACCESS_DESTROY,
						    UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
			    UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
			    &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
			    &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));

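/* Driver-specific uAPI objects chained into the core uverbs definition tree */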
const struct uapi_definition bnxt_re_uapi_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
	{}
};