2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: IB Verbs interpreter
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
55 #include "qplib_res.h"
58 #include "qplib_rcfw.h"
62 #include <rdma/bnxt_re-abi.h>
64 static int __from_ib_access_flags(int iflags)
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
85 static enum ib_access_flags __to_ib_access_flags(int qflags)
87 enum ib_access_flags iflags = 0;
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
106 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 struct bnxt_qplib_sge *sg_list, int num)
111 for (i = 0; i < num; i++) {
112 sg_list[i].addr = ib_sg_list[i].addr;
113 sg_list[i].lkey = ib_sg_list[i].lkey;
114 sg_list[i].size = ib_sg_list[i].length;
115 total += sg_list[i].size;
121 struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
128 netdev = rdev->netdev;
136 int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
143 memset(ib_attr, 0, sizeof(*ib_attr));
144 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
145 min(sizeof(dev_attr->fw_ver),
146 sizeof(ib_attr->fw_ver)));
147 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
148 (u8 *)&ib_attr->sys_image_guid);
149 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
150 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
152 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
153 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
154 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
155 ib_attr->max_qp = dev_attr->max_qp;
156 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
157 ib_attr->device_cap_flags =
158 IB_DEVICE_CURR_QP_STATE_MOD
159 | IB_DEVICE_RC_RNR_NAK_GEN
160 | IB_DEVICE_SHUTDOWN_PORT
161 | IB_DEVICE_SYS_IMAGE_GUID
162 | IB_DEVICE_LOCAL_DMA_LKEY
163 | IB_DEVICE_RESIZE_MAX_WR
164 | IB_DEVICE_PORT_ACTIVE_EVENT
165 | IB_DEVICE_N_NOTIFY_CQ
166 | IB_DEVICE_MEM_WINDOW
167 | IB_DEVICE_MEM_WINDOW_TYPE_2B
168 | IB_DEVICE_MEM_MGT_EXTENSIONS;
169 ib_attr->max_sge = dev_attr->max_qp_sges;
170 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
171 ib_attr->max_cq = dev_attr->max_cq;
172 ib_attr->max_cqe = dev_attr->max_cq_wqes;
173 ib_attr->max_mr = dev_attr->max_mr;
174 ib_attr->max_pd = dev_attr->max_pd;
175 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
176 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
177 ib_attr->atomic_cap = IB_ATOMIC_NONE;
178 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
180 ib_attr->max_ee_rd_atom = 0;
181 ib_attr->max_res_rd_atom = 0;
182 ib_attr->max_ee_init_rd_atom = 0;
184 ib_attr->max_rdd = 0;
185 ib_attr->max_mw = dev_attr->max_mw;
186 ib_attr->max_raw_ipv6_qp = 0;
187 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
188 ib_attr->max_mcast_grp = 0;
189 ib_attr->max_mcast_qp_attach = 0;
190 ib_attr->max_total_mcast_qp_attach = 0;
191 ib_attr->max_ah = dev_attr->max_ah;
193 ib_attr->max_fmr = 0;
194 ib_attr->max_map_per_fmr = 0;
196 ib_attr->max_srq = dev_attr->max_srq;
197 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
198 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
200 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
202 ib_attr->max_pkeys = 1;
203 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
207 int bnxt_re_modify_device(struct ib_device *ibdev,
208 int device_modify_mask,
209 struct ib_device_modify *device_modify)
211 switch (device_modify_mask) {
212 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
213 /* Modifying the GUID requires modification of the GID table */
214 /* The GUID should be treated as READ-ONLY */
216 case IB_DEVICE_MODIFY_NODE_DESC:
217 /* The Node Desc should be treated as READ-ONLY */
226 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
227 struct ib_port_attr *port_attr)
229 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
230 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
232 memset(port_attr, 0, sizeof(*port_attr));
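/* The phys_state values assigned below follow the IB PortPhysicalState
 * encoding: 5 == LinkUp, 3 == Disabled.
 */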
234 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
235 port_attr->state = IB_PORT_ACTIVE;
236 port_attr->phys_state = 5;
238 port_attr->state = IB_PORT_DOWN;
239 port_attr->phys_state = 3;
241 port_attr->max_mtu = IB_MTU_4096;
242 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
243 port_attr->gid_tbl_len = dev_attr->max_sgid;
244 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
245 IB_PORT_DEVICE_MGMT_SUP |
246 IB_PORT_VENDOR_CLASS_SUP |
247 IB_PORT_IP_BASED_GIDS;
249 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
250 port_attr->bad_pkey_cntr = 0;
251 port_attr->qkey_viol_cntr = 0;
252 port_attr->pkey_tbl_len = dev_attr->max_pkey;
254 port_attr->sm_lid = 0;
256 port_attr->max_vl_num = 4;
257 port_attr->sm_sl = 0;
258 port_attr->subnet_timeout = 0;
259 port_attr->init_type_reply = 0;
260 port_attr->active_speed = rdev->active_speed;
261 port_attr->active_width = rdev->active_width;
266 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
267 struct ib_port_immutable *immutable)
269 struct ib_port_attr port_attr;
271 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
274 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
275 immutable->gid_tbl_len = port_attr.gid_tbl_len;
276 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
277 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
278 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
282 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
284 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
286 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
287 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
288 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
291 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
292 u16 index, u16 *pkey)
294 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
296 /* Ignore port_num */
298 memset(pkey, 0, sizeof(*pkey));
299 return bnxt_qplib_get_pkey(&rdev->qplib_res,
300 &rdev->qplib_res.pkey_tbl, index, pkey);
303 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
304 int index, union ib_gid *gid)
306 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
309 /* Ignore port_num */
310 memset(gid, 0, sizeof(*gid));
311 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
312 &rdev->qplib_res.sgid_tbl, index,
313 (struct bnxt_qplib_gid *)gid);
317 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
320 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
321 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
322 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
323 struct bnxt_qplib_gid *gid_to_del;
325 /* Delete the entry from the hardware */
330 if (sgid_tbl && sgid_tbl->active) {
331 if (ctx->idx >= sgid_tbl->max)
333 gid_to_del = &sgid_tbl->tbl[ctx->idx];
334 /* DEL_GID is called in WQ context (netdevice_event_work_handler)
335 * or via the ib_unregister_device path. In the former case QP1
336 * may not be destroyed yet, in which case just return as FW
337 * needs that entry to be present and will fail its deletion.
338 * We could get invoked again after QP1 is destroyed OR get an
339 * ADD_GID call with a different GID value for the same index
340 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
343 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
344 ctx->refcnt == 1 && rdev->qp1_sqp) {
345 dev_dbg(rdev_to_dev(rdev),
346 "Trying to delete GID0 while QP1 is alive\n");
351 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
353 dev_err(rdev_to_dev(rdev),
354 "Failed to remove GID: %#x", rc);
356 ctx_tbl = sgid_tbl->ctx;
357 ctx_tbl[ctx->idx] = NULL;
367 int bnxt_re_add_gid(const union ib_gid *gid,
368 const struct ib_gid_attr *attr, void **context)
372 u16 vlan_id = 0xFFFF;
373 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
374 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
375 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
377 if ((attr->ndev) && is_vlan_dev(attr->ndev))
378 vlan_id = vlan_dev_vlan_id(attr->ndev);
380 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
381 rdev->qplib_res.netdev->dev_addr,
382 vlan_id, true, &tbl_idx);
383 if (rc == -EALREADY) {
384 ctx_tbl = sgid_tbl->ctx;
385 ctx_tbl[tbl_idx]->refcnt++;
386 *context = ctx_tbl[tbl_idx];
391 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
395 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
398 ctx_tbl = sgid_tbl->ctx;
401 ctx_tbl[tbl_idx] = ctx;
407 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
410 return IB_LINK_LAYER_ETHERNET;
413 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
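/* Illustrative sizing (assuming BNXT_RE_FENCE_BYTES is the small fixed
 * fence buffer defined in the driver headers, e.g. 64 bytes): with a 4K
 * PAGE_SIZE, DIV_ROUND_UP() yields a single PBL entry for the fence MR.
 */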
415 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
417 struct bnxt_re_fence_data *fence = &pd->fence;
418 struct ib_mr *ib_mr = &fence->mr->ib_mr;
419 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
421 memset(wqe, 0, sizeof(*wqe));
422 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
423 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
424 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
425 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
426 wqe->bind.zero_based = false;
427 wqe->bind.parent_l_key = ib_mr->lkey;
428 wqe->bind.va = (u64)(unsigned long)fence->va;
429 wqe->bind.length = fence->size;
430 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
431 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
433 /* Save the initial rkey in fence structure for now;
434 * wqe->bind.r_key will be set at (re)bind time.
436 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
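/* Note: ib_inc_rkey() only increments the low 8-bit consumer-key portion
 * of the rkey, so every (re)bind presents a fresh rkey while the MW index
 * itself is unchanged.
 */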
439 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
441 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
443 struct ib_pd *ib_pd = qp->ib_qp.pd;
444 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
445 struct bnxt_re_fence_data *fence = &pd->fence;
446 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
447 struct bnxt_qplib_swqe wqe;
450 memcpy(&wqe, fence_wqe, sizeof(wqe));
451 wqe.bind.r_key = fence->bind_rkey;
452 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
454 dev_dbg(rdev_to_dev(qp->rdev),
455 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
456 wqe.bind.r_key, qp->qplib_qp.id, pd);
457 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
459 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
462 bnxt_qplib_post_send_db(&qp->qplib_qp);
467 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
469 struct bnxt_re_fence_data *fence = &pd->fence;
470 struct bnxt_re_dev *rdev = pd->rdev;
471 struct device *dev = &rdev->en_dev->pdev->dev;
472 struct bnxt_re_mr *mr = fence->mr;
475 bnxt_re_dealloc_mw(fence->mw);
480 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
483 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
487 if (fence->dma_addr) {
488 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
494 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
496 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
497 struct bnxt_re_fence_data *fence = &pd->fence;
498 struct bnxt_re_dev *rdev = pd->rdev;
499 struct device *dev = &rdev->en_dev->pdev->dev;
500 struct bnxt_re_mr *mr = NULL;
501 dma_addr_t dma_addr = 0;
506 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
508 rc = dma_mapping_error(dev, dma_addr);
510 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
515 fence->dma_addr = dma_addr;
518 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
525 mr->qplib_mr.pd = &pd->qplib_pd;
526 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
527 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
528 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
530 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
535 mr->ib_mr.lkey = mr->qplib_mr.lkey;
536 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
537 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
539 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
540 BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
542 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
545 mr->ib_mr.rkey = mr->qplib_mr.rkey;
547 /* Create a fence MW only for kernel consumers */
548 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
550 dev_err(rdev_to_dev(rdev),
551 "Failed to create fence-MW for PD: %p\n", pd);
557 bnxt_re_create_fence_wqe(pd);
561 bnxt_re_destroy_fence_mr(pd);
565 /* Protection Domains */
566 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
568 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
569 struct bnxt_re_dev *rdev = pd->rdev;
572 bnxt_re_destroy_fence_mr(pd);
574 if (pd->qplib_pd.id) {
575 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
576 &rdev->qplib_res.pd_tbl,
579 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
586 struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
587 struct ib_ucontext *ucontext,
588 struct ib_udata *udata)
590 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
591 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
592 struct bnxt_re_ucontext,
594 struct bnxt_re_pd *pd;
597 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
599 return ERR_PTR(-ENOMEM);
602 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
603 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
609 struct bnxt_re_pd_resp resp;
611 if (!ucntx->dpi.dbr) {
612 /* Allocate the DPI in alloc_pd to avoid failures of
613 * ibv_devinfo and similar applications when DPIs are depleted.
616 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
617 &ucntx->dpi, ucntx)) {
623 resp.pdid = pd->qplib_pd.id;
624 /* Still allow mapping this DBR to the new user PD. */
625 resp.dpi = ucntx->dpi.dpi;
626 resp.dbr = (u64)ucntx->dpi.umdbr;
628 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
630 dev_err(rdev_to_dev(rdev),
631 "Failed to copy user response\n");
637 if (bnxt_re_create_fence_mr(pd))
638 dev_warn(rdev_to_dev(rdev),
639 "Failed to create Fence-MR\n");
642 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
649 /* Address Handles */
650 int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
652 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
653 struct bnxt_re_dev *rdev = ah->rdev;
656 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
658 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
665 struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
666 struct rdma_ah_attr *ah_attr,
667 struct ib_udata *udata)
669 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
670 struct bnxt_re_dev *rdev = pd->rdev;
671 struct bnxt_re_ah *ah;
672 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
676 struct ib_gid_attr sgid_attr;
678 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
679 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
680 return ERR_PTR(-EINVAL);
682 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
684 return ERR_PTR(-ENOMEM);
687 ah->qplib_ah.pd = &pd->qplib_pd;
689 /* Supply the configuration for the HW */
690 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
691 sizeof(union ib_gid));
693 * If RoCE V2 is enabled, the stack will have two entries for
694 * each GID entry. Avoid this duplicate entry in HW by dividing
695 * the GID index by 2 for RoCE V2.
697 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
698 ah->qplib_ah.host_sgid_index = grh->sgid_index;
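/* Example of the mapping above: stack indices 2 (RoCE v1) and 3 (RoCE v2)
 * describe the same GID and both resolve to HW SGID index 2 / 2 == 1;
 * host_sgid_index keeps the original stack index for later queries.
 */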
699 ah->qplib_ah.traffic_class = grh->traffic_class;
700 ah->qplib_ah.flow_label = grh->flow_label;
701 ah->qplib_ah.hop_limit = grh->hop_limit;
702 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
703 if (ib_pd->uobject &&
704 !rdma_is_multicast_addr((struct in6_addr *)
706 !rdma_link_local_addr((struct in6_addr *)
710 rc = ib_get_cached_gid(&rdev->ibdev, 1,
711 grh->sgid_index, &sgid,
714 dev_err(rdev_to_dev(rdev),
715 "Failed to query gid at index %d",
719 dev_put(sgid_attr.ndev);
720 /* Get network header type for this GID */
721 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
723 case RDMA_NETWORK_IPV4:
724 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
726 case RDMA_NETWORK_IPV6:
727 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
730 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
735 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
736 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
738 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
742 /* Write AVID to shared page. */
743 if (ib_pd->uobject) {
744 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
745 struct bnxt_re_ucontext *uctx;
749 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
750 spin_lock_irqsave(&uctx->sh_lock, flag);
751 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
752 *wrptr = ah->qplib_ah.id;
753 wmb(); /* make sure the AVID write is visible before unlocking */
754 spin_unlock_irqrestore(&uctx->sh_lock, flag);
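/* The user-space provider is expected to read the AVID back from this
 * shared page (offset BNXT_RE_AVID_OFFT); the lock and barrier above make
 * sure the value is visible before create_ah returns to the caller.
 */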
764 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
769 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
771 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
773 ah_attr->type = ib_ah->type;
774 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
775 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
776 rdma_ah_set_grh(ah_attr, NULL, 0,
777 ah->qplib_ah.host_sgid_index,
778 0, ah->qplib_ah.traffic_class);
779 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
780 rdma_ah_set_port_num(ah_attr, 1);
781 rdma_ah_set_static_rate(ah_attr, 0);
785 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
786 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
790 spin_lock_irqsave(&qp->scq->cq_lock, flags);
791 if (qp->rcq != qp->scq)
792 spin_lock(&qp->rcq->cq_lock);
794 __acquire(&qp->rcq->cq_lock);
799 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
801 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
803 if (qp->rcq != qp->scq)
804 spin_unlock(&qp->rcq->cq_lock);
806 __release(&qp->rcq->cq_lock);
807 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
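/* Typical usage of the helpers above (as in bnxt_re_destroy_qp() below):
 *
 *	flags = bnxt_re_lock_cqs(qp);
 *	bnxt_qplib_clean_qp(&qp->qplib_qp);
 *	bnxt_re_unlock_cqs(qp, flags);
 *
 * scq is always taken first and rcq only when it differs, which keeps the
 * lock order consistent and avoids a self-deadlock when both CQs are the
 * same object.
 */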
811 int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
813 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
814 struct bnxt_re_dev *rdev = qp->rdev;
818 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
819 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
821 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
825 flags = bnxt_re_lock_cqs(qp);
826 bnxt_qplib_clean_qp(&qp->qplib_qp);
827 bnxt_re_unlock_cqs(qp, flags);
828 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
830 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
831 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
832 &rdev->sqp_ah->qplib_ah);
834 dev_err(rdev_to_dev(rdev),
835 "Failed to destroy HW AH for shadow QP");
839 bnxt_qplib_clean_qp(&qp->qplib_qp);
840 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
841 &rdev->qp1_sqp->qplib_qp);
843 dev_err(rdev_to_dev(rdev),
844 "Failed to destroy Shadow QP");
847 mutex_lock(&rdev->qp_lock);
848 list_del(&rdev->qp1_sqp->list);
849 atomic_dec(&rdev->qp_count);
850 mutex_unlock(&rdev->qp_lock);
853 kfree(rdev->qp1_sqp);
854 rdev->qp1_sqp = NULL;
858 if (!IS_ERR_OR_NULL(qp->rumem))
859 ib_umem_release(qp->rumem);
860 if (!IS_ERR_OR_NULL(qp->sumem))
861 ib_umem_release(qp->sumem);
863 mutex_lock(&rdev->qp_lock);
865 atomic_dec(&rdev->qp_count);
866 mutex_unlock(&rdev->qp_lock);
871 static u8 __from_ib_qp_type(enum ib_qp_type type)
875 return CMDQ_CREATE_QP1_TYPE_GSI;
877 return CMDQ_CREATE_QP_TYPE_RC;
879 return CMDQ_CREATE_QP_TYPE_UD;
885 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
886 struct bnxt_re_qp *qp, struct ib_udata *udata)
888 struct bnxt_re_qp_req ureq;
889 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
890 struct ib_umem *umem;
892 struct ib_ucontext *context = pd->ib_pd.uobject->context;
893 struct bnxt_re_ucontext *cntx = container_of(context,
894 struct bnxt_re_ucontext,
896 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
899 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
900 /* Consider mapping PSN search memory only for RC QPs. */
901 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
902 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
903 bytes = PAGE_ALIGN(bytes);
904 umem = ib_umem_get(context, ureq.qpsva, bytes,
905 IB_ACCESS_LOCAL_WRITE, 1);
907 return PTR_ERR(umem);
910 qplib_qp->sq.sglist = umem->sg_head.sgl;
911 qplib_qp->sq.nmap = umem->nmap;
912 qplib_qp->qp_handle = ureq.qp_handle;
914 if (!qp->qplib_qp.srq) {
915 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
916 bytes = PAGE_ALIGN(bytes);
917 umem = ib_umem_get(context, ureq.qprva, bytes,
918 IB_ACCESS_LOCAL_WRITE, 1);
922 qplib_qp->rq.sglist = umem->sg_head.sgl;
923 qplib_qp->rq.nmap = umem->nmap;
926 qplib_qp->dpi = &cntx->dpi;
929 ib_umem_release(qp->sumem);
931 qplib_qp->sq.sglist = NULL;
932 qplib_qp->sq.nmap = 0;
934 return PTR_ERR(umem);
937 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
938 (struct bnxt_re_pd *pd,
939 struct bnxt_qplib_res *qp1_res,
940 struct bnxt_qplib_qp *qp1_qp)
942 struct bnxt_re_dev *rdev = pd->rdev;
943 struct bnxt_re_ah *ah;
947 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
952 ah->qplib_ah.pd = &pd->qplib_pd;
954 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
958 /* Supply the dgid with the same data as the sgid */
959 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
960 sizeof(union ib_gid));
961 ah->qplib_ah.sgid_index = 0;
963 ah->qplib_ah.traffic_class = 0;
964 ah->qplib_ah.flow_label = 0;
965 ah->qplib_ah.hop_limit = 1;
967 /* Have DMAC same as SMAC */
968 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
970 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
972 dev_err(rdev_to_dev(rdev),
973 "Failed to allocate HW AH for Shadow QP");
984 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
985 (struct bnxt_re_pd *pd,
986 struct bnxt_qplib_res *qp1_res,
987 struct bnxt_qplib_qp *qp1_qp)
989 struct bnxt_re_dev *rdev = pd->rdev;
990 struct bnxt_re_qp *qp;
993 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
999 /* Initialize the shadow QP structure from the QP1 values */
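/* The shadow QP mirrors QP1's RQ sizing and shares its CQs. It re-receives
 * GSI traffic into full-size header buffers (rq_hdr_buf_size below) so the
 * GRH and MAD payload can later be copied back to the consumer's SGE; see
 * bnxt_re_build_qp1_shadow_qp_recv() for the receive-side handling.
 */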
1000 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1002 qp->qplib_qp.pd = &pd->qplib_pd;
1003 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1004 qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_UD;
1006 qp->qplib_qp.max_inline_data = 0;
1007 qp->qplib_qp.sig_type = true;
1009 /* Shadow QP SQ depth should be the same as the QP1 RQ depth */
1010 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1011 qp->qplib_qp.sq.max_sge = 2;
1012 /* Q full delta can be 1 since it is internal QP */
1013 qp->qplib_qp.sq.q_full_delta = 1;
1015 qp->qplib_qp.scq = qp1_qp->scq;
1016 qp->qplib_qp.rcq = qp1_qp->rcq;
1018 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1019 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1020 /* Q full delta can be 1 since it is internal QP */
1021 qp->qplib_qp.rq.q_full_delta = 1;
1023 qp->qplib_qp.mtu = qp1_qp->mtu;
1025 qp->qplib_qp.sq_hdr_buf_size = 0;
1026 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1027 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1029 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1033 rdev->sqp_id = qp->qplib_qp.id;
1035 spin_lock_init(&qp->sq_lock);
1036 INIT_LIST_HEAD(&qp->list);
1037 mutex_lock(&rdev->qp_lock);
1038 list_add_tail(&qp->list, &rdev->qp_list);
1039 atomic_inc(&rdev->qp_count);
1040 mutex_unlock(&rdev->qp_lock);
1047 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1048 struct ib_qp_init_attr *qp_init_attr,
1049 struct ib_udata *udata)
1051 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1052 struct bnxt_re_dev *rdev = pd->rdev;
1053 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1054 struct bnxt_re_qp *qp;
1055 struct bnxt_re_cq *cq;
1056 struct bnxt_re_srq *srq;
1059 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1060 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1061 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1062 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1063 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1064 return ERR_PTR(-EINVAL);
1066 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1068 return ERR_PTR(-ENOMEM);
1071 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1072 qp->qplib_qp.pd = &pd->qplib_pd;
1073 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1074 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1075 if (qp->qplib_qp.type == IB_QPT_MAX) {
1076 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1081 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1082 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1083 IB_SIGNAL_ALL_WR) ? true : false);
1085 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1086 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1087 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1089 if (qp_init_attr->send_cq) {
1090 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1093 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1097 qp->qplib_qp.scq = &cq->qplib_cq;
1101 if (qp_init_attr->recv_cq) {
1102 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1105 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1109 qp->qplib_qp.rcq = &cq->qplib_cq;
1113 if (qp_init_attr->srq) {
1114 srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
1117 dev_err(rdev_to_dev(rdev), "SRQ not found");
1121 qp->qplib_qp.srq = &srq->qplib_srq;
1122 qp->qplib_qp.rq.max_wqe = 0;
1124 /* Allocate 1 more than what's provided so posting the max doesn't make the queue look empty */
1127 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1128 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1129 dev_attr->max_qp_wqes + 1);
1131 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1132 qp_init_attr->cap.max_recv_wr;
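/* Worked example of the sizing above: max_recv_wr == 1000 gives
 * entries == roundup_pow_of_two(1001) == 1024, so (subject to the
 * device cap) rq.max_wqe == 1024 and q_full_delta == 24 slots of
 * headroom.
 */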
1134 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1135 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1136 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1139 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1141 if (qp_init_attr->qp_type == IB_QPT_GSI) {
1142 /* Allocate 1 more than what's provided */
1143 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1144 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1145 dev_attr->max_qp_wqes + 1);
1146 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1147 qp_init_attr->cap.max_send_wr;
1148 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1149 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1150 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1151 qp->qplib_qp.sq.max_sge++;
1152 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1153 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1155 qp->qplib_qp.rq_hdr_buf_size =
1156 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1158 qp->qplib_qp.sq_hdr_buf_size =
1159 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1160 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1161 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1163 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1166 /* Create a shadow QP to handle the QP1 traffic */
1167 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1169 if (!rdev->qp1_sqp) {
1171 dev_err(rdev_to_dev(rdev),
1172 "Failed to create Shadow QP for QP1");
1175 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1177 if (!rdev->sqp_ah) {
1178 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1179 &rdev->qp1_sqp->qplib_qp);
1181 dev_err(rdev_to_dev(rdev),
1182 "Failed to create AH entry for ShadowQP");
1187 /* Allocate 128 + 1 more than what's provided */
1188 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1189 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1190 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1191 dev_attr->max_qp_wqes +
1192 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1193 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1196 * Reserve one slot for the phantom WQE. The application can
1197 * post one extra entry in this case; allow it to avoid an
1198 * unexpected queue-full condition.
1201 qp->qplib_qp.sq.q_full_delta -= 1;
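/* Worked example (BNXT_QPLIB_RESERVED_QP_WRS == 128 per the comment
 * above): max_send_wr == 256 gives entries == roundup_pow_of_two(385)
 * == 512; q_full_delta starts at 129 and drops to 128 once the
 * phantom-WQE slot is reserved.
 */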
1203 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1204 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1206 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1210 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1213 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1215 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1220 qp->ib_qp.qp_num = qp->qplib_qp.id;
1221 spin_lock_init(&qp->sq_lock);
1222 spin_lock_init(&qp->rq_lock);
1225 struct bnxt_re_qp_resp resp;
1227 resp.qpid = qp->ib_qp.qp_num;
1229 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1231 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1235 INIT_LIST_HEAD(&qp->list);
1236 mutex_lock(&rdev->qp_lock);
1237 list_add_tail(&qp->list, &rdev->qp_list);
1238 atomic_inc(&rdev->qp_count);
1239 mutex_unlock(&rdev->qp_lock);
1243 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1247 ib_umem_release(qp->rumem);
1249 ib_umem_release(qp->sumem);
1256 static u8 __from_ib_qp_state(enum ib_qp_state state)
1260 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1262 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1264 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1266 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1268 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1270 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1273 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1277 static enum ib_qp_state __to_ib_qp_state(u8 state)
1280 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1281 return IB_QPS_RESET;
1282 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1284 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1286 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1288 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1290 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1292 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1298 static u32 __from_ib_mtu(enum ib_mtu mtu)
1302 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1304 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1306 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1308 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1310 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1312 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1316 static enum ib_mtu __to_ib_mtu(u32 mtu)
1318 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1319 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1321 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1323 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1325 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1327 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1334 /* Shared Receive Queues */
1335 int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
1337 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1339 struct bnxt_re_dev *rdev = srq->rdev;
1340 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1341 struct bnxt_qplib_nq *nq = NULL;
1345 nq = qplib_srq->cq->nq;
1346 rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1348 dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
1353 ib_umem_release(srq->umem);
1355 atomic_dec(&rdev->srq_count);
1361 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1362 struct bnxt_re_pd *pd,
1363 struct bnxt_re_srq *srq,
1364 struct ib_udata *udata)
1366 struct bnxt_re_srq_req ureq;
1367 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1368 struct ib_umem *umem;
1370 struct ib_ucontext *context = pd->ib_pd.uobject->context;
1371 struct bnxt_re_ucontext *cntx = container_of(context,
1372 struct bnxt_re_ucontext,
1374 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1377 bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1378 bytes = PAGE_ALIGN(bytes);
1379 umem = ib_umem_get(context, ureq.srqva, bytes,
1380 IB_ACCESS_LOCAL_WRITE, 1);
1382 return PTR_ERR(umem);
1385 qplib_srq->nmap = umem->nmap;
1386 qplib_srq->sglist = umem->sg_head.sgl;
1387 qplib_srq->srq_handle = ureq.srq_handle;
1388 qplib_srq->dpi = &cntx->dpi;
1393 struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
1394 struct ib_srq_init_attr *srq_init_attr,
1395 struct ib_udata *udata)
1397 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1398 struct bnxt_re_dev *rdev = pd->rdev;
1399 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1400 struct bnxt_re_srq *srq;
1401 struct bnxt_qplib_nq *nq = NULL;
1404 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1405 dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
1410 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1415 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1421 srq->qplib_srq.pd = &pd->qplib_pd;
1422 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1423 /* Allocate 1 more than what's provided so posting the max doesn't make the queue look empty */
1426 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1427 if (entries > dev_attr->max_srq_wqes + 1)
1428 entries = dev_attr->max_srq_wqes + 1;
1430 srq->qplib_srq.max_wqe = entries;
1431 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1432 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1433 srq->srq_limit = srq_init_attr->attr.srq_limit;
1434 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1438 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1443 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1445 dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1450 struct bnxt_re_srq_resp resp;
1452 resp.srqid = srq->qplib_srq.id;
1453 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1455 dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1456 bnxt_qplib_destroy_srq(&rdev->qplib_res,
1463 atomic_inc(&rdev->srq_count);
1465 return &srq->ib_srq;
1469 ib_umem_release(srq->umem);
1475 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1476 enum ib_srq_attr_mask srq_attr_mask,
1477 struct ib_udata *udata)
1479 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1481 struct bnxt_re_dev *rdev = srq->rdev;
1484 switch (srq_attr_mask) {
1486 /* SRQ resize is not supported */
1489 /* Change the SRQ threshold */
1490 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1493 srq->qplib_srq.threshold = srq_attr->srq_limit;
1494 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1496 dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1499 /* On success, update the shadow */
1500 srq->srq_limit = srq_attr->srq_limit;
1501 /* No need to Build and send response back to udata */
1504 dev_err(rdev_to_dev(rdev),
1505 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1511 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1513 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1515 struct bnxt_re_srq tsrq;
1516 struct bnxt_re_dev *rdev = srq->rdev;
1519 /* Get live SRQ attr */
1520 tsrq.qplib_srq.id = srq->qplib_srq.id;
1521 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1523 dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1526 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1527 srq_attr->max_sge = srq->qplib_srq.max_sge;
1528 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1533 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
1534 struct ib_recv_wr **bad_wr)
1536 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1538 struct bnxt_qplib_swqe wqe;
1539 unsigned long flags;
1542 spin_lock_irqsave(&srq->lock, flags);
1544 /* Transcribe each ib_recv_wr to qplib_swqe */
1545 wqe.num_sge = wr->num_sge;
1546 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1547 wqe.wr_id = wr->wr_id;
1548 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1550 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1557 spin_unlock_irqrestore(&srq->lock, flags);
1561 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1562 struct bnxt_re_qp *qp1_qp,
1565 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1568 if (qp_attr_mask & IB_QP_STATE) {
1569 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1570 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1572 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1573 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1574 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1577 if (qp_attr_mask & IB_QP_QKEY) {
1578 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1579 /* Use a fixed, driver-chosen QKEY for the shadow QP */
1580 qp->qplib_qp.qkey = 0x81818181;
1582 if (qp_attr_mask & IB_QP_SQ_PSN) {
1583 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1584 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1587 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1589 dev_err(rdev_to_dev(rdev),
1590 "Failed to modify Shadow QP for QP1");
1594 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1595 int qp_attr_mask, struct ib_udata *udata)
1597 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1598 struct bnxt_re_dev *rdev = qp->rdev;
1599 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1600 enum ib_qp_state curr_qp_state, new_qp_state;
1604 struct ib_gid_attr sgid_attr;
1608 qp->qplib_qp.modify_flags = 0;
1609 if (qp_attr_mask & IB_QP_STATE) {
1610 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1611 new_qp_state = qp_attr->qp_state;
1612 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1613 ib_qp->qp_type, qp_attr_mask,
1614 IB_LINK_LAYER_ETHERNET)) {
1615 dev_err(rdev_to_dev(rdev),
1616 "Invalid attribute mask: %#x specified ",
1618 dev_err(rdev_to_dev(rdev),
1619 "for qpn: %#x type: %#x",
1620 ib_qp->qp_num, ib_qp->qp_type);
1621 dev_err(rdev_to_dev(rdev),
1622 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1623 curr_qp_state, new_qp_state);
1626 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1627 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1630 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1631 dev_dbg(rdev_to_dev(rdev),
1632 "Move QP = %p to flush list\n",
1634 flags = bnxt_re_lock_cqs(qp);
1635 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1636 bnxt_re_unlock_cqs(qp, flags);
1639 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1640 dev_dbg(rdev_to_dev(rdev),
1641 "Move QP = %p out of flush list\n",
1643 flags = bnxt_re_lock_cqs(qp);
1644 bnxt_qplib_clean_qp(&qp->qplib_qp);
1645 bnxt_re_unlock_cqs(qp, flags);
1648 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1649 qp->qplib_qp.modify_flags |=
1650 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1651 qp->qplib_qp.en_sqd_async_notify = true;
1653 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1654 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1655 qp->qplib_qp.access =
1656 __from_ib_access_flags(qp_attr->qp_access_flags);
1657 /* LOCAL_WRITE access must be set to allow RC receive */
1658 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1660 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1661 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1662 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1664 if (qp_attr_mask & IB_QP_QKEY) {
1665 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1666 qp->qplib_qp.qkey = qp_attr->qkey;
1668 if (qp_attr_mask & IB_QP_AV) {
1669 const struct ib_global_route *grh =
1670 rdma_ah_read_grh(&qp_attr->ah_attr);
1672 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1673 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1674 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1675 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1676 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1677 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1678 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1679 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1680 sizeof(qp->qplib_qp.ah.dgid.data));
1681 qp->qplib_qp.ah.flow_label = grh->flow_label;
1682 /* If RoCE V2 is enabled, the stack will have two entries for
1683 * each GID entry. Avoid this duplicate entry in HW by dividing
1684 * the GID index by 2 for RoCE V2.
1686 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1687 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1688 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1689 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1690 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1691 ether_addr_copy(qp->qplib_qp.ah.dmac,
1692 qp_attr->ah_attr.roce.dmac);
1694 status = ib_get_cached_gid(&rdev->ibdev, 1,
1698 memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1700 dev_put(sgid_attr.ndev);
1701 nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1704 case RDMA_NETWORK_IPV4:
1705 qp->qplib_qp.nw_type =
1706 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1708 case RDMA_NETWORK_IPV6:
1709 qp->qplib_qp.nw_type =
1710 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1713 qp->qplib_qp.nw_type =
1714 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1720 if (qp_attr_mask & IB_QP_PATH_MTU) {
1721 qp->qplib_qp.modify_flags |=
1722 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1723 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1724 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1725 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1726 qp->qplib_qp.modify_flags |=
1727 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1728 qp->qplib_qp.path_mtu =
1729 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1731 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1734 if (qp_attr_mask & IB_QP_TIMEOUT) {
1735 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1736 qp->qplib_qp.timeout = qp_attr->timeout;
1738 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1739 qp->qplib_qp.modify_flags |=
1740 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1741 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1743 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1744 qp->qplib_qp.modify_flags |=
1745 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1746 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1748 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1749 qp->qplib_qp.modify_flags |=
1750 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1751 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1753 if (qp_attr_mask & IB_QP_RQ_PSN) {
1754 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1755 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1757 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1758 qp->qplib_qp.modify_flags |=
1759 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1760 /* Cap the max_rd_atomic to device max */
1761 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1762 dev_attr->max_qp_rd_atom);
1764 if (qp_attr_mask & IB_QP_SQ_PSN) {
1765 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1766 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1768 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1769 if (qp_attr->max_dest_rd_atomic >
1770 dev_attr->max_qp_init_rd_atom) {
1771 dev_err(rdev_to_dev(rdev),
1772 "max_dest_rd_atomic requested%d is > dev_max%d",
1773 qp_attr->max_dest_rd_atomic,
1774 dev_attr->max_qp_init_rd_atom);
1778 qp->qplib_qp.modify_flags |=
1779 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1780 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1782 if (qp_attr_mask & IB_QP_CAP) {
1783 qp->qplib_qp.modify_flags |=
1784 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1785 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1786 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1787 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1788 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1789 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1790 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1791 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1792 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1793 (qp_attr->cap.max_inline_data >=
1794 dev_attr->max_inline_data)) {
1795 dev_err(rdev_to_dev(rdev),
1796 "Create QP failed - max exceeded");
1799 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1800 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1801 dev_attr->max_qp_wqes + 1);
1802 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1803 qp_attr->cap.max_send_wr;
1805 * Reserve one slot for the phantom WQE. The application can
1806 * post one extra entry in this case; allow it to avoid an
1807 * unexpected queue-full condition.
1809 qp->qplib_qp.sq.q_full_delta -= 1;
1810 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1811 if (qp->qplib_qp.rq.max_wqe) {
1812 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1813 qp->qplib_qp.rq.max_wqe =
1814 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1815 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1816 qp_attr->cap.max_recv_wr;
1817 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1819 /* SRQ was used prior, just ignore the RQ caps */
1822 if (qp_attr_mask & IB_QP_DEST_QPN) {
1823 qp->qplib_qp.modify_flags |=
1824 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1825 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1827 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1829 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1832 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1833 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1837 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1838 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1840 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1841 struct bnxt_re_dev *rdev = qp->rdev;
1842 struct bnxt_qplib_qp *qplib_qp;
1845 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1849 qplib_qp->id = qp->qplib_qp.id;
1850 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1852 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1854 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1857 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1858 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1859 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1860 qp_attr->pkey_index = qplib_qp->pkey_index;
1861 qp_attr->qkey = qplib_qp->qkey;
1862 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1863 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1864 qplib_qp->ah.host_sgid_index,
1865 qplib_qp->ah.hop_limit,
1866 qplib_qp->ah.traffic_class);
1867 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1868 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1869 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1870 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1871 qp_attr->timeout = qplib_qp->timeout;
1872 qp_attr->retry_cnt = qplib_qp->retry_cnt;
1873 qp_attr->rnr_retry = qplib_qp->rnr_retry;
1874 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1875 qp_attr->rq_psn = qplib_qp->rq.psn;
1876 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1877 qp_attr->sq_psn = qplib_qp->sq.psn;
1878 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1879 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1881 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1883 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1884 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1885 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1886 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1887 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1888 qp_init_attr->cap = qp_attr->cap;
1895 /* Routine for sending QP1 packets for RoCE V1 and V2
1897 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1898 struct ib_send_wr *wr,
1899 struct bnxt_qplib_swqe *wqe,
1902 struct ib_device *ibdev = &qp->rdev->ibdev;
1903 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1905 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1906 struct bnxt_qplib_sge sge;
1910 struct ib_gid_attr sgid_attr;
1912 bool is_eth = false;
1913 bool is_vlan = false;
1914 bool is_grh = false;
1915 bool is_udp = false;
1917 u16 vlan_id = 0xFFFF;
1921 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1923 rc = ib_get_cached_gid(ibdev, 1,
1924 qplib_ah->host_sgid_index, &sgid,
1927 dev_err(rdev_to_dev(qp->rdev),
1928 "Failed to query gid at index %d",
1929 qplib_ah->host_sgid_index);
1932 if (sgid_attr.ndev) {
1933 if (is_vlan_dev(sgid_attr.ndev))
1934 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1935 dev_put(sgid_attr.ndev);
1937 /* Get network header type for this GID */
1938 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1940 case RDMA_NETWORK_IPV4:
1941 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1943 case RDMA_NETWORK_IPV6:
1944 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1947 nw_type = BNXT_RE_ROCE_V1_PACKET;
1950 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1951 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1953 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1955 ether_type = ETH_P_IP;
1958 ether_type = ETH_P_IPV6;
1962 ether_type = ETH_P_IBOE;
1967 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1969 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1970 ip_version, is_udp, 0, &qp->qp1_hdr);
1973 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1974 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1976 /* For vlan, check the sgid for vlan existence */
1979 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1981 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1982 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1985 if (is_grh || (ip_version == 6)) {
1986 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1987 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1989 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1992 if (ip_version == 4) {
1993 qp->qp1_hdr.ip4.tos = 0;
1994 qp->qp1_hdr.ip4.id = 0;
1995 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1996 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1998 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1999 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2000 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2004 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2005 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2006 qp->qp1_hdr.udp.csum = 0;
2010 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2011 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2012 qp->qp1_hdr.immediate_present = 1;
2014 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2016 if (wr->send_flags & IB_SEND_SOLICITED)
2017 qp->qp1_hdr.bth.solicited_event = 1;
2019 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2021 /* P_key for QP1 is for all members */
2022 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2023 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2024 qp->qp1_hdr.bth.ack_req = 0;
2026 qp->send_psn &= BTH_PSN_MASK;
2027 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2029 /* Use the privileged Q_Key for QP1 */
2030 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2031 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2033 /* Pack the QP1 to the transmit buffer */
2034 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2036 ib_ud_header_pack(&qp->qp1_hdr, buf);
2037 for (i = wqe->num_sge; i; i--) {
2038 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2039 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2040 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2044 * Max header buf size for IPv6 RoCE V2 is 86,
2045 * which is the same as the QP1 SQ header buffer.
2046 * Header buf size for IPv4 RoCE V2 can be 66:
2047 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2048 * Subtract 20 bytes from the QP1 SQ header buf size.
2050 if (is_udp && ip_version == 4)
2053 * Max header buf size for RoCE V1 is 78:
2054 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2055 * Subtract 8 bytes from the QP1 SQ header buf size.
2060 /* Subtract 4 bytes for non-VLAN packets */
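/* Net effect on the header SGE size, per the comments above: the 86-byte
 * IPv6 RoCE v2 buffer shrinks to 66 for IPv4 RoCE v2 (-20), to 78 for
 * RoCE v1 (-8), and by a further 4 bytes when no VLAN tag is present.
 */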
2064 wqe->sg_list[0].addr = sge.addr;
2065 wqe->sg_list[0].lkey = sge.lkey;
2066 wqe->sg_list[0].size = sge.size;
2070 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
2076 /* The MAD layer only provides a recv SGE big enough for the
2077 * ib_grh + MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
2078 * or RoCE iCRC. The Cu+ solution must provide a buffer for the entire
2079 * receive packet (334 bytes) with no VLAN and then copy the GRH
2080 * and the MAD datagram out to the provided SGE.
2082 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2083 struct ib_recv_wr *wr,
2084 struct bnxt_qplib_swqe *wqe,
2087 struct bnxt_qplib_sge ref, sge;
2089 struct bnxt_re_sqp_entries *sqp_entry;
2091 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2093 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2096 /* Create 1 SGE to receive the entire packet */
2099 /* Save the reference from ULP */
2100 ref.addr = wqe->sg_list[0].addr;
2101 ref.lkey = wqe->sg_list[0].lkey;
2102 ref.size = wqe->sg_list[0].size;
2104 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2107 wqe->sg_list[0].addr = sge.addr;
2108 wqe->sg_list[0].lkey = sge.lkey;
2109 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2110 sge.size -= wqe->sg_list[0].size;
2112 sqp_entry->sge.addr = ref.addr;
2113 sqp_entry->sge.lkey = ref.lkey;
2114 sqp_entry->sge.size = ref.size;
2115 /* Store the wrid for reporting completion */
2116 sqp_entry->wrid = wqe->wr_id;
2117 /* Change the wqe->wr_id to the table index */
2118 wqe->wr_id = rq_prod_index;
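/* At completion time this table index (now carried in wr_id) is used to
 * look up sqp_tbl[] and restore the consumer's original wr_id and SGE
 * saved above.
 */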
2122 static int is_ud_qp(struct bnxt_re_qp *qp)
2124 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
2127 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2128 struct ib_send_wr *wr,
2129 struct bnxt_qplib_swqe *wqe)
2131 struct bnxt_re_ah *ah = NULL;
2134 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2135 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2136 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2137 wqe->send.avid = ah->qplib_ah.id;
2139 switch (wr->opcode) {
2141 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2143 case IB_WR_SEND_WITH_IMM:
2144 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2145 wqe->send.imm_data = wr->ex.imm_data;
2147 case IB_WR_SEND_WITH_INV:
2148 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2149 wqe->send.inv_key = wr->ex.invalidate_rkey;
2154 if (wr->send_flags & IB_SEND_SIGNALED)
2155 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2156 if (wr->send_flags & IB_SEND_FENCE)
2157 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2158 if (wr->send_flags & IB_SEND_SOLICITED)
2159 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2160 if (wr->send_flags & IB_SEND_INLINE)
2161 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2166 static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
2167 struct bnxt_qplib_swqe *wqe)
2169 switch (wr->opcode) {
2170 case IB_WR_RDMA_WRITE:
2171 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2173 case IB_WR_RDMA_WRITE_WITH_IMM:
2174 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2175 wqe->rdma.imm_data = wr->ex.imm_data;
2177 case IB_WR_RDMA_READ:
2178 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2179 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2184 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2185 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2186 if (wr->send_flags & IB_SEND_SIGNALED)
2187 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2188 if (wr->send_flags & IB_SEND_FENCE)
2189 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2190 if (wr->send_flags & IB_SEND_SOLICITED)
2191 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2192 if (wr->send_flags & IB_SEND_INLINE)
2193 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2198 static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
2199 struct bnxt_qplib_swqe *wqe)
2201 switch (wr->opcode) {
2202 case IB_WR_ATOMIC_CMP_AND_SWP:
2203 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2204 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2205 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2207 case IB_WR_ATOMIC_FETCH_AND_ADD:
2208 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2209 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2214 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2215 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2216 if (wr->send_flags & IB_SEND_SIGNALED)
2217 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2218 if (wr->send_flags & IB_SEND_FENCE)
2219 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2220 if (wr->send_flags & IB_SEND_SOLICITED)
2221 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2225 static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
2226 struct bnxt_qplib_swqe *wqe)
2228 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2229 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2231 /* Need unconditional fence for local invalidate
2232 * opcode to work as expected.
2234 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2236 if (wr->send_flags & IB_SEND_SIGNALED)
2237 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2238 if (wr->send_flags & IB_SEND_SOLICITED)
2239 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2244 static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
2245 struct bnxt_qplib_swqe *wqe)
2247 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2248 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2249 int access = wr->access;
2251 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2252 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2253 wqe->frmr.page_list = mr->pages;
2254 wqe->frmr.page_list_len = mr->npages;
2255 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2256 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2258 /* Need unconditional fence for reg_mr
2259 * opcode to function as expected.
2262 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2264 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2265 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2267 if (access & IB_ACCESS_LOCAL_WRITE)
2268 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2269 if (access & IB_ACCESS_REMOTE_READ)
2270 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2271 if (access & IB_ACCESS_REMOTE_WRITE)
2272 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2273 if (access & IB_ACCESS_REMOTE_ATOMIC)
2274 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2275 if (access & IB_ACCESS_MW_BIND)
2276 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2278 wqe->frmr.l_key = wr->key;
2279 wqe->frmr.length = wr->mr->length;
2280 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2281 wqe->frmr.va = wr->mr->iova;
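/* Copy the payload of every SGE of an IB_SEND_INLINE work request into
 * the WQE's inline_data buffer, reporting an error if the running total
 * exceeds BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH. Returns the inline length.
 */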
2285 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2286 struct ib_send_wr *wr,
2287 struct bnxt_qplib_swqe *wqe)
2289 /* Copy the inline data to the data field */
2294 in_data = wqe->inline_data;
2295 for (i = 0; i < wr->num_sge; i++) {
2296 sge_addr = (void *)(unsigned long)
2297 wr->sg_list[i].addr;
2298 sge_len = wr->sg_list[i].length;
2300 if ((sge_len + wqe->inline_len) >
2301 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2302 dev_err(rdev_to_dev(rdev),
2303 "Inline data size requested > supported value");
2306 sge_len = wr->sg_list[i].length;
2308 memcpy(in_data, sge_addr, sge_len);
2309 in_data += wr->sg_list[i].length;
2310 wqe->inline_len += wr->sg_list[i].length;
2312 return wqe->inline_len;
2315 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2316 struct ib_send_wr *wr,
2317 struct bnxt_qplib_swqe *wqe)
2321 if (wr->send_flags & IB_SEND_INLINE)
2322 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2324 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2330 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2332 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2333 qp->ib_qp.qp_type == IB_QPT_GSI ||
2334 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2335 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
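/* Work around a HW stall on UD/GSI/raw-ether QPs: after every
 * BNXT_RE_UD_QP_HW_STALL WQEs, poke the QP with a modify-QP to RTS
 * and restart the counter.
 */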
2337 struct ib_qp_attr qp_attr;
2339 qp_attr_mask = IB_QP_STATE;
2340 qp_attr.qp_state = IB_QPS_RTS;
2341 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2342 qp->qplib_qp.wqe_cnt = 0;
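/* Post send WRs on the driver-internal shadow GSI QP. Only plain SENDs
 * are issued here, so there is no per-opcode dispatch as in
 * bnxt_re_post_send().
 */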
2346 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2347 struct bnxt_re_qp *qp,
2348 struct ib_send_wr *wr)
2350 struct bnxt_qplib_swqe wqe;
2351 int rc = 0, payload_sz = 0;
2352 unsigned long flags;
2354 spin_lock_irqsave(&qp->sq_lock, flags);
2355 memset(&wqe, 0, sizeof(wqe));
2358 memset(&wqe, 0, sizeof(wqe));
2361 wqe.num_sge = wr->num_sge;
2362 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2363 dev_err(rdev_to_dev(rdev),
2364 "Limit exceeded for Send SGEs");
2369 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2370 if (payload_sz < 0) {
2374 wqe.wr_id = wr->wr_id;
2376 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2378 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2380 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2383 dev_err(rdev_to_dev(rdev),
2384 "Post send failed opcode = %#x rc = %d",
2390 bnxt_qplib_post_send_db(&qp->qplib_qp);
2391 bnxt_ud_qp_hw_stall_workaround(qp);
2392 spin_unlock_irqrestore(&qp->sq_lock, flags);
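/* ib post_send verb: each WR is checked against sq.max_sge, its payload
 * is copied inline or translated into SGEs, and the opcode is dispatched
 * to the matching build helper before bnxt_qplib_post_send(). On GSI QPs
 * the QP1 header is first built in software by bnxt_re_build_qp1_send_v2().
 */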
2396 int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2397 struct ib_send_wr **bad_wr)
2399 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2400 struct bnxt_qplib_swqe wqe;
2401 int rc = 0, payload_sz = 0;
2402 unsigned long flags;
2404 spin_lock_irqsave(&qp->sq_lock, flags);
2407 memset(&wqe, 0, sizeof(wqe));
2410 wqe.num_sge = wr->num_sge;
2411 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2412 dev_err(rdev_to_dev(qp->rdev),
2413 "Limit exceeded for Send SGEs");
2418 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2419 if (payload_sz < 0) {
2423 wqe.wr_id = wr->wr_id;
2425 switch (wr->opcode) {
2427 case IB_WR_SEND_WITH_IMM:
2428 if (ib_qp->qp_type == IB_QPT_GSI) {
2429 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2433 wqe.rawqp1.lflags |=
2434 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2436 switch (wr->send_flags) {
2437 case IB_SEND_IP_CSUM:
2438 wqe.rawqp1.lflags |=
2439 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2444 /* Fall thru to build the wqe */
2445 case IB_WR_SEND_WITH_INV:
2446 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2448 case IB_WR_RDMA_WRITE:
2449 case IB_WR_RDMA_WRITE_WITH_IMM:
2450 case IB_WR_RDMA_READ:
2451 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2453 case IB_WR_ATOMIC_CMP_AND_SWP:
2454 case IB_WR_ATOMIC_FETCH_AND_ADD:
2455 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2457 case IB_WR_RDMA_READ_WITH_INV:
2458 dev_err(rdev_to_dev(qp->rdev),
2459 "RDMA Read with Invalidate is not supported");
2462 case IB_WR_LOCAL_INV:
2463 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2466 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2469 /* Unsupported WRs */
2470 dev_err(rdev_to_dev(qp->rdev),
2471 "WR (%#x) is not supported", wr->opcode);
2476 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2479 dev_err(rdev_to_dev(qp->rdev),
2480 "post_send failed op:%#x qps = %#x rc = %d\n",
2481 wr->opcode, qp->qplib_qp.state, rc);
2487 bnxt_qplib_post_send_db(&qp->qplib_qp);
2488 bnxt_ud_qp_hw_stall_workaround(qp);
2489 spin_unlock_irqrestore(&qp->sq_lock, flags);
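/* Post a receive WR on the driver-internal shadow GSI QP; used when
 * reflecting raw QP1 packets to the shadow QP.
 */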
2494 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2495 struct bnxt_re_qp *qp,
2496 struct ib_recv_wr *wr)
2498 struct bnxt_qplib_swqe wqe;
2501 memset(&wqe, 0, sizeof(wqe));
2504 memset(&wqe, 0, sizeof(wqe));
2507 wqe.num_sge = wr->num_sge;
2508 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2509 dev_err(rdev_to_dev(rdev),
2510 "Limit exceeded for Receive SGEs");
2514 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2515 wqe.wr_id = wr->wr_id;
2516 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2518 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2525 bnxt_qplib_post_recv_db(&qp->qplib_qp);
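/* ib post_recv verb: GSI QPs route the buffer through the shadow-QP
 * receive setup; the RQ doorbell is rung every BNXT_RE_RQ_WQE_THRESHOLD
 * RQEs and once more after the loop.
 */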
2529 int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2530 struct ib_recv_wr **bad_wr)
2532 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2533 struct bnxt_qplib_swqe wqe;
2534 int rc = 0, payload_sz = 0;
2535 unsigned long flags;
2538 spin_lock_irqsave(&qp->rq_lock, flags);
2541 memset(&wqe, 0, sizeof(wqe));
2544 wqe.num_sge = wr->num_sge;
2545 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2546 dev_err(rdev_to_dev(qp->rdev),
2547 "Limit exceeded for Receive SGEs");
2553 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2555 wqe.wr_id = wr->wr_id;
2556 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2558 if (ib_qp->qp_type == IB_QPT_GSI)
2559 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2562 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2568 /* Ring the DB if the number of RQEs posted reaches a threshold value */
2569 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2570 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2578 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2580 spin_unlock_irqrestore(&qp->rq_lock, flags);
2585 /* Completion Queues */
2586 int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2589 struct bnxt_re_cq *cq;
2590 struct bnxt_qplib_nq *nq;
2591 struct bnxt_re_dev *rdev;
2593 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2595 nq = cq->qplib_cq.nq;
2597 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2599 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2602 if (!IS_ERR_OR_NULL(cq->umem))
2603 ib_umem_release(cq->umem);
2605 atomic_dec(&rdev->cq_count);
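/* Create a CQ: the requested depth is validated against max_cq_wqes and
 * rounded up to a power of two. User CQs map the CQ area passed in udata
 * via ib_umem; kernel CQs use the privileged DPI and allocate the cql
 * shadow array used by bnxt_re_poll_cq(). The completion NQ is chosen
 * round-robin across the available MSI-X vectors.
 */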
2613 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2614 const struct ib_cq_init_attr *attr,
2615 struct ib_ucontext *context,
2616 struct ib_udata *udata)
2618 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2619 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2620 struct bnxt_re_cq *cq = NULL;
2622 int cqe = attr->cqe;
2623 struct bnxt_qplib_nq *nq = NULL;
2624 unsigned int nq_alloc_cnt;
2626 /* Validate CQ fields */
2627 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2628 dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2629 return ERR_PTR(-EINVAL);
2631 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2633 return ERR_PTR(-ENOMEM);
2636 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2638 entries = roundup_pow_of_two(cqe + 1);
2639 if (entries > dev_attr->max_cq_wqes + 1)
2640 entries = dev_attr->max_cq_wqes + 1;
2643 struct bnxt_re_cq_req req;
2644 struct bnxt_re_ucontext *uctx = container_of
2646 struct bnxt_re_ucontext,
2648 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2653 cq->umem = ib_umem_get(context, req.cq_va,
2654 entries * sizeof(struct cq_base),
2655 IB_ACCESS_LOCAL_WRITE, 1);
2656 if (IS_ERR(cq->umem)) {
2657 rc = PTR_ERR(cq->umem);
2660 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2661 cq->qplib_cq.nmap = cq->umem->nmap;
2662 cq->qplib_cq.dpi = &uctx->dpi;
2664 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2665 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2672 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2673 cq->qplib_cq.sghead = NULL;
2674 cq->qplib_cq.nmap = 0;
2676 /*
2677 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is
2678 * used for getting the NQ index.
2679 */
2680 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2681 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2682 cq->qplib_cq.max_wqe = entries;
2683 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2684 cq->qplib_cq.nq = nq;
2686 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2688 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2692 cq->ib_cq.cqe = entries;
2693 cq->cq_period = cq->qplib_cq.period;
2696 atomic_inc(&rdev->cq_count);
2699 struct bnxt_re_cq_resp resp;
2701 resp.cqid = cq->qplib_cq.id;
2702 resp.tail = cq->qplib_cq.hwq.cons;
2703 resp.phase = cq->qplib_cq.period;
2705 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2707 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2708 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2717 ib_umem_release(cq->umem);
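/* The __*_to_ib_wc_status() helpers translate HW completion status codes
 * (requester, raw QP1 and RC responder) into IB work completion statuses;
 * unrecognized codes are reported as IB_WC_GENERAL_ERR.
 */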
2724 static u8 __req_to_ib_wc_status(u8 qstatus)
2727 case CQ_REQ_STATUS_OK:
2728 return IB_WC_SUCCESS;
2729 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2730 return IB_WC_BAD_RESP_ERR;
2731 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2732 return IB_WC_LOC_LEN_ERR;
2733 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2734 return IB_WC_LOC_QP_OP_ERR;
2735 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2736 return IB_WC_LOC_PROT_ERR;
2737 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2738 return IB_WC_GENERAL_ERR;
2739 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2740 return IB_WC_REM_INV_REQ_ERR;
2741 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2742 return IB_WC_REM_ACCESS_ERR;
2743 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2744 return IB_WC_REM_OP_ERR;
2745 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2746 return IB_WC_RNR_RETRY_EXC_ERR;
2747 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2748 return IB_WC_RETRY_EXC_ERR;
2749 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2750 return IB_WC_WR_FLUSH_ERR;
2752 return IB_WC_GENERAL_ERR;
2757 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2760 case CQ_RES_RAWETH_QP1_STATUS_OK:
2761 return IB_WC_SUCCESS;
2762 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2763 return IB_WC_LOC_ACCESS_ERR;
2764 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2765 return IB_WC_LOC_LEN_ERR;
2766 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2767 return IB_WC_LOC_PROT_ERR;
2768 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2769 return IB_WC_LOC_QP_OP_ERR;
2770 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2771 return IB_WC_GENERAL_ERR;
2772 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2773 return IB_WC_WR_FLUSH_ERR;
2774 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2775 return IB_WC_WR_FLUSH_ERR;
2777 return IB_WC_GENERAL_ERR;
2781 static u8 __rc_to_ib_wc_status(u8 qstatus)
2784 case CQ_RES_RC_STATUS_OK:
2785 return IB_WC_SUCCESS;
2786 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2787 return IB_WC_LOC_ACCESS_ERR;
2788 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2789 return IB_WC_LOC_LEN_ERR;
2790 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2791 return IB_WC_LOC_PROT_ERR;
2792 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2793 return IB_WC_LOC_QP_OP_ERR;
2794 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2795 return IB_WC_GENERAL_ERR;
2796 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2797 return IB_WC_REM_INV_REQ_ERR;
2798 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2799 return IB_WC_WR_FLUSH_ERR;
2800 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2801 return IB_WC_WR_FLUSH_ERR;
2803 return IB_WC_GENERAL_ERR;
2807 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2809 switch (cqe->type) {
2810 case BNXT_QPLIB_SWQE_TYPE_SEND:
2811 wc->opcode = IB_WC_SEND;
2813 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2814 wc->opcode = IB_WC_SEND;
2815 wc->wc_flags |= IB_WC_WITH_IMM;
2817 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2818 wc->opcode = IB_WC_SEND;
2819 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2821 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2822 wc->opcode = IB_WC_RDMA_WRITE;
2824 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2825 wc->opcode = IB_WC_RDMA_WRITE;
2826 wc->wc_flags |= IB_WC_WITH_IMM;
2828 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2829 wc->opcode = IB_WC_RDMA_READ;
2831 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2832 wc->opcode = IB_WC_COMP_SWAP;
2834 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2835 wc->opcode = IB_WC_FETCH_ADD;
2837 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2838 wc->opcode = IB_WC_LOCAL_INV;
2840 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2841 wc->opcode = IB_WC_REG_MR;
2844 wc->opcode = IB_WC_SEND;
2848 wc->status = __req_to_ib_wc_status(cqe->status);
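/* Classify a raw QP1 completion as RoCE v1, RoCE v2/IPv4 or RoCE v2/IPv6
 * based on the itype bits in raweth_qp1_flags and the checksum-calculated
 * and ip_type bits in raweth_qp1_flags2.
 */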
2851 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2852 u16 raweth_qp1_flags2)
2854 bool is_ipv6 = false, is_ipv4 = false;
2856 /* raweth_qp1_flags Bit 9-6 indicates itype */
2857 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2858 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2861 if (raweth_qp1_flags2 &
2862 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2864 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2865 /* raweth_qp1_flags2 Bit 8 indicates ip_type: 0 - IPv4, 1 - IPv6 */
2866 (raweth_qp1_flags2 &
2867 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2868 (is_ipv6 = true) : (is_ipv4 = true);
2870 BNXT_RE_ROCEV2_IPV6_PACKET :
2871 BNXT_RE_ROCEV2_IPV4_PACKET);
2873 return BNXT_RE_ROCE_V1_PACKET;
2877 static int bnxt_re_to_ib_nw_type(int nw_type)
2879 u8 nw_hdr_type = 0xFF;
2882 case BNXT_RE_ROCE_V1_PACKET:
2883 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2885 case BNXT_RE_ROCEV2_IPV4_PACKET:
2886 nw_hdr_type = RDMA_NETWORK_IPV4;
2888 case BNXT_RE_ROCEV2_IPV6_PACKET:
2889 nw_hdr_type = RDMA_NETWORK_IPV6;
2895 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2899 struct ethhdr *eth_hdr;
2903 tmp_buf = (u8 *)rq_hdr_buf;
2905 * If the dest mac is not the same as the I/F mac, this could be
2906 * a loopback address or a multicast address; check whether it is
2907 * a loopback packet.
2909 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2911 /* Check the ether type */
2912 eth_hdr = (struct ethhdr *)tmp_buf;
2913 eth_type = ntohs(eth_hdr->h_proto);
2921 struct udphdr *udp_hdr;
2923 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2924 sizeof(struct ipv6hdr));
2925 tmp_buf += sizeof(struct ethhdr) + len;
2926 udp_hdr = (struct udphdr *)tmp_buf;
2927 if (ntohs(udp_hdr->dest) ==
2940 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2941 struct bnxt_qplib_cqe *cqe)
2943 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2944 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2945 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2946 struct ib_send_wr *swr;
2947 struct ib_ud_wr udwr;
2948 struct ib_recv_wr rwr;
2952 dma_addr_t rq_hdr_buf_map;
2953 dma_addr_t shrq_hdr_buf_map;
2956 struct ib_sge s_sge[2];
2957 struct ib_sge r_sge[2];
2960 memset(&udwr, 0, sizeof(udwr));
2961 memset(&rwr, 0, sizeof(rwr));
2962 memset(&s_sge, 0, sizeof(s_sge));
2963 memset(&r_sge, 0, sizeof(r_sge));
2966 tbl_idx = cqe->wr_id;
2968 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2969 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2970 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2973 /* Shadow QP header buffer */
2974 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2976 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2978 /* Store this cqe */
2979 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2980 sqp_entry->qp1_qp = qp1_qp;
2982 /* Find packet type from the cqe */
2984 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2985 cqe->raweth_qp1_flags2);
2987 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2991 /* Adjust the offset for the user buffer and post in the rq */
2993 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2997 * QP1 loopback packet has 4 bytes of internal header before
2998 * ether header. Skip these four bytes.
3000 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3003 /* First send SGE. Skip the ether header */
3004 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3006 s_sge[0].lkey = 0xFFFFFFFF;
3007 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3008 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3010 /* Second Send SGE */
3011 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3012 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3013 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3015 s_sge[1].lkey = 0xFFFFFFFF;
3016 s_sge[1].length = 256;
3018 /* First recv SGE */
3020 r_sge[0].addr = shrq_hdr_buf_map;
3021 r_sge[0].lkey = 0xFFFFFFFF;
3022 r_sge[0].length = 40;
3024 r_sge[1].addr = sqp_entry->sge.addr + offset;
3025 r_sge[1].lkey = sqp_entry->sge.lkey;
3026 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3028 /* Create receive work request */
3030 rwr.sg_list = r_sge;
3031 rwr.wr_id = tbl_idx;
3034 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
3036 dev_err(rdev_to_dev(rdev),
3037 "Failed to post Rx buffers to shadow QP");
3042 swr->sg_list = s_sge;
3043 swr->wr_id = tbl_idx;
3044 swr->opcode = IB_WR_SEND;
3047 udwr.ah = &rdev->sqp_ah->ib_ah;
3048 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
3049 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
3051 /* Post the received data on the shadow QP's send queue */
3052 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
3057 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3058 struct bnxt_qplib_cqe *cqe)
3060 wc->opcode = IB_WC_RECV;
3061 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3062 wc->wc_flags |= IB_WC_GRH;
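/* Extract the VLAN id and priority from the raw QP1 CQE metadata when the
 * CQE reports META_FORMAT_VLAN and the TPID is 802.1Q.
 */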
3065 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3072 metadata = orig_cqe->raweth_qp1_metadata;
3073 if (orig_cqe->raweth_qp1_flags2 &
3074 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3076 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3077 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3078 if (tpid == ETH_P_8021Q) {
3080 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3082 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3083 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3091 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3092 struct bnxt_qplib_cqe *cqe)
3094 wc->opcode = IB_WC_RECV;
3095 wc->status = __rc_to_ib_wc_status(cqe->status);
3097 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3098 wc->wc_flags |= IB_WC_WITH_IMM;
3099 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3100 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3101 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3102 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3103 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
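/* A receive completion on the shadow GSI QP is reported to the ULP as if
 * it had arrived on the real QP1: the CQE stashed in sqp_tbl when the raw
 * packet was received supplies wr_id, length, smac, VLAN and network
 * header type.
 */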
3106 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3108 struct bnxt_qplib_cqe *cqe)
3110 struct bnxt_re_dev *rdev = qp->rdev;
3111 struct bnxt_re_qp *qp1_qp = NULL;
3112 struct bnxt_qplib_cqe *orig_cqe = NULL;
3113 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3119 tbl_idx = cqe->wr_id;
3121 sqp_entry = &rdev->sqp_tbl[tbl_idx];
3122 qp1_qp = sqp_entry->qp1_qp;
3123 orig_cqe = &sqp_entry->cqe;
3125 wc->wr_id = sqp_entry->wrid;
3126 wc->byte_len = orig_cqe->length;
3127 wc->qp = &qp1_qp->ib_qp;
3129 wc->ex.imm_data = orig_cqe->immdata;
3130 wc->src_qp = orig_cqe->src_qp;
3131 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3132 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3133 wc->vlan_id = vlan_id;
3135 wc->wc_flags |= IB_WC_WITH_VLAN;
3138 wc->vendor_err = orig_cqe->status;
3140 wc->opcode = IB_WC_RECV;
3141 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3142 wc->wc_flags |= IB_WC_GRH;
3144 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3145 orig_cqe->raweth_qp1_flags2);
3147 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3148 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3152 static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
3153 struct bnxt_qplib_cqe *cqe)
3155 wc->opcode = IB_WC_RECV;
3156 wc->status = __rc_to_ib_wc_status(cqe->status);
3158 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3159 wc->wc_flags |= IB_WC_WITH_IMM;
3160 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3161 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3162 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3163 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3164 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
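/* Post a "phantom" fence-MW bind WQE under the SQ lock. bnxt_re_poll_cq()
 * issues one whenever the qplib layer sets sq->send_phantom while polling.
 */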
3167 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3169 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3170 unsigned long flags;
3173 spin_lock_irqsave(&qp->sq_lock, flags);
3175 rc = bnxt_re_bind_fence_mw(lib_qp);
3177 lib_qp->sq.phantom_wqe_cnt++;
3178 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3179 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3180 lib_qp->id, lib_qp->sq.hwq.prod,
3181 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3182 lib_qp->sq.phantom_wqe_cnt);
3185 spin_unlock_irqrestore(&qp->sq_lock, flags);
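/* Poll up to min(num_entries, cq->max_cql) CQEs into the cql shadow array,
 * append any flush-list entries, then transcribe each qplib CQE into an
 * ib_wc. Completions belonging to the GSI/shadow QP pair get special
 * handling (raw packet reflection, reporting the stored completion).
 */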
3189 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3191 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3192 struct bnxt_re_qp *qp;
3193 struct bnxt_qplib_cqe *cqe;
3194 int i, ncqe, budget;
3195 struct bnxt_qplib_q *sq;
3196 struct bnxt_qplib_qp *lib_qp;
3198 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3199 unsigned long flags;
3201 spin_lock_irqsave(&cq->cq_lock, flags);
3202 budget = min_t(u32, num_entries, cq->max_cql);
3203 num_entries = budget;
3205 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3211 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3214 if (sq->send_phantom) {
3215 qp = container_of(lib_qp,
3216 struct bnxt_re_qp, qplib_qp);
3217 if (send_phantom_wqe(qp) == -ENOMEM)
3218 dev_err(rdev_to_dev(cq->rdev),
3219 "Phantom failed! Scheduled to send again\n");
3221 sq->send_phantom = false;
3225 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3232 for (i = 0; i < ncqe; i++, cqe++) {
3233 /* Transcribe each qplib_wqe back to ib_wc */
3234 memset(wc, 0, sizeof(*wc));
3236 wc->wr_id = cqe->wr_id;
3237 wc->byte_len = cqe->length;
3239 ((struct bnxt_qplib_qp *)
3240 (unsigned long)(cqe->qp_handle),
3241 struct bnxt_re_qp, qplib_qp);
3243 dev_err(rdev_to_dev(cq->rdev),
3244 "POLL CQ : bad QP handle");
3247 wc->qp = &qp->ib_qp;
3248 wc->ex.imm_data = cqe->immdata;
3249 wc->src_qp = cqe->src_qp;
3250 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3252 wc->vendor_err = cqe->status;
3254 switch (cqe->opcode) {
3255 case CQ_BASE_CQE_TYPE_REQ:
3256 if (qp->qplib_qp.id ==
3257 qp->rdev->qp1_sqp->qplib_qp.id) {
3258 /* Handle this completion with
3259 * the stored completion
3261 memset(wc, 0, sizeof(*wc));
3264 bnxt_re_process_req_wc(wc, cqe);
3266 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3270 rc = bnxt_re_process_raw_qp_pkt_rx
3273 memset(wc, 0, sizeof(*wc));
3278 /* Errors need not be looped back.
3279 * But change the wr_id to the one
3280 * stored in the table
3282 tbl_idx = cqe->wr_id;
3283 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3284 wc->wr_id = sqp_entry->wrid;
3285 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3287 case CQ_BASE_CQE_TYPE_RES_RC:
3288 bnxt_re_process_res_rc_wc(wc, cqe);
3290 case CQ_BASE_CQE_TYPE_RES_UD:
3291 if (qp->qplib_qp.id ==
3292 qp->rdev->qp1_sqp->qplib_qp.id) {
3293 /* Handle this completion with
3294 * the stored completion
3299 bnxt_re_process_res_shadow_qp_wc
3304 bnxt_re_process_res_ud_wc(wc, cqe);
3307 dev_err(rdev_to_dev(cq->rdev),
3308 "POLL CQ : type 0x%x not handled",
3317 spin_unlock_irqrestore(&cq->cq_lock, flags);
3318 return num_entries - budget;
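/* Arm the CQ for the next (ARMALL) or next solicited (ARMSE) completion.
 * With IB_CQ_REPORT_MISSED_EVENTS the CQ is also checked for pending
 * completions so the caller knows to poll again.
 */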
3321 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3322 enum ib_cq_notify_flags ib_cqn_flags)
3324 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3325 int type = 0, rc = 0;
3326 unsigned long flags;
3328 spin_lock_irqsave(&cq->cq_lock, flags);
3329 /* Trigger on the very next completion */
3330 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3331 type = DBR_DBR_TYPE_CQ_ARMALL;
3332 /* Trigger on the next solicited completion */
3333 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3334 type = DBR_DBR_TYPE_CQ_ARMSE;
3336 /* Poll to see if there are missed events */
3337 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3338 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3342 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3345 spin_unlock_irqrestore(&cq->cq_lock, flags);
3349 /* Memory Regions */
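/* DMA MR: allocate a privileged MR registered at address 0 covering the
 * whole address space (total_size = -1); the rkey mirrors the lkey when
 * remote access is requested.
 */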
3350 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3352 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3353 struct bnxt_re_dev *rdev = pd->rdev;
3354 struct bnxt_re_mr *mr;
3358 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3360 return ERR_PTR(-ENOMEM);
3363 mr->qplib_mr.pd = &pd->qplib_pd;
3364 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3365 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3367 /* Allocate and register 0 as the address */
3368 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3372 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3373 mr->qplib_mr.total_size = -1; /* Infinite length */
3374 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3379 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3380 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3381 IB_ACCESS_REMOTE_ATOMIC))
3382 mr->ib_mr.rkey = mr->ib_mr.lkey;
3383 atomic_inc(&rdev->mr_count);
3388 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3394 int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3396 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3397 struct bnxt_re_dev *rdev = mr->rdev;
3400 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3402 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3405 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3411 if (!IS_ERR_OR_NULL(mr->ib_umem))
3412 ib_umem_release(mr->ib_umem);
3415 atomic_dec(&rdev->mr_count);
3419 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3421 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3423 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3426 mr->pages[mr->npages++] = addr;
3430 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3431 unsigned int *sg_offset)
3433 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3436 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
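/* Allocate a fast-registration MR (IB_MR_TYPE_MEM_REG only): reserve the
 * MRW, the page array filled by bnxt_re_set_page() and the HW fast-reg
 * page list, capped at MAX_PBL_LVL_1_PGS pages.
 */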
3439 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3442 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3443 struct bnxt_re_dev *rdev = pd->rdev;
3444 struct bnxt_re_mr *mr = NULL;
3447 if (type != IB_MR_TYPE_MEM_REG) {
3448 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3449 return ERR_PTR(-EINVAL);
3451 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3452 return ERR_PTR(-EINVAL);
3454 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3456 return ERR_PTR(-ENOMEM);
3459 mr->qplib_mr.pd = &pd->qplib_pd;
3460 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3461 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3463 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3467 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3468 mr->ib_mr.rkey = mr->ib_mr.lkey;
3470 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3475 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3476 &mr->qplib_frpl, max_num_sg);
3478 dev_err(rdev_to_dev(rdev),
3479 "Failed to allocate HW FR page list");
3483 atomic_inc(&rdev->mr_count);
3489 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3495 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3496 struct ib_udata *udata)
3498 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3499 struct bnxt_re_dev *rdev = pd->rdev;
3500 struct bnxt_re_mw *mw;
3503 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3505 return ERR_PTR(-ENOMEM);
3507 mw->qplib_mw.pd = &pd->qplib_pd;
3509 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3510 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3511 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3512 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3514 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3517 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3519 atomic_inc(&rdev->mw_count);
3527 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3529 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3530 struct bnxt_re_dev *rdev = mw->rdev;
3533 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3535 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3540 atomic_dec(&rdev->mw_count);
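/* Only page sizes that the REGISTER_MR command can describe in its PBL
 * page-size field are accepted for MR registration.
 */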
3544 static int bnxt_re_page_size_ok(int page_shift)
3546 switch (page_shift) {
3547 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3548 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3549 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3550 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3551 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3552 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3553 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3554 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3561 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3564 u64 *pbl_tbl = pbl_tbl_orig;
3566 u64 page_mask = (1ULL << page_shift) - 1;
3568 struct scatterlist *sg;
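/* Walk the pinned umem scatterlist and emit one PBL entry per HW page of
 * size (1 << page_shift); the return value is the number of entries.
 */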
3571 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3572 pages = sg_dma_len(sg) >> PAGE_SHIFT;
3573 for (i = 0; i < pages; i++) {
3574 paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
3575 if (pbl_tbl == pbl_tbl_orig)
3576 *pbl_tbl++ = paddr & ~page_mask;
3577 else if ((paddr & page_mask) == 0)
3581 return pbl_tbl - pbl_tbl_orig;
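/* Register a user MR: pin the buffer with ib_umem_get(), validate the
 * umem page size, build the PBL with fill_umem_pbl_tbl() and register it
 * through bnxt_qplib_reg_mr(). Without hugetlb backing the length is
 * capped at BNXT_RE_MAX_MR_SIZE_LOW.
 */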
3585 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3586 u64 virt_addr, int mr_access_flags,
3587 struct ib_udata *udata)
3589 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3590 struct bnxt_re_dev *rdev = pd->rdev;
3591 struct bnxt_re_mr *mr;
3592 struct ib_umem *umem;
3593 u64 *pbl_tbl = NULL;
3594 int umem_pgs, page_shift, rc;
3596 if (length > BNXT_RE_MAX_MR_SIZE) {
3597 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported: %lld\n",
3598 length, BNXT_RE_MAX_MR_SIZE);
3599 return ERR_PTR(-ENOMEM);
3602 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3604 return ERR_PTR(-ENOMEM);
3607 mr->qplib_mr.pd = &pd->qplib_pd;
3608 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3609 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3611 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3613 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3616 /* The fixed portion of the rkey is the same as the lkey */
3617 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3619 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3620 mr_access_flags, 0);
3622 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3628 mr->qplib_mr.va = virt_addr;
3629 umem_pgs = ib_umem_page_count(umem);
3631 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3635 mr->qplib_mr.total_size = length;
3637 pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3643 page_shift = umem->page_shift;
3645 if (!bnxt_re_page_size_ok(page_shift)) {
3646 dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3651 if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
3652 dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
3653 length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3657 if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
3658 page_shift = BNXT_RE_PAGE_SHIFT_2M;
3659 dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
3663 /* Map umem buf ptrs to the PBL */
3664 umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3665 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3666 umem_pgs, false, 1 << page_shift);
3668 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3674 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3675 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3676 atomic_inc(&rdev->mr_count);
3682 ib_umem_release(umem);
3684 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
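/* Allocate a user context: the uverbs ABI version must match, a shared
 * page is allocated for the context, and the device limits are returned
 * to user space in bnxt_re_uctx_resp.
 */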
3690 struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3691 struct ib_udata *udata)
3693 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3694 struct bnxt_re_uctx_resp resp;
3695 struct bnxt_re_ucontext *uctx;
3696 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3699 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3700 ibdev->uverbs_abi_ver);
3702 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3703 dev_dbg(rdev_to_dev(rdev), "requested ABI version differs from the device ABI version %d",
3704 BNXT_RE_ABI_VERSION);
3705 return ERR_PTR(-EPERM);
3708 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3710 return ERR_PTR(-ENOMEM);
3714 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3719 spin_lock_init(&uctx->sh_lock);
3721 resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3722 resp.max_qp = rdev->qplib_ctx.qpc_count;
3723 resp.pg_size = PAGE_SIZE;
3724 resp.cqe_sz = sizeof(struct cq_base);
3725 resp.max_cqd = dev_attr->max_cq_wqes;
3728 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3730 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3735 return &uctx->ib_uctx;
3737 free_page((unsigned long)uctx->shpg);
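/* Tear down a user context: free the shared page and release the DPI if
 * the application had allocated one.
 */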
3744 int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3746 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3747 struct bnxt_re_ucontext,
3750 struct bnxt_re_dev *rdev = uctx->rdev;
3754 free_page((unsigned long)uctx->shpg);
3756 if (uctx->dpi.dbr) {
3757 /* Free the DPI; it was allocated when the application created its
3758 * first PD. Mark the context dpi as NULL afterwards.
3759 */
3760 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3761 &rdev->qplib_res.dpi_tbl,
3764 dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3765 /* Don't fail, continue */
3766 uctx->dpi.dbr = NULL;
3773 /* Helper function to mmap the virtual memory from user app */
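/* A zero vm_pgoff maps the per-context shared page; a non-zero vm_pgoff
 * is treated as the DPI doorbell PFN and is mapped non-cached.
 */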
3774 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3776 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3777 struct bnxt_re_ucontext,
3779 struct bnxt_re_dev *rdev = uctx->rdev;
3782 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3785 if (vma->vm_pgoff) {
3786 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3787 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3788 PAGE_SIZE, vma->vm_page_prot)) {
3789 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3793 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3794 if (remap_pfn_range(vma, vma->vm_start,
3795 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3796 dev_err(rdev_to_dev(rdev),
3797 "Failed to map shared page");