drivers/infiniband/hw/bnxt_re/ib_verbs.c
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>

static int __from_ib_access_flags(int iflags)
{
        int qflags = 0;

        if (iflags & IB_ACCESS_LOCAL_WRITE)
                qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
        if (iflags & IB_ACCESS_REMOTE_READ)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
        if (iflags & IB_ACCESS_REMOTE_WRITE)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
        if (iflags & IB_ACCESS_REMOTE_ATOMIC)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
        if (iflags & IB_ACCESS_MW_BIND)
                qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
        if (iflags & IB_ZERO_BASED)
                qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
        if (iflags & IB_ACCESS_ON_DEMAND)
                qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
        return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
        enum ib_access_flags iflags = 0;

        if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
                iflags |= IB_ACCESS_LOCAL_WRITE;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
                iflags |= IB_ACCESS_REMOTE_WRITE;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
                iflags |= IB_ACCESS_REMOTE_READ;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
                iflags |= IB_ACCESS_REMOTE_ATOMIC;
        if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
                iflags |= IB_ACCESS_MW_BIND;
        if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
                iflags |= IB_ZERO_BASED;
        if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
                iflags |= IB_ACCESS_ON_DEMAND;
        return iflags;
}

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
                             struct bnxt_qplib_sge *sg_list, int num)
{
        int i, total = 0;

        for (i = 0; i < num; i++) {
                sg_list[i].addr = ib_sg_list[i].addr;
                sg_list[i].lkey = ib_sg_list[i].lkey;
                sg_list[i].size = ib_sg_list[i].length;
                total += sg_list[i].size;
        }
        return total;
}
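
/*
 * Illustrative sketch, not part of the driver: how the two access-flag
 * converters above round-trip a typical flag set. The values shown are
 * hypothetical.
 *
 *	int q = __from_ib_access_flags(IB_ACCESS_LOCAL_WRITE |
 *				       IB_ACCESS_REMOTE_READ);
 *	// q == (BNXT_QPLIB_ACCESS_LOCAL_WRITE | BNXT_QPLIB_ACCESS_REMOTE_READ)
 *	WARN_ON(__to_ib_access_flags(q) !=
 *		(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ));
 */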

/* Device */
int bnxt_re_query_device(struct ib_device *ibdev,
                         struct ib_device_attr *ib_attr,
                         struct ib_udata *udata)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        memset(ib_attr, 0, sizeof(*ib_attr));
        memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
               min(sizeof(dev_attr->fw_ver),
                   sizeof(ib_attr->fw_ver)));
        bnxt_qplib_get_guid(rdev->netdev->dev_addr,
                            (u8 *)&ib_attr->sys_image_guid);
        ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
        ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;

        ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
        ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
        ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
        ib_attr->max_qp = dev_attr->max_qp;
        ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
        ib_attr->device_cap_flags =
                                    IB_DEVICE_CURR_QP_STATE_MOD
                                    | IB_DEVICE_RC_RNR_NAK_GEN
                                    | IB_DEVICE_SHUTDOWN_PORT
                                    | IB_DEVICE_SYS_IMAGE_GUID
                                    | IB_DEVICE_LOCAL_DMA_LKEY
                                    | IB_DEVICE_RESIZE_MAX_WR
                                    | IB_DEVICE_PORT_ACTIVE_EVENT
                                    | IB_DEVICE_N_NOTIFY_CQ
                                    | IB_DEVICE_MEM_WINDOW
                                    | IB_DEVICE_MEM_WINDOW_TYPE_2B
                                    | IB_DEVICE_MEM_MGT_EXTENSIONS;
        ib_attr->max_send_sge = dev_attr->max_qp_sges;
        ib_attr->max_recv_sge = dev_attr->max_qp_sges;
        ib_attr->max_sge_rd = dev_attr->max_qp_sges;
        ib_attr->max_cq = dev_attr->max_cq;
        ib_attr->max_cqe = dev_attr->max_cq_wqes;
        ib_attr->max_mr = dev_attr->max_mr;
        ib_attr->max_pd = dev_attr->max_pd;
        ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
        ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
        ib_attr->atomic_cap = IB_ATOMIC_NONE;
        ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;

        ib_attr->max_ee_rd_atom = 0;
        ib_attr->max_res_rd_atom = 0;
        ib_attr->max_ee_init_rd_atom = 0;
        ib_attr->max_ee = 0;
        ib_attr->max_rdd = 0;
        ib_attr->max_mw = dev_attr->max_mw;
        ib_attr->max_raw_ipv6_qp = 0;
        ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
        ib_attr->max_mcast_grp = 0;
        ib_attr->max_mcast_qp_attach = 0;
        ib_attr->max_total_mcast_qp_attach = 0;
        ib_attr->max_ah = dev_attr->max_ah;

        ib_attr->max_srq = dev_attr->max_srq;
        ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
        ib_attr->max_srq_sge = dev_attr->max_srq_sges;

        ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

        ib_attr->max_pkeys = 1;
        ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
        return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
                       struct ib_port_attr *port_attr)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        memset(port_attr, 0, sizeof(*port_attr));

        if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
                port_attr->state = IB_PORT_ACTIVE;
                port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        } else {
                port_attr->state = IB_PORT_DOWN;
                port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        }
        port_attr->max_mtu = IB_MTU_4096;
        port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
        port_attr->gid_tbl_len = dev_attr->max_sgid;
        port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                                    IB_PORT_DEVICE_MGMT_SUP |
                                    IB_PORT_VENDOR_CLASS_SUP;
        port_attr->ip_gids = true;

        port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
        port_attr->bad_pkey_cntr = 0;
        port_attr->qkey_viol_cntr = 0;
        port_attr->pkey_tbl_len = dev_attr->max_pkey;
        port_attr->lid = 0;
        port_attr->sm_lid = 0;
        port_attr->lmc = 0;
        port_attr->max_vl_num = 4;
        port_attr->sm_sl = 0;
        port_attr->subnet_timeout = 0;
        port_attr->init_type_reply = 0;
        port_attr->active_speed = rdev->active_speed;
        port_attr->active_width = rdev->active_width;

        return 0;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr port_attr;

        if (bnxt_re_query_port(ibdev, port_num, &port_attr))
                return -EINVAL;

        immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
        immutable->gid_tbl_len = port_attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
        immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        return 0;
}

void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

        snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
                 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
                 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
                       u16 index, u16 *pkey)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

        /* Ignore port_num */

        memset(pkey, 0, sizeof(*pkey));
        return bnxt_qplib_get_pkey(&rdev->qplib_res,
                                   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
                      int index, union ib_gid *gid)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        int rc = 0;

        /* Ignore port_num */
        memset(gid, 0, sizeof(*gid));
        rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
                                 &rdev->qplib_res.sgid_tbl, index,
                                 (struct bnxt_qplib_gid *)gid);
        return rc;
}

int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
{
        int rc = 0;
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
        struct bnxt_qplib_gid *gid_to_del;
        u16 vlan_id = 0xFFFF;

        /* Delete the entry from the hardware */
        ctx = *context;
        if (!ctx)
                return -EINVAL;

        if (sgid_tbl && sgid_tbl->active) {
                if (ctx->idx >= sgid_tbl->max)
                        return -EINVAL;
                gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
                vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
                /* DEL_GID is called in WQ context (netdevice_event_work_handler)
                 * or via the ib_unregister_device path. In the former case QP1
                 * may not be destroyed yet, in which case just return as FW
                 * needs that entry to be present and will fail its deletion.
                 * We could get invoked again after QP1 is destroyed OR get an
                 * ADD_GID call with a different GID value for the same index
                 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
                 */
                if (ctx->idx == 0 &&
                    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
                    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
                        ibdev_dbg(&rdev->ibdev,
                                  "Trying to delete GID0 while QP1 is alive\n");
                        return -EFAULT;
                }
                ctx->refcnt--;
                if (!ctx->refcnt) {
                        rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
                                                 vlan_id, true);
                        if (rc) {
                                ibdev_err(&rdev->ibdev,
                                          "Failed to remove GID: %#x", rc);
                        } else {
                                ctx_tbl = sgid_tbl->ctx;
                                ctx_tbl[ctx->idx] = NULL;
                                kfree(ctx);
                        }
                }
        } else {
                return -EINVAL;
        }
        return rc;
}

int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
        int rc;
        u32 tbl_idx = 0;
        u16 vlan_id = 0xFFFF;
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

        rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
        if (rc)
                return rc;

        rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
                                 rdev->qplib_res.netdev->dev_addr,
                                 vlan_id, true, &tbl_idx);
        if (rc == -EALREADY) {
                ctx_tbl = sgid_tbl->ctx;
                ctx_tbl[tbl_idx]->refcnt++;
                *context = ctx_tbl[tbl_idx];
                return 0;
        }

        if (rc < 0) {
                ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
                return rc;
        }

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx_tbl = sgid_tbl->ctx;
        ctx->idx = tbl_idx;
        ctx->refcnt = 1;
        ctx_tbl[tbl_idx] = ctx;
        *context = ctx;

        return rc;
}
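
/*
 * Sketch of the add/del pairing above (attr and context are hypothetical):
 * one bnxt_re_gid_ctx is kept per HW SGID slot and duplicate adds are
 * refcounted, so the HW entry is removed only on the last delete.
 *
 *	void *ctx = NULL;
 *	bnxt_re_add_gid(attr, &ctx);	// programs HW entry, refcnt = 1
 *	bnxt_re_add_gid(attr, &ctx);	// -EALREADY path, refcnt = 2
 *	bnxt_re_del_gid(attr, &ctx);	// refcnt = 1, HW entry kept
 *	bnxt_re_del_gid(attr, &ctx);	// refcnt = 0, bnxt_qplib_del_sgid()
 */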

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
                                            u32 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

#define BNXT_RE_FENCE_PBL_SIZE  DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct ib_mr *ib_mr = &fence->mr->ib_mr;
        struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

        memset(wqe, 0, sizeof(*wqe));
        wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
        wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
        wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
        wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
        wqe->bind.zero_based = false;
        wqe->bind.parent_l_key = ib_mr->lkey;
        wqe->bind.va = (u64)(unsigned long)fence->va;
        wqe->bind.length = fence->size;
        wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
        wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

        /* Save the initial rkey in fence structure for now;
         * wqe->bind.r_key will be set at (re)bind time.
         */
        fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
        struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
                                             qplib_qp);
        struct ib_pd *ib_pd = qp->ib_qp.pd;
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
        struct bnxt_qplib_swqe wqe;
        int rc;

        memcpy(&wqe, fence_wqe, sizeof(wqe));
        wqe.bind.r_key = fence->bind_rkey;
        fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

        ibdev_dbg(&qp->rdev->ibdev,
                  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
                  wqe.bind.r_key, qp->qplib_qp.id, pd);
        rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
        if (rc) {
                ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
                return rc;
        }
        bnxt_qplib_post_send_db(&qp->qplib_qp);

        return rc;
}
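
/*
 * Note on the rkey rotation above: each bind consumes the rkey staged in
 * fence->bind_rkey and immediately stages the next one via ib_inc_rkey(),
 * which increments only the low 8-bit key portion of the rkey. A sketch
 * with a hypothetical starting rkey of 0x1000:
 *
 *	bind #1: wqe.bind.r_key = 0x1000, bind_rkey becomes 0x1001
 *	bind #2: wqe.bind.r_key = 0x1001, bind_rkey becomes 0x1002
 */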

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct bnxt_re_dev *rdev = pd->rdev;
        struct device *dev = &rdev->en_dev->pdev->dev;
        struct bnxt_re_mr *mr = fence->mr;

        if (fence->mw) {
                bnxt_re_dealloc_mw(fence->mw);
                fence->mw = NULL;
        }
        if (mr) {
                if (mr->ib_mr.rkey)
                        bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
                                             true);
                if (mr->ib_mr.lkey)
                        bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
                kfree(mr);
                fence->mr = NULL;
        }
        if (fence->dma_addr) {
                dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
                                 DMA_BIDIRECTIONAL);
                fence->dma_addr = 0;
        }
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
        int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct bnxt_re_dev *rdev = pd->rdev;
        struct device *dev = &rdev->en_dev->pdev->dev;
        struct bnxt_re_mr *mr = NULL;
        dma_addr_t dma_addr = 0;
        struct ib_mw *mw;
        int rc;

        dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
                                  DMA_BIDIRECTIONAL);
        rc = dma_mapping_error(dev, dma_addr);
        if (rc) {
                ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
                rc = -EIO;
                fence->dma_addr = 0;
                goto fail;
        }
        fence->dma_addr = dma_addr;

        /* Allocate a MR */
        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr) {
                rc = -ENOMEM;
                goto fail;
        }
        fence->mr = mr;
        mr->rdev = rdev;
        mr->qplib_mr.pd = &pd->qplib_pd;
        mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
        mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
        rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
        if (rc) {
                ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
                goto fail;
        }

        /* Register MR */
        mr->ib_mr.lkey = mr->qplib_mr.lkey;
        mr->qplib_mr.va = (u64)(unsigned long)fence->va;
        mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
        rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
                               BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
        if (rc) {
                ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
                goto fail;
        }
        mr->ib_mr.rkey = mr->qplib_mr.rkey;

        /* Create a fence MW only for kernel consumers */
        mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
        if (IS_ERR(mw)) {
                ibdev_err(&rdev->ibdev,
                          "Failed to create fence-MW for PD: %p\n", pd);
                rc = PTR_ERR(mw);
                goto fail;
        }
        fence->mw = mw;

        bnxt_re_create_fence_wqe(pd);
        return 0;

fail:
        bnxt_re_destroy_fence_mr(pd);
        return rc;
}
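
/*
 * For reference, the fence setup order above is: dma_map_single() the
 * fence buffer, allocate and register a privileged MR over it, allocate
 * a type-1 MW, then pre-build the bind-WQE template. The error path
 * simply jumps to bnxt_re_destroy_fence_mr(), which tears down whatever
 * subset was created, in reverse order.
 */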

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;

        bnxt_re_destroy_fence_mr(pd);

        if (pd->qplib_pd.id)
                bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
                                      &pd->qplib_pd);
        return 0;
}

int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct ib_device *ibdev = ibpd->device;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
                udata, struct bnxt_re_ucontext, ib_uctx);
        struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
        int rc;

        pd->rdev = rdev;
        if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
                ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
                rc = -ENOMEM;
                goto fail;
        }

        if (udata) {
                struct bnxt_re_pd_resp resp;

                if (!ucntx->dpi.dbr) {
                        /* Allocate the DPI in alloc_pd so that ibv_devinfo
                         * and related applications do not fail when DPIs
                         * are depleted.
                         */
                        if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
                                                 &ucntx->dpi, ucntx)) {
                                rc = -ENOMEM;
                                goto dbfail;
                        }
                }

                resp.pdid = pd->qplib_pd.id;
                /* Still allow mapping this DBR to the new user PD. */
                resp.dpi = ucntx->dpi.dpi;
                resp.dbr = (u64)ucntx->dpi.umdbr;

                rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (rc) {
                        ibdev_err(&rdev->ibdev,
                                  "Failed to copy user response\n");
                        goto dbfail;
                }
        }

        if (!udata)
                if (bnxt_re_create_fence_mr(pd))
                        ibdev_warn(&rdev->ibdev,
                                   "Failed to create Fence-MR\n");
        return 0;
dbfail:
        bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
                              &pd->qplib_pd);
fail:
        return rc;
}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
        struct bnxt_re_dev *rdev = ah->rdev;

        bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
                              !(flags & RDMA_DESTROY_AH_SLEEPABLE));
        return 0;
}

static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
{
        u8 nw_type;

        switch (ntype) {
        case RDMA_NETWORK_IPV4:
                nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
                break;
        case RDMA_NETWORK_IPV6:
                nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
                break;
        default:
                nw_type = CMDQ_CREATE_AH_TYPE_V1;
                break;
        }
        return nw_type;
}

int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
                      struct ib_udata *udata)
{
        struct ib_pd *ib_pd = ib_ah->pd;
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
        const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
        struct bnxt_re_dev *rdev = pd->rdev;
        const struct ib_gid_attr *sgid_attr;
        struct bnxt_re_gid_ctx *ctx;
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
        u8 nw_type;
        int rc;

        if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
                ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
                return -EINVAL;
        }

        ah->rdev = rdev;
        ah->qplib_ah.pd = &pd->qplib_pd;

        /* Supply the configuration for the HW */
        memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
               sizeof(union ib_gid));
        sgid_attr = grh->sgid_attr;
        /* Get the HW context of the GID. The reference
         * of GID table entry is already taken by the caller.
         */
        ctx = rdma_read_gid_hw_context(sgid_attr);
        ah->qplib_ah.sgid_index = ctx->idx;
        ah->qplib_ah.host_sgid_index = grh->sgid_index;
        ah->qplib_ah.traffic_class = grh->traffic_class;
        ah->qplib_ah.flow_label = grh->flow_label;
        ah->qplib_ah.hop_limit = grh->hop_limit;
        ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);

        /* Get network header type for this GID */
        nw_type = rdma_gid_attr_network_type(sgid_attr);
        ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);

        memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
        rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
                                  !(init_attr->flags &
                                    RDMA_CREATE_AH_SLEEPABLE));
        if (rc) {
                ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
                return rc;
        }

        /* Write AVID to shared page. */
        if (udata) {
                struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
                        udata, struct bnxt_re_ucontext, ib_uctx);
                unsigned long flag;
                u32 *wrptr;

                spin_lock_irqsave(&uctx->sh_lock, flag);
                wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
                *wrptr = ah->qplib_ah.id;
                wmb(); /* make sure cache is updated. */
                spin_unlock_irqrestore(&uctx->sh_lock, flag);
        }

        return 0;
}

int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
        return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

        ah_attr->type = ib_ah->type;
        rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
        memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
        rdma_ah_set_grh(ah_attr, NULL, 0,
                        ah->qplib_ah.host_sgid_index,
                        0, ah->qplib_ah.traffic_class);
        rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
        rdma_ah_set_port_num(ah_attr, 1);
        rdma_ah_set_static_rate(ah_attr, 0);
        return 0;
}

unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
        __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->scq->cq_lock, flags);
        if (qp->rcq != qp->scq)
                spin_lock(&qp->rcq->cq_lock);
        else
                __acquire(&qp->rcq->cq_lock);

        return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
                        unsigned long flags)
        __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
        if (qp->rcq != qp->scq)
                spin_unlock(&qp->rcq->cq_lock);
        else
                __release(&qp->rcq->cq_lock);
        spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
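
/*
 * Usage sketch for the CQ lock pair above: callers bracket QP cleanup so
 * that both completion queues are quiesced, and only one lock is taken
 * when the QP uses the same CQ for send and receive:
 *
 *	unsigned long flags;
 *
 *	flags = bnxt_re_lock_cqs(qp);
 *	bnxt_qplib_clean_qp(&qp->qplib_qp);
 *	bnxt_re_unlock_cqs(qp, flags);
 */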

static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
        struct bnxt_re_qp *gsi_sqp;
        struct bnxt_re_ah *gsi_sah;
        struct bnxt_re_dev *rdev;
        int rc = 0;

        rdev = qp->rdev;
        gsi_sqp = rdev->gsi_ctx.gsi_sqp;
        gsi_sah = rdev->gsi_ctx.gsi_sah;

        ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
        bnxt_qplib_destroy_ah(&rdev->qplib_res,
                              &gsi_sah->qplib_ah,
                              true);
        bnxt_qplib_clean_qp(&qp->qplib_qp);

        ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
        rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
        if (rc) {
                ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
                goto fail;
        }
        bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);

        /* remove from active qp list */
        mutex_lock(&rdev->qp_lock);
        list_del(&gsi_sqp->list);
        mutex_unlock(&rdev->qp_lock);
        atomic_dec(&rdev->qp_count);

        kfree(rdev->gsi_ctx.sqp_tbl);
        kfree(gsi_sah);
        kfree(gsi_sqp);
        rdev->gsi_ctx.gsi_sqp = NULL;
        rdev->gsi_ctx.gsi_sah = NULL;
        rdev->gsi_ctx.sqp_tbl = NULL;

        return 0;
fail:
        return rc;
}

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_re_dev *rdev = qp->rdev;
        unsigned int flags;
        int rc;

        bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);

        rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
        if (rc) {
                ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
                return rc;
        }

        if (rdma_is_kernel_res(&qp->ib_qp.res)) {
                flags = bnxt_re_lock_cqs(qp);
                bnxt_qplib_clean_qp(&qp->qplib_qp);
                bnxt_re_unlock_cqs(qp, flags);
        }

        bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

        if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
                rc = bnxt_re_destroy_gsi_sqp(qp);
                if (rc)
                        goto sh_fail;
        }

        mutex_lock(&rdev->qp_lock);
        list_del(&qp->list);
        mutex_unlock(&rdev->qp_lock);
        atomic_dec(&rdev->qp_count);

        ib_umem_release(qp->rumem);
        ib_umem_release(qp->sumem);

        kfree(qp);
        return 0;
sh_fail:
        return rc;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_GSI:
                return CMDQ_CREATE_QP1_TYPE_GSI;
        case IB_QPT_RC:
                return CMDQ_CREATE_QP_TYPE_RC;
        case IB_QPT_UD:
                return CMDQ_CREATE_QP_TYPE_UD;
        default:
                return IB_QPT_MAX;
        }
}

static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
                                   int rsge, int max)
{
        if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
                rsge = max;
        return bnxt_re_get_rwqe_size(rsge);
}

static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
{
        u16 wqe_size, calc_ils;

        wqe_size = bnxt_re_get_swqe_size(nsge);
        if (ilsize) {
                calc_ils = sizeof(struct sq_send_hdr) + ilsize;
                wqe_size = max_t(u16, calc_ils, wqe_size);
                wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
        }
        return wqe_size;
}
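
/*
 * Worked example for bnxt_re_get_wqe_size(), assuming a 32-byte
 * struct sq_send_hdr and a 16-byte struct sq_sge (sizes quoted here
 * only for illustration):
 *
 *	nsge = 2, ilsize = 64
 *	wqe_size = 32 + 2 * 16 = 64	// bnxt_re_get_swqe_size(2)
 *	calc_ils = 32 + 64 = 96		// header plus inline payload
 *	wqe_size = max(96, 64) = 96
 *	wqe_size = ALIGN(96, 32) = 96	// multiple of the send header
 */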

static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
                                   struct ib_qp_init_attr *init_attr)
{
        struct bnxt_qplib_dev_attr *dev_attr;
        struct bnxt_qplib_qp *qplqp;
        struct bnxt_re_dev *rdev;
        struct bnxt_qplib_q *sq;
        int align, ilsize;

        rdev = qp->rdev;
        qplqp = &qp->qplib_qp;
        sq = &qplqp->sq;
        dev_attr = &rdev->dev_attr;

        align = sizeof(struct sq_send_hdr);
        ilsize = ALIGN(init_attr->cap.max_inline_data, align);

        sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
        if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
                return -EINVAL;
        /* In gen P4 and gen P5 backward-compatibility (static WQE) mode,
         * the WQE size is fixed at 128 bytes.
         */
        if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
            qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
                sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);

        if (init_attr->cap.max_inline_data) {
                qplqp->max_inline_data = sq->wqe_size -
                        sizeof(struct sq_send_hdr);
                init_attr->cap.max_inline_data = qplqp->max_inline_data;
                if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
                        sq->max_sge = qplqp->max_inline_data /
                                sizeof(struct sq_sge);
        }

        return 0;
}

static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
                                struct bnxt_re_qp *qp, struct ib_udata *udata)
{
        struct bnxt_qplib_qp *qplib_qp;
        struct bnxt_re_ucontext *cntx;
        struct bnxt_re_qp_req ureq;
        int bytes = 0, psn_sz;
        struct ib_umem *umem;
        int psn_nume;

        qplib_qp = &qp->qplib_qp;
        cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
                                         ib_uctx);
        if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                return -EFAULT;

        bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
        /* Consider mapping PSN search memory only for RC QPs. */
        if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
                psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
                                sizeof(struct sq_psn_search_ext) :
                                sizeof(struct sq_psn_search);
                psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
                            qplib_qp->sq.max_wqe :
                            ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
                              sizeof(struct bnxt_qplib_sge));
                bytes += (psn_nume * psn_sz);
        }

        bytes = PAGE_ALIGN(bytes);
        umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
                           IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(umem))
                return PTR_ERR(umem);

        qp->sumem = umem;
        qplib_qp->sq.sg_info.umem = umem;
        qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
        qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
        qplib_qp->qp_handle = ureq.qp_handle;

        if (!qp->qplib_qp.srq) {
                bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
                bytes = PAGE_ALIGN(bytes);
                umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
                                   IB_ACCESS_LOCAL_WRITE);
                if (IS_ERR(umem))
                        goto rqfail;
                qp->rumem = umem;
                qplib_qp->rq.sg_info.umem = umem;
                qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
                qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
        }

        qplib_qp->dpi = &cntx->dpi;
        return 0;
rqfail:
        ib_umem_release(qp->sumem);
        qp->sumem = NULL;
        memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));

        return PTR_ERR(umem);
}
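
/*
 * Sizing sketch for the user SQ mapping above: the umem must cover the
 * WQE ring plus, for RC QPs, one PSN search entry per potential WQE.
 * With hypothetical numbers on a gen-P5 chip in static WQE mode:
 *
 *	max_wqe = 512, wqe_size = 128
 *	bytes = 512 * 128				// WQE ring
 *	      + 512 * sizeof(struct sq_psn_search_ext)	// PSN area
 *	bytes = PAGE_ALIGN(bytes);
 */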

static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
                                (struct bnxt_re_pd *pd,
                                 struct bnxt_qplib_res *qp1_res,
                                 struct bnxt_qplib_qp *qp1_qp)
{
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_ah *ah;
        union ib_gid sgid;
        int rc;

        ah = kzalloc(sizeof(*ah), GFP_KERNEL);
        if (!ah)
                return NULL;

        ah->rdev = rdev;
        ah->qplib_ah.pd = &pd->qplib_pd;

        rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
        if (rc)
                goto fail;

        /* Supply the same data for the dgid as the sgid */
        memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
               sizeof(union ib_gid));
        ah->qplib_ah.sgid_index = 0;

        ah->qplib_ah.traffic_class = 0;
        ah->qplib_ah.flow_label = 0;
        ah->qplib_ah.hop_limit = 1;
        ah->qplib_ah.sl = 0;
        /* Have DMAC same as SMAC */
        ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

        rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
        if (rc) {
                ibdev_err(&rdev->ibdev,
                          "Failed to allocate HW AH for Shadow QP");
                goto fail;
        }

        return ah;

fail:
        kfree(ah);
        return NULL;
}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
                                (struct bnxt_re_pd *pd,
                                 struct bnxt_qplib_res *qp1_res,
                                 struct bnxt_qplib_qp *qp1_qp)
{
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_qp *qp;
        int rc;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return NULL;

        qp->rdev = rdev;

        /* Initialize the shadow QP structure from the QP1 values */
        ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

        qp->qplib_qp.pd = &pd->qplib_pd;
        qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
        qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_UD; /* qplib expects CMDQ_* types */

        qp->qplib_qp.max_inline_data = 0;
        qp->qplib_qp.sig_type = true;

        /* Shadow QP SQ depth should be same as QP1 RQ depth */
        qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
        qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.sq.max_sge = 2;
        /* Q full delta can be 1 since it is an internal QP */
        qp->qplib_qp.sq.q_full_delta = 1;
        qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
        qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;

        qp->qplib_qp.scq = qp1_qp->scq;
        qp->qplib_qp.rcq = qp1_qp->rcq;

        qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
        qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
        /* Q full delta can be 1 since it is an internal QP */
        qp->qplib_qp.rq.q_full_delta = 1;
        qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
        qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;

        qp->qplib_qp.mtu = qp1_qp->mtu;

        qp->qplib_qp.sq_hdr_buf_size = 0;
        qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
        qp->qplib_qp.dpi = &rdev->dpi_privileged;

        rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
        if (rc)
                goto fail;

        spin_lock_init(&qp->sq_lock);
        INIT_LIST_HEAD(&qp->list);
        mutex_lock(&rdev->qp_lock);
        list_add_tail(&qp->list, &rdev->qp_list);
        atomic_inc(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);
        return qp;
fail:
        kfree(qp);
        return NULL;
}

static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
                                struct ib_qp_init_attr *init_attr)
{
        struct bnxt_qplib_dev_attr *dev_attr;
        struct bnxt_qplib_qp *qplqp;
        struct bnxt_re_dev *rdev;
        struct bnxt_qplib_q *rq;
        int entries;

        rdev = qp->rdev;
        qplqp = &qp->qplib_qp;
        rq = &qplqp->rq;
        dev_attr = &rdev->dev_attr;

        if (init_attr->srq) {
                struct bnxt_re_srq *srq;

                srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
                if (!srq) {
                        ibdev_err(&rdev->ibdev, "SRQ not found");
                        return -EINVAL;
                }
                qplqp->srq = &srq->qplib_srq;
                rq->max_wqe = 0;
        } else {
                rq->max_sge = init_attr->cap.max_recv_sge;
                if (rq->max_sge > dev_attr->max_qp_sges)
                        rq->max_sge = dev_attr->max_qp_sges;
                init_attr->cap.max_recv_sge = rq->max_sge;
                rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
                                                       dev_attr->max_qp_sges);
                /* Allocate 1 more than what's provided so posting max doesn't
                 * mean empty.
                 */
                entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
                rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
                rq->q_full_delta = 0;
                rq->sg_info.pgsize = PAGE_SIZE;
                rq->sg_info.pgshft = PAGE_SHIFT;
        }

        return 0;
}
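
/*
 * Example of the RQ sizing rule above, with hypothetical caps: a request
 * for max_recv_wr = 256 becomes roundup_pow_of_two(256 + 1) = 512
 * entries, clamped to dev_attr->max_qp_wqes + 1. The extra entry keeps a
 * completely full ring distinguishable from an empty one.
 */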

static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
{
        struct bnxt_qplib_qp *qplqp;
        struct bnxt_re_dev *rdev;

        rdev = qp->rdev;
        qplqp = &qp->qplib_qp;

        /* On pre gen-P5 chips the GSI RQ always uses 6 SGEs */
        if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
                qplqp->rq.max_sge = 6;
}

static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata)
{
        struct bnxt_qplib_dev_attr *dev_attr;
        struct bnxt_qplib_qp *qplqp;
        struct bnxt_re_dev *rdev;
        struct bnxt_qplib_q *sq;
        int entries;
        int diff;
        int rc;

        rdev = qp->rdev;
        qplqp = &qp->qplib_qp;
        sq = &qplqp->sq;
        dev_attr = &rdev->dev_attr;

        sq->max_sge = init_attr->cap.max_send_sge;
        if (sq->max_sge > dev_attr->max_qp_sges) {
                sq->max_sge = dev_attr->max_qp_sges;
                init_attr->cap.max_send_sge = sq->max_sge;
        }

        rc = bnxt_re_setup_swqe_size(qp, init_attr);
        if (rc)
                return rc;

        entries = init_attr->cap.max_send_wr;
        /* In static WQE mode, allocate BNXT_QPLIB_RESERVED_QP_WRS (128) + 1
         * entries more than what's requested.
         */
        diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
                0 : BNXT_QPLIB_RESERVED_QP_WRS;
        entries = roundup_pow_of_two(entries + diff + 1);
        sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
        sq->q_full_delta = diff + 1;
        /*
         * Reserve one slot for the phantom WQE. The application can then
         * post one extra entry, but we allow that to avoid an unexpected
         * queue-full condition.
         */
        qplqp->sq.q_full_delta -= 1;
        qplqp->sq.sg_info.pgsize = PAGE_SIZE;
        qplqp->sq.sg_info.pgshft = PAGE_SHIFT;

        return 0;
}
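
/*
 * Example of the SQ sizing above in static WQE mode, with hypothetical
 * caps: max_send_wr = 100 gives diff = BNXT_QPLIB_RESERVED_QP_WRS = 128,
 * so entries = roundup_pow_of_two(100 + 128 + 1) = 256, and
 * q_full_delta = (128 + 1) - 1 = 128 once the phantom-WQE slot is handed
 * back to the application.
 */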

static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
                                       struct ib_qp_init_attr *init_attr)
{
        struct bnxt_qplib_dev_attr *dev_attr;
        struct bnxt_qplib_qp *qplqp;
        struct bnxt_re_dev *rdev;
        int entries;

        rdev = qp->rdev;
        qplqp = &qp->qplib_qp;
        dev_attr = &rdev->dev_attr;

        if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
                entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
                qplqp->sq.max_wqe = min_t(u32, entries,
                                          dev_attr->max_qp_wqes + 1);
                qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
                        init_attr->cap.max_send_wr;
                qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
                if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
                        qplqp->sq.max_sge = dev_attr->max_qp_sges;
        }
}

static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
                                struct ib_qp_init_attr *init_attr)
{
        struct bnxt_qplib_chip_ctx *chip_ctx;
        int qptype;

        chip_ctx = rdev->chip_ctx;

        qptype = __from_ib_qp_type(init_attr->qp_type);
        if (qptype == IB_QPT_MAX) {
                ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
                qptype = -EOPNOTSUPP;
                goto out;
        }

        if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
            init_attr->qp_type == IB_QPT_GSI)
                qptype = CMDQ_CREATE_QP_TYPE_GSI;
out:
        return qptype;
}

static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata)
{
        struct bnxt_qplib_dev_attr *dev_attr;
        struct bnxt_qplib_qp *qplqp;
        struct bnxt_re_dev *rdev;
        struct bnxt_re_cq *cq;
        int rc = 0, qptype;

        rdev = qp->rdev;
        qplqp = &qp->qplib_qp;
        dev_attr = &rdev->dev_attr;

        /* Setup misc params */
        ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
        qplqp->pd = &pd->qplib_pd;
        qplqp->qp_handle = (u64)qplqp;
        qplqp->max_inline_data = init_attr->cap.max_inline_data;
        qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
        qptype = bnxt_re_init_qp_type(rdev, init_attr);
        if (qptype < 0) {
                rc = qptype;
                goto out;
        }
        qplqp->type = (u8)qptype;
        qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;

        if (init_attr->qp_type == IB_QPT_RC) {
                qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
                qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
        }
        qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
        qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
        if (init_attr->create_flags) {
                ibdev_dbg(&rdev->ibdev,
                          "QP create flags 0x%x not supported",
                          init_attr->create_flags);
                return -EOPNOTSUPP;
        }

        /* Setup CQs */
        if (init_attr->send_cq) {
                cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
                if (!cq) {
                        ibdev_err(&rdev->ibdev, "Send CQ not found");
                        rc = -EINVAL;
                        goto out;
                }
                qplqp->scq = &cq->qplib_cq;
                qp->scq = cq;
        }

        if (init_attr->recv_cq) {
                cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
                if (!cq) {
                        ibdev_err(&rdev->ibdev, "Receive CQ not found");
                        rc = -EINVAL;
                        goto out;
                }
                qplqp->rcq = &cq->qplib_cq;
                qp->rcq = cq;
        }

        /* Setup RQ/SRQ */
        rc = bnxt_re_init_rq_attr(qp, init_attr);
        if (rc)
                goto out;
        if (init_attr->qp_type == IB_QPT_GSI)
                bnxt_re_adjust_gsi_rq_attr(qp);

        /* Setup SQ */
        rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
        if (rc)
                goto out;
        if (init_attr->qp_type == IB_QPT_GSI)
                bnxt_re_adjust_gsi_sq_attr(qp, init_attr);

        if (udata) /* This will update DPI and qp_handle */
                rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
out:
        return rc;
}

static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
                                     struct bnxt_re_pd *pd)
{
        struct bnxt_re_sqp_entries *sqp_tbl = NULL;
        struct bnxt_re_dev *rdev;
        struct bnxt_re_qp *sqp;
        struct bnxt_re_ah *sah;
        int rc = 0;

        rdev = qp->rdev;
        /* Create a shadow QP to handle the QP1 traffic */
        sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
                          GFP_KERNEL);
        if (!sqp_tbl)
                return -ENOMEM;
        rdev->gsi_ctx.sqp_tbl = sqp_tbl;

        sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
        if (!sqp) {
                rc = -ENODEV;
                ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
                goto out;
        }
        rdev->gsi_ctx.gsi_sqp = sqp;

        sqp->rcq = qp->rcq;
        sqp->scq = qp->scq;
        sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
                                          &qp->qplib_qp);
        if (!sah) {
                bnxt_qplib_destroy_qp(&rdev->qplib_res,
                                      &sqp->qplib_qp);
                rc = -ENODEV;
                ibdev_err(&rdev->ibdev,
                          "Failed to create AH entry for ShadowQP");
                goto out;
        }
        rdev->gsi_ctx.gsi_sah = sah;

        return 0;
out:
        kfree(sqp_tbl);
        return rc;
}

static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
                                 struct ib_qp_init_attr *init_attr)
{
        struct bnxt_re_dev *rdev;
        struct bnxt_qplib_qp *qplqp;
        int rc = 0;

        rdev = qp->rdev;
        qplqp = &qp->qplib_qp;

        qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
        qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;

        rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
        if (rc) {
                ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
                goto out;
        }

        rc = bnxt_re_create_shadow_gsi(qp, pd);
out:
        return rc;
}

static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
                                   struct ib_qp_init_attr *init_attr,
                                   struct bnxt_qplib_dev_attr *dev_attr)
{
        bool rc = true;

        if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
            init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
            init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
            init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
            init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
                ibdev_err(&rdev->ibdev,
                          "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
                          init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
                          init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
                          init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
                          init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
                          init_attr->cap.max_inline_data,
                          dev_attr->max_inline_data);
                rc = false;
        }
        return rc;
}
1414
1415 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1416                                 struct ib_qp_init_attr *qp_init_attr,
1417                                 struct ib_udata *udata)
1418 {
1419         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1420         struct bnxt_re_dev *rdev = pd->rdev;
1421         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1422         struct bnxt_re_qp *qp;
1423         int rc;
1424
1425         rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1426         if (!rc) {
1427                 rc = -EINVAL;
1428                 goto exit;
1429         }
1430
1431         qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1432         if (!qp) {
1433                 rc = -ENOMEM;
1434                 goto exit;
1435         }
1436         qp->rdev = rdev;
1437         rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
1438         if (rc)
1439                 goto fail;
1440
1441         if (qp_init_attr->qp_type == IB_QPT_GSI &&
1442             !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
1443                 rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1444                 if (rc == -ENODEV)
1445                         goto qp_destroy;
1446                 if (rc)
1447                         goto fail;
1448         } else {
1449                 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1450                 if (rc) {
1451                         ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1452                         goto free_umem;
1453                 }
1454                 if (udata) {
1455                         struct bnxt_re_qp_resp resp;
1456
1457                         resp.qpid = qp->qplib_qp.id;
1458                         resp.rsvd = 0;
1459                         rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1460                         if (rc) {
1461                                 ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1462                                 goto qp_destroy;
1463                         }
1464                 }
1465         }
1466
1467         qp->ib_qp.qp_num = qp->qplib_qp.id;
1468         if (qp_init_attr->qp_type == IB_QPT_GSI)
1469                 rdev->gsi_ctx.gsi_qp = qp;
1470         spin_lock_init(&qp->sq_lock);
1471         spin_lock_init(&qp->rq_lock);
1472         INIT_LIST_HEAD(&qp->list);
1473         mutex_lock(&rdev->qp_lock);
1474         list_add_tail(&qp->list, &rdev->qp_list);
1475         mutex_unlock(&rdev->qp_lock);
1476         atomic_inc(&rdev->qp_count);
1477
1478         return &qp->ib_qp;
1479 qp_destroy:
1480         bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1481 free_umem:
1482         ib_umem_release(qp->rumem);
1483         ib_umem_release(qp->sumem);
1484 fail:
1485         kfree(qp);
1486 exit:
1487         return ERR_PTR(rc);
1488 }
1489
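/* Translate IB QP states to the firmware CMDQ encoding; anything
 * unrecognized collapses to the ERR state.
 */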
1490 static u8 __from_ib_qp_state(enum ib_qp_state state)
1491 {
1492         switch (state) {
1493         case IB_QPS_RESET:
1494                 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1495         case IB_QPS_INIT:
1496                 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1497         case IB_QPS_RTR:
1498                 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1499         case IB_QPS_RTS:
1500                 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1501         case IB_QPS_SQD:
1502                 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1503         case IB_QPS_SQE:
1504                 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1505         case IB_QPS_ERR:
1506         default:
1507                 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1508         }
1509 }
1510
1511 static enum ib_qp_state __to_ib_qp_state(u8 state)
1512 {
1513         switch (state) {
1514         case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1515                 return IB_QPS_RESET;
1516         case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1517                 return IB_QPS_INIT;
1518         case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1519                 return IB_QPS_RTR;
1520         case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1521                 return IB_QPS_RTS;
1522         case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1523                 return IB_QPS_SQD;
1524         case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1525                 return IB_QPS_SQE;
1526         case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1527         default:
1528                 return IB_QPS_ERR;
1529         }
1530 }
1531
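/* MTU translation helpers; values outside the supported set default to
 * a 2048-byte path MTU in both directions.
 */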
1532 static u32 __from_ib_mtu(enum ib_mtu mtu)
1533 {
1534         switch (mtu) {
1535         case IB_MTU_256:
1536                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1537         case IB_MTU_512:
1538                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1539         case IB_MTU_1024:
1540                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1541         case IB_MTU_2048:
1542                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1543         case IB_MTU_4096:
1544                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1545         default:
1546                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1547         }
1548 }
1549
1550 static enum ib_mtu __to_ib_mtu(u32 mtu)
1551 {
1552         switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1553         case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1554                 return IB_MTU_256;
1555         case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1556                 return IB_MTU_512;
1557         case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1558                 return IB_MTU_1024;
1559         case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1560                 return IB_MTU_2048;
1561         case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1562                 return IB_MTU_4096;
1563         default:
1564                 return IB_MTU_2048;
1565         }
1566 }
1567
1568 /* Shared Receive Queues */
1569 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1570 {
1571         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1572                                                ib_srq);
1573         struct bnxt_re_dev *rdev = srq->rdev;
1574         struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1575         struct bnxt_qplib_nq *nq = NULL;
1576
1577         if (qplib_srq->cq)
1578                 nq = qplib_srq->cq->nq;
1579         bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1580         ib_umem_release(srq->umem);
1581         atomic_dec(&rdev->srq_count);
1582         if (nq)
1583                 nq->budget--;
1584         return 0;
1585 }
1586
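/* Pin the userspace SRQ ring (ureq.srqva) with ib_umem_get() and wire
 * the resulting umem and the context DPI into the qplib SRQ.
 */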
1587 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1588                                  struct bnxt_re_pd *pd,
1589                                  struct bnxt_re_srq *srq,
1590                                  struct ib_udata *udata)
1591 {
1592         struct bnxt_re_srq_req ureq;
1593         struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1594         struct ib_umem *umem;
1595         int bytes = 0;
1596         struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1597                 udata, struct bnxt_re_ucontext, ib_uctx);
1598
1599         if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1600                 return -EFAULT;
1601
1602         bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1603         bytes = PAGE_ALIGN(bytes);
1604         umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1605                            IB_ACCESS_LOCAL_WRITE);
1606         if (IS_ERR(umem))
1607                 return PTR_ERR(umem);
1608
1609         srq->umem = umem;
1610         qplib_srq->sg_info.umem = umem;
1611         qplib_srq->sg_info.pgsize = PAGE_SIZE;
1612         qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1613         qplib_srq->srq_handle = ureq.srq_handle;
1614         qplib_srq->dpi = &cntx->dpi;
1615
1616         return 0;
1617 }
1618
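/* ib_create_srq verb: validate the limits, size the ring (one extra
 * entry so a full posting never looks empty), map user memory when
 * present, and create the HW SRQ on NQ 0.
 */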
1619 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1620                        struct ib_srq_init_attr *srq_init_attr,
1621                        struct ib_udata *udata)
1622 {
1623         struct bnxt_qplib_dev_attr *dev_attr;
1624         struct bnxt_qplib_nq *nq = NULL;
1625         struct bnxt_re_dev *rdev;
1626         struct bnxt_re_srq *srq;
1627         struct bnxt_re_pd *pd;
1628         struct ib_pd *ib_pd;
1629         int rc, entries;
1630
1631         ib_pd = ib_srq->pd;
1632         pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1633         rdev = pd->rdev;
1634         dev_attr = &rdev->dev_attr;
1635         srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1636
1637         if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1638                 ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1639                 rc = -EINVAL;
1640                 goto exit;
1641         }
1642
1643         if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1644                 rc = -EOPNOTSUPP;
1645                 goto exit;
1646         }
1647
1648         srq->rdev = rdev;
1649         srq->qplib_srq.pd = &pd->qplib_pd;
1650         srq->qplib_srq.dpi = &rdev->dpi_privileged;
1651         /* Allocate 1 more than what's provided so posting the max
1652          * never makes the ring look empty
1653          */
1654         entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1655         if (entries > dev_attr->max_srq_wqes + 1)
1656                 entries = dev_attr->max_srq_wqes + 1;
1657         srq->qplib_srq.max_wqe = entries;
1658
1659         srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1660         /* SRQ WQE size is fixed at 128 bytes, so size it with the device max SGEs */
1661         srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1662         srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1663         srq->srq_limit = srq_init_attr->attr.srq_limit;
1664         srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1665         nq = &rdev->nq[0];
1666
1667         if (udata) {
1668                 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1669                 if (rc)
1670                         goto fail;
1671         }
1672
1673         rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1674         if (rc) {
1675                 ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1676                 goto fail;
1677         }
1678
1679         if (udata) {
1680                 struct bnxt_re_srq_resp resp;
1681
1682                 resp.srqid = srq->qplib_srq.id;
1683                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1684                 if (rc) {
1685                         ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1686                         bnxt_qplib_destroy_srq(&rdev->qplib_res,
1687                                                &srq->qplib_srq);
1688                         goto fail;
1689                 }
1690         }
1691         if (nq)
1692                 nq->budget++;
1693         atomic_inc(&rdev->srq_count);
1694
1695         return 0;
1696
1697 fail:
1698         ib_umem_release(srq->umem);
1699 exit:
1700         return rc;
1701 }
1702
1703 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1704                        enum ib_srq_attr_mask srq_attr_mask,
1705                        struct ib_udata *udata)
1706 {
1707         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1708                                                ib_srq);
1709         struct bnxt_re_dev *rdev = srq->rdev;
1710         int rc;
1711
1712         switch (srq_attr_mask) {
1713         case IB_SRQ_MAX_WR:
1714                 /* SRQ resize is not supported */
1715                 break;
1716         case IB_SRQ_LIMIT:
1717                 /* Change the SRQ threshold */
1718                 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1719                         return -EINVAL;
1720
1721                 srq->qplib_srq.threshold = srq_attr->srq_limit;
1722                 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1723                 if (rc) {
1724                         ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1725                         return rc;
1726                 }
1727                 /* On success, update the shadow */
1728                 srq->srq_limit = srq_attr->srq_limit;
1729                 /* No need to build and send a response back to udata */
1730                 break;
1731         default:
1732                 ibdev_err(&rdev->ibdev,
1733                           "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1734                 return -EINVAL;
1735         }
1736         return 0;
1737 }
1738
1739 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1740 {
1741         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1742                                                ib_srq);
1743         struct bnxt_re_srq tsrq;
1744         struct bnxt_re_dev *rdev = srq->rdev;
1745         int rc;
1746
1747         /* Get live SRQ attr */
1748         tsrq.qplib_srq.id = srq->qplib_srq.id;
1749         rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1750         if (rc) {
1751                 ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1752                 return rc;
1753         }
1754         srq_attr->max_wr = srq->qplib_srq.max_wqe;
1755         srq_attr->max_sge = srq->qplib_srq.max_sge;
1756         srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1757
1758         return 0;
1759 }
1760
1761 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1762                           const struct ib_recv_wr **bad_wr)
1763 {
1764         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1765                                                ib_srq);
1766         struct bnxt_qplib_swqe wqe;
1767         unsigned long flags;
1768         int rc = 0;
1769
1770         spin_lock_irqsave(&srq->lock, flags);
1771         while (wr) {
1772                 /* Transcribe each ib_recv_wr to qplib_swqe */
1773                 wqe.num_sge = wr->num_sge;
1774                 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1775                 wqe.wr_id = wr->wr_id;
1776                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1777
1778                 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1779                 if (rc) {
1780                         *bad_wr = wr;
1781                         break;
1782                 }
1783                 wr = wr->next;
1784         }
1785         spin_unlock_irqrestore(&srq->lock, flags);
1786
1787         return rc;
1788 }
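
/* Propagate state, pkey index and SQ PSN from the real QP1 to its
 * shadow QP (the shadow keeps a fixed QKEY) so the pair stays in step.
 */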
1789 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1790                                     struct bnxt_re_qp *qp1_qp,
1791                                     int qp_attr_mask)
1792 {
1793         struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1794         int rc = 0;
1795
1796         if (qp_attr_mask & IB_QP_STATE) {
1797                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1798                 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1799         }
1800         if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1801                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1802                 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1803         }
1804
1805         if (qp_attr_mask & IB_QP_QKEY) {
1806                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1807                 /* Use a fixed, arbitrarily chosen QKEY */
1808                 qp->qplib_qp.qkey = 0x81818181;
1809         }
1810         if (qp_attr_mask & IB_QP_SQ_PSN) {
1811                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1812                 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1813         }
1814
1815         rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1816         if (rc)
1817                 ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
1818         return rc;
1819 }
1820
1821 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1822                       int qp_attr_mask, struct ib_udata *udata)
1823 {
1824         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1825         struct bnxt_re_dev *rdev = qp->rdev;
1826         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1827         enum ib_qp_state curr_qp_state, new_qp_state;
1828         int rc, entries;
1829         unsigned int flags;
1830         u8 nw_type;
1831
1832         if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1833                 return -EOPNOTSUPP;
1834
1835         qp->qplib_qp.modify_flags = 0;
1836         if (qp_attr_mask & IB_QP_STATE) {
1837                 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1838                 new_qp_state = qp_attr->qp_state;
1839                 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1840                                         ib_qp->qp_type, qp_attr_mask)) {
1841                         ibdev_err(&rdev->ibdev,
1842                           "Invalid attribute mask: %#x specified",
1843                                   qp_attr_mask);
1844                         ibdev_err(&rdev->ibdev,
1845                                   "for qpn: %#x type: %#x",
1846                                   ib_qp->qp_num, ib_qp->qp_type);
1847                         ibdev_err(&rdev->ibdev,
1848                                   "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1849                                   curr_qp_state, new_qp_state);
1850                         return -EINVAL;
1851                 }
1852                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1853                 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1854
1855                 if (!qp->sumem &&
1856                     qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1857                         ibdev_dbg(&rdev->ibdev,
1858                                   "Move QP = %p to flush list\n", qp);
1859                         flags = bnxt_re_lock_cqs(qp);
1860                         bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1861                         bnxt_re_unlock_cqs(qp, flags);
1862                 }
1863                 if (!qp->sumem &&
1864                     qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1865                         ibdev_dbg(&rdev->ibdev,
1866                                   "Move QP = %p out of flush list\n", qp);
1867                         flags = bnxt_re_lock_cqs(qp);
1868                         bnxt_qplib_clean_qp(&qp->qplib_qp);
1869                         bnxt_re_unlock_cqs(qp, flags);
1870                 }
1871         }
1872         if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1873                 qp->qplib_qp.modify_flags |=
1874                                 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1875                 qp->qplib_qp.en_sqd_async_notify = true;
1876         }
1877         if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1878                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1879                 qp->qplib_qp.access =
1880                         __from_ib_access_flags(qp_attr->qp_access_flags);
1881                 /* LOCAL_WRITE access must be set to allow RC receive */
1882                 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1883                 /* Temporary: enable all remote access params on the QP for now */
1884                 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1885                 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
1886         }
1887         if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1888                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1889                 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1890         }
1891         if (qp_attr_mask & IB_QP_QKEY) {
1892                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1893                 qp->qplib_qp.qkey = qp_attr->qkey;
1894         }
1895         if (qp_attr_mask & IB_QP_AV) {
1896                 const struct ib_global_route *grh =
1897                         rdma_ah_read_grh(&qp_attr->ah_attr);
1898                 const struct ib_gid_attr *sgid_attr;
1899                 struct bnxt_re_gid_ctx *ctx;
1900
1901                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1902                                      CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1903                                      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1904                                      CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1905                                      CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1906                                      CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1907                                      CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1908                 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1909                        sizeof(qp->qplib_qp.ah.dgid.data));
1910                 qp->qplib_qp.ah.flow_label = grh->flow_label;
1911                 sgid_attr = grh->sgid_attr;
1912                 /* Get the HW context of the GID. The reference
1913                  * to the GID table entry is already held by the caller.
1914                  */
1915                 ctx = rdma_read_gid_hw_context(sgid_attr);
1916                 qp->qplib_qp.ah.sgid_index = ctx->idx;
1917                 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1918                 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1919                 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1920                 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1921                 ether_addr_copy(qp->qplib_qp.ah.dmac,
1922                                 qp_attr->ah_attr.roce.dmac);
1923
1924                 rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
1925                                              &qp->qplib_qp.smac[0]);
1926                 if (rc)
1927                         return rc;
1928
1929                 nw_type = rdma_gid_attr_network_type(sgid_attr);
1930                 switch (nw_type) {
1931                 case RDMA_NETWORK_IPV4:
1932                         qp->qplib_qp.nw_type =
1933                                 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1934                         break;
1935                 case RDMA_NETWORK_IPV6:
1936                         qp->qplib_qp.nw_type =
1937                                 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1938                         break;
1939                 default:
1940                         qp->qplib_qp.nw_type =
1941                                 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1942                         break;
1943                 }
1944         }
1945
1946         if (qp_attr_mask & IB_QP_PATH_MTU) {
1947                 qp->qplib_qp.modify_flags |=
1948                                 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1949                 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1950                 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1951         } else if (qp_attr->qp_state == IB_QPS_RTR) {
1952                 qp->qplib_qp.modify_flags |=
1953                         CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1954                 qp->qplib_qp.path_mtu =
1955                         __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1956                 qp->qplib_qp.mtu =
1957                         ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1958         }
1959
1960         if (qp_attr_mask & IB_QP_TIMEOUT) {
1961                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1962                 qp->qplib_qp.timeout = qp_attr->timeout;
1963         }
1964         if (qp_attr_mask & IB_QP_RETRY_CNT) {
1965                 qp->qplib_qp.modify_flags |=
1966                                 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1967                 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1968         }
1969         if (qp_attr_mask & IB_QP_RNR_RETRY) {
1970                 qp->qplib_qp.modify_flags |=
1971                                 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1972                 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1973         }
1974         if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1975                 qp->qplib_qp.modify_flags |=
1976                                 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1977                 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1978         }
1979         if (qp_attr_mask & IB_QP_RQ_PSN) {
1980                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1981                 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1982         }
1983         if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1984                 qp->qplib_qp.modify_flags |=
1985                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1986                 /* Cap the max_rd_atomic to device max */
1987                 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1988                                                    dev_attr->max_qp_rd_atom);
1989         }
1990         if (qp_attr_mask & IB_QP_SQ_PSN) {
1991                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1992                 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1993         }
1994         if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1995                 if (qp_attr->max_dest_rd_atomic >
1996                     dev_attr->max_qp_init_rd_atom) {
1997                         ibdev_err(&rdev->ibdev,
1998                                   "max_dest_rd_atomic requested %d is > dev_max %d",
1999                                   qp_attr->max_dest_rd_atomic,
2000                                   dev_attr->max_qp_init_rd_atom);
2001                         return -EINVAL;
2002                 }
2003
2004                 qp->qplib_qp.modify_flags |=
2005                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2006                 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2007         }
2008         if (qp_attr_mask & IB_QP_CAP) {
2009                 qp->qplib_qp.modify_flags |=
2010                                 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2011                                 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2012                                 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2013                                 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2014                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2015                 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2016                     (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2017                     (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2018                     (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2019                     (qp_attr->cap.max_inline_data >=
2020                                                 dev_attr->max_inline_data)) {
2021                         ibdev_err(&rdev->ibdev,
2022                                   "Modify QP failed - max exceeded");
2023                         return -EINVAL;
2024                 }
2025                 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
2026                 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2027                                                 dev_attr->max_qp_wqes + 1);
2028                 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2029                                                 qp_attr->cap.max_send_wr;
2030                 /*
2031                  * Reserve one slot for the phantom WQE. Some applications may
2032                  * post one extra entry in this case; allow it to avoid an
2033                  * unexpected queue-full condition.
2034                  */
2035                 qp->qplib_qp.sq.q_full_delta -= 1;
2036                 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2037                 if (qp->qplib_qp.rq.max_wqe) {
2038                         entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
2039                         qp->qplib_qp.rq.max_wqe =
2040                                 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2041                         qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2042                                                        qp_attr->cap.max_recv_wr;
2043                         qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2044                 } else {
2045                         /* The QP uses an SRQ, so just ignore the RQ caps */
2046                 }
2047         }
2048         if (qp_attr_mask & IB_QP_DEST_QPN) {
2049                 qp->qplib_qp.modify_flags |=
2050                                 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2051                 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2052         }
2053         rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2054         if (rc) {
2055                 ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2056                 return rc;
2057         }
2058         if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2059                 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2060         return rc;
2061 }
2062
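/* ib_query_qp verb: query the live QP from firmware into a scratch
 * qplib_qp and translate it back to IB attributes; the SQ/RQ capacities
 * are reported from the driver's cached copy.
 */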
2063 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2064                      int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2065 {
2066         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2067         struct bnxt_re_dev *rdev = qp->rdev;
2068         struct bnxt_qplib_qp *qplib_qp;
2069         int rc;
2070
2071         qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2072         if (!qplib_qp)
2073                 return -ENOMEM;
2074
2075         qplib_qp->id = qp->qplib_qp.id;
2076         qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2077
2078         rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2079         if (rc) {
2080                 ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2081                 goto out;
2082         }
2083         qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2084         qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2085         qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2086         qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2087         qp_attr->pkey_index = qplib_qp->pkey_index;
2088         qp_attr->qkey = qplib_qp->qkey;
2089         qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2090         rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2091                         qplib_qp->ah.host_sgid_index,
2092                         qplib_qp->ah.hop_limit,
2093                         qplib_qp->ah.traffic_class);
2094         rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2095         rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2096         ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2097         qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2098         qp_attr->timeout = qplib_qp->timeout;
2099         qp_attr->retry_cnt = qplib_qp->retry_cnt;
2100         qp_attr->rnr_retry = qplib_qp->rnr_retry;
2101         qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2102         qp_attr->rq_psn = qplib_qp->rq.psn;
2103         qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2104         qp_attr->sq_psn = qplib_qp->sq.psn;
2105         qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2106         qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2107                                                          IB_SIGNAL_REQ_WR;
2108         qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2109
2110         qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2111         qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2112         qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2113         qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2114         qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2115         qp_init_attr->cap = qp_attr->cap;
2116
2117 out:
2118         kfree(qplib_qp);
2119         return rc;
2120 }
2121
2122 /* Routine for sending QP1 packets for RoCE V1 and V2 */
2124 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2125                                      const struct ib_send_wr *wr,
2126                                      struct bnxt_qplib_swqe *wqe,
2127                                      int payload_size)
2128 {
2129         struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2130                                              ib_ah);
2131         struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2132         const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2133         struct bnxt_qplib_sge sge;
2134         u8 nw_type;
2135         u16 ether_type;
2136         union ib_gid dgid;
2137         bool is_eth = false;
2138         bool is_vlan = false;
2139         bool is_grh = false;
2140         bool is_udp = false;
2141         u8 ip_version = 0;
2142         u16 vlan_id = 0xFFFF;
2143         void *buf;
2144         int i, rc = 0;
2145
2146         memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2147
2148         rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2149         if (rc)
2150                 return rc;
2151
2152         /* Get network header type for this GID */
2153         nw_type = rdma_gid_attr_network_type(sgid_attr);
2154         switch (nw_type) {
2155         case RDMA_NETWORK_IPV4:
2156                 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2157                 break;
2158         case RDMA_NETWORK_IPV6:
2159                 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2160                 break;
2161         default:
2162                 nw_type = BNXT_RE_ROCE_V1_PACKET;
2163                 break;
2164         }
2165         memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2166         is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2167         if (is_udp) {
2168                 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2169                         ip_version = 4;
2170                         ether_type = ETH_P_IP;
2171                 } else {
2172                         ip_version = 6;
2173                         ether_type = ETH_P_IPV6;
2174                 }
2175                 is_grh = false;
2176         } else {
2177                 ether_type = ETH_P_IBOE;
2178                 is_grh = true;
2179         }
2180
2181         is_eth = true;
2182         is_vlan = vlan_id && vlan_id < 0x1000;
2183
2184         ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2185                           ip_version, is_udp, 0, &qp->qp1_hdr);
2186
2187         /* ETH */
2188         ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2189         ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2190
2191         /* For VLAN, check the SGID for VLAN existence */
2193         if (!is_vlan) {
2194                 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2195         } else {
2196                 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2197                 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2198         }
2199
2200         if (is_grh || (ip_version == 6)) {
2201                 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2202                        sizeof(sgid_attr->gid));
2203                 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2204                        sizeof(sgid_attr->gid));
2205                 qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
2206         }
2207
2208         if (ip_version == 4) {
2209                 qp->qp1_hdr.ip4.tos = 0;
2210                 qp->qp1_hdr.ip4.id = 0;
2211                 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2212                 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2213
2214                 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2215                 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2216                 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2217         }
2218
2219         if (is_udp) {
2220                 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2221                 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2222                 qp->qp1_hdr.udp.csum = 0;
2223         }
2224
2225         /* BTH */
2226         if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2227                 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2228                 qp->qp1_hdr.immediate_present = 1;
2229         } else {
2230                 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2231         }
2232         if (wr->send_flags & IB_SEND_SOLICITED)
2233                 qp->qp1_hdr.bth.solicited_event = 1;
2234         /* Pad the payload to a 4-byte boundary */
2235         qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2236
2237         /* P_key for QP1 is for all members */
2238         qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2239         qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2240         qp->qp1_hdr.bth.ack_req = 0;
2241         qp->send_psn++;
2242         qp->send_psn &= BTH_PSN_MASK;
2243         qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2244         /* DETH */
2245         /* Use the privileged Q_Key for QP1 */
2246         qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2247         qp->qp1_hdr.deth.source_qpn = IB_QP1;
2248
2249         /* Pack the QP1 to the transmit buffer */
2250         buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2251         if (buf) {
2252                 ib_ud_header_pack(&qp->qp1_hdr, buf);
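                /* Shift the caller's SGEs up one slot; slot 0 will
                 * carry the freshly packed QP1 headers.
                 */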
2253                 for (i = wqe->num_sge; i; i--) {
2254                         wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2255                         wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2256                         wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2257                 }
2258
2259                 /*
2260                  * Max Header buf size for IPV6 RoCE V2 is 86,
2261                  * which is the same as the QP1 SQ header buffer.
2262                  * Header buf size for IPV4 RoCE V2 can be 66.
2263                  * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
2264                  * Subtract 20 bytes from QP1 SQ header buf size
2265                  */
2266                 if (is_udp && ip_version == 4)
2267                         sge.size -= 20;
2268                 /*
2269                  * Max Header buf size for RoCE V1 is 78.
2270                  * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2271                  * Subtract 8 bytes from QP1 SQ header buf size
2272                  */
2273                 if (!is_udp)
2274                         sge.size -= 8;
2275
2276                 /* Subtract 4 bytes for non vlan packets */
2277                 if (!is_vlan)
2278                         sge.size -= 4;
2279
2280                 wqe->sg_list[0].addr = sge.addr;
2281                 wqe->sg_list[0].lkey = sge.lkey;
2282                 wqe->sg_list[0].size = sge.size;
2283                 wqe->num_sge++;
2284
2285         } else {
2286                 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2287                 rc = -ENOMEM;
2288         }
2289         return rc;
2290 }
2291
2292 /* For the MAD layer, it only provides the recv SGE the size of
2293  * ib_grh + MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH,
2294  * nor RoCE iCRC.  The Cu+ solution must provide buffer for the entire
2295  * nor RoCE iCRC.  The Cu+ solution must provide a buffer for the entire
2296  * and the MAD datagram out to the provided SGE.
2297  */
2298 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2299                                             const struct ib_recv_wr *wr,
2300                                             struct bnxt_qplib_swqe *wqe,
2301                                             int payload_size)
2302 {
2303         struct bnxt_re_sqp_entries *sqp_entry;
2304         struct bnxt_qplib_sge ref, sge;
2305         struct bnxt_re_dev *rdev;
2306         u32 rq_prod_index;
2307
2308         rdev = qp->rdev;
2309
2310         rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2311
2312         if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2313                 return -ENOMEM;
2314
2315         /* Create 1 SGE to receive the entire
2316          * Ethernet packet
2317          */
2318         /* Save the reference from ULP */
2319         ref.addr = wqe->sg_list[0].addr;
2320         ref.lkey = wqe->sg_list[0].lkey;
2321         ref.size = wqe->sg_list[0].size;
2322
2323         sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2324
2325         /* SGE 1 */
2326         wqe->sg_list[0].addr = sge.addr;
2327         wqe->sg_list[0].lkey = sge.lkey;
2328         wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2329         sge.size -= wqe->sg_list[0].size;
2330
2331         sqp_entry->sge.addr = ref.addr;
2332         sqp_entry->sge.lkey = ref.lkey;
2333         sqp_entry->sge.size = ref.size;
2334         /* Store the wrid for reporting completion */
2335         sqp_entry->wrid = wqe->wr_id;
2336         /* Change wqe->wr_id to the table index */
2337         wqe->wr_id = rq_prod_index;
2338         return 0;
2339 }
2340
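/* UD and GSI QPs both carry UD-style work requests. */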
2341 static int is_ud_qp(struct bnxt_re_qp *qp)
2342 {
2343         return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2344                 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2345 }
2346
2347 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2348                                   const struct ib_send_wr *wr,
2349                                   struct bnxt_qplib_swqe *wqe)
2350 {
2351         struct bnxt_re_ah *ah = NULL;
2352
2353         if (is_ud_qp(qp)) {
2354                 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2355                 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2356                 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2357                 wqe->send.avid = ah->qplib_ah.id;
2358         }
2359         switch (wr->opcode) {
2360         case IB_WR_SEND:
2361                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2362                 break;
2363         case IB_WR_SEND_WITH_IMM:
2364                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2365                 wqe->send.imm_data = wr->ex.imm_data;
2366                 break;
2367         case IB_WR_SEND_WITH_INV:
2368                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2369                 wqe->send.inv_key = wr->ex.invalidate_rkey;
2370                 break;
2371         default:
2372                 return -EINVAL;
2373         }
2374         if (wr->send_flags & IB_SEND_SIGNALED)
2375                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2376         if (wr->send_flags & IB_SEND_FENCE)
2377                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2378         if (wr->send_flags & IB_SEND_SOLICITED)
2379                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2380         if (wr->send_flags & IB_SEND_INLINE)
2381                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2382
2383         return 0;
2384 }
2385
2386 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2387                                   struct bnxt_qplib_swqe *wqe)
2388 {
2389         switch (wr->opcode) {
2390         case IB_WR_RDMA_WRITE:
2391                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2392                 break;
2393         case IB_WR_RDMA_WRITE_WITH_IMM:
2394                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2395                 wqe->rdma.imm_data = wr->ex.imm_data;
2396                 break;
2397         case IB_WR_RDMA_READ:
2398                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2399                 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2400                 break;
2401         default:
2402                 return -EINVAL;
2403         }
2404         wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2405         wqe->rdma.r_key = rdma_wr(wr)->rkey;
2406         if (wr->send_flags & IB_SEND_SIGNALED)
2407                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2408         if (wr->send_flags & IB_SEND_FENCE)
2409                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2410         if (wr->send_flags & IB_SEND_SOLICITED)
2411                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2412         if (wr->send_flags & IB_SEND_INLINE)
2413                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2414
2415         return 0;
2416 }
2417
2418 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2419                                     struct bnxt_qplib_swqe *wqe)
2420 {
2421         switch (wr->opcode) {
2422         case IB_WR_ATOMIC_CMP_AND_SWP:
2423                 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2424                 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2425                 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2426                 break;
2427         case IB_WR_ATOMIC_FETCH_AND_ADD:
2428                 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2429                 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2430                 break;
2431         default:
2432                 return -EINVAL;
2433         }
2434         wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2435         wqe->atomic.r_key = atomic_wr(wr)->rkey;
2436         if (wr->send_flags & IB_SEND_SIGNALED)
2437                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2438         if (wr->send_flags & IB_SEND_FENCE)
2439                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2440         if (wr->send_flags & IB_SEND_SOLICITED)
2441                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2442         return 0;
2443 }
2444
2445 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2446                                  struct bnxt_qplib_swqe *wqe)
2447 {
2448         wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2449         wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2450
2451         /* Need unconditional fence for local invalidate
2452          * opcode to work as expected.
2453          */
2454         wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2455
2456         if (wr->send_flags & IB_SEND_SIGNALED)
2457                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2458         if (wr->send_flags & IB_SEND_SOLICITED)
2459                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2460
2461         return 0;
2462 }
2463
2464 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2465                                  struct bnxt_qplib_swqe *wqe)
2466 {
2467         struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2468         struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2469         int access = wr->access;
2470
2471         wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2472         wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2473         wqe->frmr.page_list = mr->pages;
2474         wqe->frmr.page_list_len = mr->npages;
2475         wqe->frmr.levels = qplib_frpl->hwq.level;
2476         wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2477
2478         /* Need unconditional fence for reg_mr
2479          * opcode to function as expected.
2480          */
2481
2482         wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2483
2484         if (wr->wr.send_flags & IB_SEND_SIGNALED)
2485                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2486
2487         if (access & IB_ACCESS_LOCAL_WRITE)
2488                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2489         if (access & IB_ACCESS_REMOTE_READ)
2490                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2491         if (access & IB_ACCESS_REMOTE_WRITE)
2492                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2493         if (access & IB_ACCESS_REMOTE_ATOMIC)
2494                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2495         if (access & IB_ACCESS_MW_BIND)
2496                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2497
2498         wqe->frmr.l_key = wr->key;
2499         wqe->frmr.length = wr->mr->length;
2500         wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2501         wqe->frmr.va = wr->mr->iova;
2502         return 0;
2503 }
2504
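/* Copy a gather list into the SWQE's inline buffer, capping the total
 * at BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH; returns the accumulated inline
 * length, or -EINVAL on overflow.
 */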
2505 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2506                                     const struct ib_send_wr *wr,
2507                                     struct bnxt_qplib_swqe *wqe)
2508 {
2509         /* Copy the inline data to the data field */
2510         u8 *in_data;
2511         u32 i, sge_len;
2512         void *sge_addr;
2513
2514         in_data = wqe->inline_data;
2515         for (i = 0; i < wr->num_sge; i++) {
2516                 sge_addr = (void *)(unsigned long)
2517                                 wr->sg_list[i].addr;
2518                 sge_len = wr->sg_list[i].length;
2519
2520                 if ((sge_len + wqe->inline_len) >
2521                     BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2522                         ibdev_err(&rdev->ibdev,
2523                                   "Inline data size requested > supported value");
2524                         return -EINVAL;
2525                 }
2528                 memcpy(in_data, sge_addr, sge_len);
2529                 in_data += sge_len;
2530                 wqe->inline_len += sge_len;
2531         }
2532         return wqe->inline_len;
2533 }
2534
2535 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2536                                    const struct ib_send_wr *wr,
2537                                    struct bnxt_qplib_swqe *wqe)
2538 {
2539         int payload_sz = 0;
2540
2541         if (wr->send_flags & IB_SEND_INLINE)
2542                 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2543         else
2544                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2545                                                wqe->num_sge);
2546
2547         return payload_sz;
2548 }
2549
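/* Workaround for a HW stall on UD-type QPs: once wqe_cnt reaches
 * BNXT_RE_UD_QP_HW_STALL, poke the QP with a modify-to-RTS and reset
 * the counter.
 */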
2550 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2551 {
2552         if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2553              qp->ib_qp.qp_type == IB_QPT_GSI ||
2554              qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2555              qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2556                 int qp_attr_mask;
2557                 struct ib_qp_attr qp_attr;
2558
2559                 qp_attr_mask = IB_QP_STATE;
2560                 qp_attr.qp_state = IB_QPS_RTS;
2561                 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2562                 qp->qplib_qp.wqe_cnt = 0;
2563         }
2564 }
2565
2566 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2567                                        struct bnxt_re_qp *qp,
2568                                        const struct ib_send_wr *wr)
2569 {
2570         int rc = 0, payload_sz = 0;
2571         unsigned long flags;
2572
2573         spin_lock_irqsave(&qp->sq_lock, flags);
2574         while (wr) {
2575                 struct bnxt_qplib_swqe wqe = {};
2576
2577                 /* Common */
2578                 wqe.num_sge = wr->num_sge;
2579                 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2580                         ibdev_err(&rdev->ibdev,
2581                                   "Limit exceeded for Send SGEs");
2582                         rc = -EINVAL;
2583                         goto bad;
2584                 }
2585
2586                 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2587                 if (payload_sz < 0) {
2588                         rc = -EINVAL;
2589                         goto bad;
2590                 }
2591                 wqe.wr_id = wr->wr_id;
2592
2593                 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2594
2595                 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2596                 if (!rc)
2597                         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2598 bad:
2599                 if (rc) {
2600                         ibdev_err(&rdev->ibdev,
2601                                   "Post send failed opcode = %#x rc = %d",
2602                                   wr->opcode, rc);
2603                         break;
2604                 }
2605                 wr = wr->next;
2606         }
2607         bnxt_qplib_post_send_db(&qp->qplib_qp);
2608         bnxt_ud_qp_hw_stall_workaround(qp);
2609         spin_unlock_irqrestore(&qp->sq_lock, flags);
2610         return rc;
2611 }
2612
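/* ib_post_send verb: translate each ib_send_wr to a qplib SWQE (with
 * special-casing for the legacy raw-Ethernet QP1), post it, and ring
 * the SQ doorbell once for the whole chain.
 */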
2613 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2614                       const struct ib_send_wr **bad_wr)
2615 {
2616         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2617         struct bnxt_qplib_swqe wqe;
2618         int rc = 0, payload_sz = 0;
2619         unsigned long flags;
2620
2621         spin_lock_irqsave(&qp->sq_lock, flags);
2622         while (wr) {
2623                 /* Housekeeping */
2624                 memset(&wqe, 0, sizeof(wqe));
2625
2626                 /* Common */
2627                 wqe.num_sge = wr->num_sge;
2628                 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2629                         ibdev_err(&qp->rdev->ibdev,
2630                                   "Limit exceeded for Send SGEs");
2631                         rc = -EINVAL;
2632                         goto bad;
2633                 }
2634
2635                 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2636                 if (payload_sz < 0) {
2637                         rc = -EINVAL;
2638                         goto bad;
2639                 }
2640                 wqe.wr_id = wr->wr_id;
2641
2642                 switch (wr->opcode) {
2643                 case IB_WR_SEND:
2644                 case IB_WR_SEND_WITH_IMM:
2645                         if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2646                                 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2647                                                                payload_sz);
2648                                 if (rc)
2649                                         goto bad;
2650                                 wqe.rawqp1.lflags |=
2651                                         SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2652                         }
2653                         /* send_flags is a bitmask, so test the bit
2654                          * rather than switch on the exact value
2655                          */
2656                         if (wr->send_flags & IB_SEND_IP_CSUM)
2657                                 wqe.rawqp1.lflags |=
2658                                         SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2661                         fallthrough;
2662                 case IB_WR_SEND_WITH_INV:
2663                         rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2664                         break;
2665                 case IB_WR_RDMA_WRITE:
2666                 case IB_WR_RDMA_WRITE_WITH_IMM:
2667                 case IB_WR_RDMA_READ:
2668                         rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2669                         break;
2670                 case IB_WR_ATOMIC_CMP_AND_SWP:
2671                 case IB_WR_ATOMIC_FETCH_AND_ADD:
2672                         rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2673                         break;
2674                 case IB_WR_RDMA_READ_WITH_INV:
2675                         ibdev_err(&qp->rdev->ibdev,
2676                                   "RDMA Read with Invalidate is not supported");
2677                         rc = -EINVAL;
2678                         goto bad;
2679                 case IB_WR_LOCAL_INV:
2680                         rc = bnxt_re_build_inv_wqe(wr, &wqe);
2681                         break;
2682                 case IB_WR_REG_MR:
2683                         rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2684                         break;
2685                 default:
2686                         /* Unsupported WRs */
2687                         ibdev_err(&qp->rdev->ibdev,
2688                                   "WR (%#x) is not supported", wr->opcode);
2689                         rc = -EINVAL;
2690                         goto bad;
2691                 }
2692                 if (!rc)
2693                         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2694 bad:
2695                 if (rc) {
2696                         ibdev_err(&qp->rdev->ibdev,
2697                                   "post_send failed op:%#x qps = %#x rc = %d\n",
2698                                   wr->opcode, qp->qplib_qp.state, rc);
2699                         *bad_wr = wr;
2700                         break;
2701                 }
2702                 wr = wr->next;
2703         }
2704         bnxt_qplib_post_send_db(&qp->qplib_qp);
2705         bnxt_ud_qp_hw_stall_workaround(qp);
2706         spin_unlock_irqrestore(&qp->sq_lock, flags);
2707
2708         return rc;
2709 }
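
/*
 * Example (illustrative sketch, not part of this driver): the same verb
 * seen from userspace through libibverbs. In-kernel consumers call the
 * entry point above directly and userspace QPs are normally serviced by
 * the provider library, but the WR layout and the bad_wr contract are
 * identical: on failure, *bad_wr points at the first work request that
 * was not posted. 'qp', 'mr', 'buf' and 'len' are assumed to exist.
 *
 *	#include <infiniband/verbs.h>
 *
 *	static int post_one_send(struct ibv_qp *qp, struct ibv_mr *mr,
 *				 void *buf, uint32_t len, uint64_t wr_id)
 *	{
 *		struct ibv_sge sge = {
 *			.addr   = (uintptr_t)buf,
 *			.length = len,
 *			.lkey   = mr->lkey,
 *		};
 *		struct ibv_send_wr wr = {
 *			.wr_id      = wr_id,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,  // must stay within the SQ max_sge
 *			.opcode     = IBV_WR_SEND,
 *			.send_flags = IBV_SEND_SIGNALED,
 *		};
 *		struct ibv_send_wr *bad_wr;
 *
 *		return ibv_post_send(qp, &wr, &bad_wr);
 *	}
 */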
2710
2711 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2712                                        struct bnxt_re_qp *qp,
2713                                        const struct ib_recv_wr *wr)
2714 {
2715         struct bnxt_qplib_swqe wqe;
2716         int rc = 0;
2717
2719         while (wr) {
2720                 /* Housekeeping */
2721                 memset(&wqe, 0, sizeof(wqe));
2722
2723                 /* Common */
2724                 wqe.num_sge = wr->num_sge;
2725                 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2726                         ibdev_err(&rdev->ibdev,
2727                                   "Limit exceeded for Receive SGEs");
2728                         rc = -EINVAL;
2729                         break;
2730                 }
2731                 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2732                 wqe.wr_id = wr->wr_id;
2733                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2734
2735                 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2736                 if (rc)
2737                         break;
2738
2739                 wr = wr->next;
2740         }
2741         if (!rc)
2742                 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2743         return rc;
2744 }
2745
2746 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2747                       const struct ib_recv_wr **bad_wr)
2748 {
2749         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2750         struct bnxt_qplib_swqe wqe;
2751         int rc = 0, payload_sz = 0;
2752         unsigned long flags;
2753         u32 count = 0;
2754
2755         spin_lock_irqsave(&qp->rq_lock, flags);
2756         while (wr) {
2757                 /* Housekeeping */
2758                 memset(&wqe, 0, sizeof(wqe));
2759
2760                 /* Common */
2761                 wqe.num_sge = wr->num_sge;
2762                 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2763                         ibdev_err(&qp->rdev->ibdev,
2764                                   "Limit exceeded for Receive SGEs");
2765                         rc = -EINVAL;
2766                         *bad_wr = wr;
2767                         break;
2768                 }
2769
2770                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2771                                                wr->num_sge);
2772                 wqe.wr_id = wr->wr_id;
2773                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2774
2775                 if (ib_qp->qp_type == IB_QPT_GSI &&
2776                     qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2777                         rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2778                                                               payload_sz);
2779                 if (!rc)
2780                         rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2781                 if (rc) {
2782                         *bad_wr = wr;
2783                         break;
2784                 }
2785
2786                 /* Ring the DB when the posted RQEs reach a threshold value */
2787                 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2788                         bnxt_qplib_post_recv_db(&qp->qplib_qp);
2789                         count = 0;
2790                 }
2791
2792                 wr = wr->next;
2793         }
2794
2795         if (count)
2796                 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2797
2798         spin_unlock_irqrestore(&qp->rq_lock, flags);
2799
2800         return rc;
2801 }
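
/*
 * Example (illustrative sketch, not part of this driver): posting a chain
 * of receive WRs with libibverbs. The loop above consumes the chain one WR
 * at a time and rings the RQ doorbell every BNXT_RE_RQ_WQE_THRESHOLD
 * entries plus once at the end, so linking WRs amortizes the doorbell cost
 * compared with one ibv_post_recv() call per buffer. 'qp', 'mr' and the
 * buffer pool are assumed to exist.
 *
 *	#include <infiniband/verbs.h>
 *
 *	#define RECV_BATCH 16
 *
 *	static int post_recv_chain(struct ibv_qp *qp, struct ibv_mr *mr,
 *				   uint8_t *pool, uint32_t buf_sz, int n)
 *	{
 *		struct ibv_sge sge[RECV_BATCH];
 *		struct ibv_recv_wr wr[RECV_BATCH], *bad_wr;
 *		int i;
 *
 *		if (n > RECV_BATCH)
 *			return -1;	// keep the sketch bounded
 *		for (i = 0; i < n; i++) {
 *			sge[i].addr   = (uintptr_t)(pool + (size_t)i * buf_sz);
 *			sge[i].length = buf_sz;
 *			sge[i].lkey   = mr->lkey;
 *			wr[i].wr_id   = i;
 *			wr[i].sg_list = &sge[i];
 *			wr[i].num_sge = 1;
 *			wr[i].next    = (i == n - 1) ? NULL : &wr[i + 1];
 *		}
 *		return ibv_post_recv(qp, &wr[0], &bad_wr);
 *	}
 */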
2802
2803 /* Completion Queues */
2804 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2805 {
2806         struct bnxt_re_cq *cq;
2807         struct bnxt_qplib_nq *nq;
2808         struct bnxt_re_dev *rdev;
2809
2810         cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2811         rdev = cq->rdev;
2812         nq = cq->qplib_cq.nq;
2813
2814         bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2815         ib_umem_release(cq->umem);
2816
2817         atomic_dec(&rdev->cq_count);
2818         nq->budget--;
2819         kfree(cq->cql);
2820         return 0;
2821 }
2822
2823 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2824                       struct ib_udata *udata)
2825 {
2826         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2827         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2828         struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2829         int rc, entries;
2830         int cqe = attr->cqe;
2831         struct bnxt_qplib_nq *nq = NULL;
2832         unsigned int nq_alloc_cnt;
2833
2834         if (attr->flags)
2835                 return -EOPNOTSUPP;
2836
2837         /* Validate CQ fields */
2838         if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2839                 ibdev_err(&rdev->ibdev, "Failed to create CQ: max CQ depth exceeded");
2840                 return -EINVAL;
2841         }
2842
2843         cq->rdev = rdev;
2844         cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2845
2846         entries = roundup_pow_of_two(cqe + 1);
2847         if (entries > dev_attr->max_cq_wqes + 1)
2848                 entries = dev_attr->max_cq_wqes + 1;
2849
2850         cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2851         cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2852         if (udata) {
2853                 struct bnxt_re_cq_req req;
2854                 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
2855                         udata, struct bnxt_re_ucontext, ib_uctx);
2856                 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2857                         rc = -EFAULT;
2858                         goto fail;
2859                 }
2860
2861                 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2862                                        entries * sizeof(struct cq_base),
2863                                        IB_ACCESS_LOCAL_WRITE);
2864                 if (IS_ERR(cq->umem)) {
2865                         rc = PTR_ERR(cq->umem);
2866                         goto fail;
2867                 }
2868                 cq->qplib_cq.sg_info.umem = cq->umem;
2869                 cq->qplib_cq.dpi = &uctx->dpi;
2870         } else {
2871                 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2872                 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2873                                   GFP_KERNEL);
2874                 if (!cq->cql) {
2875                         rc = -ENOMEM;
2876                         goto fail;
2877                 }
2878
2879                 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2880         }
2881         /*
2882          * Allocate the NQ in a round-robin fashion; nq_alloc_cnt is
2883          * used to derive the NQ index.
2884          */
2885         nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2886         nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2887         cq->qplib_cq.max_wqe = entries;
2888         cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2889         cq->qplib_cq.nq = nq;
2890
2891         rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2892         if (rc) {
2893                 ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
2894                 goto fail;
2895         }
2896
2897         cq->ib_cq.cqe = entries;
2898         cq->cq_period = cq->qplib_cq.period;
2899         nq->budget++;
2900
2901         atomic_inc(&rdev->cq_count);
2902         spin_lock_init(&cq->cq_lock);
2903
2904         if (udata) {
2905                 struct bnxt_re_cq_resp resp;
2906
2907                 resp.cqid = cq->qplib_cq.id;
2908                 resp.tail = cq->qplib_cq.hwq.cons;
2909                 resp.phase = cq->qplib_cq.period;
2910                 resp.rsvd = 0;
2911                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2912                 if (rc) {
2913                         ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
2914                         bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2915                         goto c2fail;
2916                 }
2917         }
2918
2919         return 0;
2920
2921 c2fail:
2922         ib_umem_release(cq->umem);
2923 fail:
2924         kfree(cq->cql);
2925         return rc;
2926 }
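
/*
 * Example (illustrative sketch, not part of this driver): the userspace
 * view of the sizing logic above. Since the depth is rounded up with
 * roundup_pow_of_two(cqe + 1) and clamped to the device maximum, the CQ
 * that comes back can be deeper than requested; the final depth is
 * reported back in cq->cqe. 'ctx' is assumed to be an open device context.
 *
 *	#include <infiniband/verbs.h>
 *
 *	struct ibv_cq *cq = ibv_create_cq(ctx, 100, NULL, NULL, 0);
 *	// cq->cqe now holds the usable depth, which with this driver
 *	// is at least the requested 100 but typically rounded up.
 */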
2927
2928 static u8 __req_to_ib_wc_status(u8 qstatus)
2929 {
2930         switch (qstatus) {
2931         case CQ_REQ_STATUS_OK:
2932                 return IB_WC_SUCCESS;
2933         case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2934                 return IB_WC_BAD_RESP_ERR;
2935         case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2936                 return IB_WC_LOC_LEN_ERR;
2937         case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2938                 return IB_WC_LOC_QP_OP_ERR;
2939         case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2940                 return IB_WC_LOC_PROT_ERR;
2941         case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2942                 return IB_WC_GENERAL_ERR;
2943         case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2944                 return IB_WC_REM_INV_REQ_ERR;
2945         case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2946                 return IB_WC_REM_ACCESS_ERR;
2947         case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2948                 return IB_WC_REM_OP_ERR;
2949         case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2950                 return IB_WC_RNR_RETRY_EXC_ERR;
2951         case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2952                 return IB_WC_RETRY_EXC_ERR;
2953         case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2954                 return IB_WC_WR_FLUSH_ERR;
2955         default:
2956                 return IB_WC_GENERAL_ERR;
2957         }
2959 }
2960
2961 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2962 {
2963         switch (qstatus) {
2964         case CQ_RES_RAWETH_QP1_STATUS_OK:
2965                 return IB_WC_SUCCESS;
2966         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2967                 return IB_WC_LOC_ACCESS_ERR;
2968         case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2969                 return IB_WC_LOC_LEN_ERR;
2970         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2971                 return IB_WC_LOC_PROT_ERR;
2972         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2973                 return IB_WC_LOC_QP_OP_ERR;
2974         case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2975                 return IB_WC_GENERAL_ERR;
2976         case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2977                 return IB_WC_WR_FLUSH_ERR;
2978         case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2979                 return IB_WC_WR_FLUSH_ERR;
2980         default:
2981                 return IB_WC_GENERAL_ERR;
2982         }
2983 }
2984
2985 static u8 __rc_to_ib_wc_status(u8 qstatus)
2986 {
2987         switch (qstatus) {
2988         case CQ_RES_RC_STATUS_OK:
2989                 return IB_WC_SUCCESS;
2990         case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2991                 return IB_WC_LOC_ACCESS_ERR;
2992         case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2993                 return IB_WC_LOC_LEN_ERR;
2994         case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2995                 return IB_WC_LOC_PROT_ERR;
2996         case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2997                 return IB_WC_LOC_QP_OP_ERR;
2998         case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2999                 return IB_WC_GENERAL_ERR;
3000         case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3001                 return IB_WC_REM_INV_REQ_ERR;
3002         case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3003                 return IB_WC_WR_FLUSH_ERR;
3004         case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3005                 return IB_WC_WR_FLUSH_ERR;
3006         default:
3007                 return IB_WC_GENERAL_ERR;
3008         }
3009 }
3010
3011 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3012 {
3013         switch (cqe->type) {
3014         case BNXT_QPLIB_SWQE_TYPE_SEND:
3015                 wc->opcode = IB_WC_SEND;
3016                 break;
3017         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3018                 wc->opcode = IB_WC_SEND;
3019                 wc->wc_flags |= IB_WC_WITH_IMM;
3020                 break;
3021         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3022                 wc->opcode = IB_WC_SEND;
3023                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3024                 break;
3025         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3026                 wc->opcode = IB_WC_RDMA_WRITE;
3027                 break;
3028         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3029                 wc->opcode = IB_WC_RDMA_WRITE;
3030                 wc->wc_flags |= IB_WC_WITH_IMM;
3031                 break;
3032         case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3033                 wc->opcode = IB_WC_RDMA_READ;
3034                 break;
3035         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3036                 wc->opcode = IB_WC_COMP_SWAP;
3037                 break;
3038         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3039                 wc->opcode = IB_WC_FETCH_ADD;
3040                 break;
3041         case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3042                 wc->opcode = IB_WC_LOCAL_INV;
3043                 break;
3044         case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3045                 wc->opcode = IB_WC_REG_MR;
3046                 break;
3047         default:
3048                 wc->opcode = IB_WC_SEND;
3049                 break;
3050         }
3051
3052         wc->status = __req_to_ib_wc_status(cqe->status);
3053 }
3054
3055 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3056                                      u16 raweth_qp1_flags2)
3057 {
3058         bool is_ipv6 = false;
3059
3060         /* raweth_qp1_flags bits 9:6 indicate the itype */
3061         if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3062             != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3063                 return -1;
3064
3065         if (raweth_qp1_flags2 &
3066             CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3067             raweth_qp1_flags2 &
3068             CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3069                 /* raweth_qp1_flags2 bit 8 gives the IP type: 0 - v4, 1 - v6 */
3070                 is_ipv6 = raweth_qp1_flags2 &
3071                           CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE;
3072                 return (is_ipv6 ? BNXT_RE_ROCEV2_IPV6_PACKET :
3073                         BNXT_RE_ROCEV2_IPV4_PACKET);
3076         } else {
3077                 return BNXT_RE_ROCE_V1_PACKET;
3078         }
3079 }
3080
3081 static int bnxt_re_to_ib_nw_type(int nw_type)
3082 {
3083         u8 nw_hdr_type = 0xFF;
3084
3085         switch (nw_type) {
3086         case BNXT_RE_ROCE_V1_PACKET:
3087                 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3088                 break;
3089         case BNXT_RE_ROCEV2_IPV4_PACKET:
3090                 nw_hdr_type = RDMA_NETWORK_IPV4;
3091                 break;
3092         case BNXT_RE_ROCEV2_IPV6_PACKET:
3093                 nw_hdr_type = RDMA_NETWORK_IPV6;
3094                 break;
3095         }
3096         return nw_hdr_type;
3097 }
3098
3099 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3100                                        void *rq_hdr_buf)
3101 {
3102         u8 *tmp_buf = NULL;
3103         struct ethhdr *eth_hdr;
3104         u16 eth_type;
3105         bool rc = false;
3106
3107         tmp_buf = (u8 *)rq_hdr_buf;
3108         /*
3109          * If the destination MAC is not the same as the interface MAC,
3110          * this could be a loopback or a multicast address; check
3111          * whether it is a loopback packet.
3112          */
3113         if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3114                 tmp_buf += 4;
3115                 /* Check the ether type */
3116                 eth_hdr = (struct ethhdr *)tmp_buf;
3117                 eth_type = ntohs(eth_hdr->h_proto);
3118                 switch (eth_type) {
3119                 case ETH_P_IBOE:
3120                         rc = true;
3121                         break;
3122                 case ETH_P_IP:
3123                 case ETH_P_IPV6: {
3124                         u32 len;
3125                         struct udphdr *udp_hdr;
3126
3127                         len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3128                                                       sizeof(struct ipv6hdr));
3129                         tmp_buf += sizeof(struct ethhdr) + len;
3130                         udp_hdr = (struct udphdr *)tmp_buf;
3131                         if (ntohs(udp_hdr->dest) ==
3132                                     ROCE_V2_UDP_DPORT)
3133                                 rc = true;
3134                         break;
3135                         }
3136                 default:
3137                         break;
3138                 }
3139         }
3140
3141         return rc;
3142 }
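
/*
 * Buffer layout implied by the parser above: a non-loopback QP1 buffer
 * starts directly with the Ethernet header, so a destination MAC that
 * matches the netdev means a regular packet. A loopback buffer instead
 * carries 4 bytes of internal header first; the parse then restarts at
 * offset 4 and classifies by ethertype - ETH_P_IBOE is RoCE v1, while
 * ETH_P_IP/ETH_P_IPV6 followed by a UDP destination port of
 * ROCE_V2_UDP_DPORT (4791) is RoCE v2.
 */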
3143
3144 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3145                                          struct bnxt_qplib_cqe *cqe)
3146 {
3147         struct bnxt_re_dev *rdev = gsi_qp->rdev;
3148         struct bnxt_re_sqp_entries *sqp_entry = NULL;
3149         struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3150         struct bnxt_re_ah *gsi_sah;
3151         struct ib_send_wr *swr;
3152         struct ib_ud_wr udwr;
3153         struct ib_recv_wr rwr;
3154         int pkt_type = 0;
3155         u32 tbl_idx;
3156         void *rq_hdr_buf;
3157         dma_addr_t rq_hdr_buf_map;
3158         dma_addr_t shrq_hdr_buf_map;
3159         u32 offset = 0;
3160         u32 skip_bytes = 0;
3161         struct ib_sge s_sge[2];
3162         struct ib_sge r_sge[2];
3163         int rc;
3164
3165         memset(&udwr, 0, sizeof(udwr));
3166         memset(&rwr, 0, sizeof(rwr));
3167         memset(&s_sge, 0, sizeof(s_sge));
3168         memset(&r_sge, 0, sizeof(r_sge));
3169
3170         swr = &udwr.wr;
3171         tbl_idx = cqe->wr_id;
3172
3173         rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3174                         (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3175         rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3176                                                           tbl_idx);
3177
3178         /* Shadow QP header buffer */
3179         shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
3180                                                              tbl_idx);
3181         sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3182
3183         /* Store this cqe */
3184         memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3185         sqp_entry->qp1_qp = gsi_qp;
3186
3187         /* Find packet type from the cqe */
3188
3189         pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3190                                              cqe->raweth_qp1_flags2);
3191         if (pkt_type < 0) {
3192                 ibdev_err(&rdev->ibdev, "Invalid packet\n");
3193                 return -EINVAL;
3194         }
3195
3196         /* Adjust the offset for the user buffer and post in the rq */
3197
3198         if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3199                 offset = 20;
3200
3201         /*
3202          * QP1 loopback packet has 4 bytes of internal header before
3203          * ether header. Skip these four bytes.
3204          */
3205         if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3206                 skip_bytes = 4;
3207
3208         /* First send SGE. Skip the ether header */
3209         s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3210                         + skip_bytes;
3211         s_sge[0].lkey = 0xFFFFFFFF;
3212         s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3213                                 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3214
3215         /* Second Send SGE */
3216         s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3217                         BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3218         if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3219                 s_sge[1].addr += 8;
3220         s_sge[1].lkey = 0xFFFFFFFF;
3221         s_sge[1].length = 256;
3222
3223         /* First recv SGE */
3224
3225         r_sge[0].addr = shrq_hdr_buf_map;
3226         r_sge[0].lkey = 0xFFFFFFFF;
3227         r_sge[0].length = 40;
3228
3229         r_sge[1].addr = sqp_entry->sge.addr + offset;
3230         r_sge[1].lkey = sqp_entry->sge.lkey;
3231         r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3232
3233         /* Create receive work request */
3234         rwr.num_sge = 2;
3235         rwr.sg_list = r_sge;
3236         rwr.wr_id = tbl_idx;
3237         rwr.next = NULL;
3238
3239         rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3240         if (rc) {
3241                 ibdev_err(&rdev->ibdev,
3242                           "Failed to post Rx buffers to shadow QP");
3243                 return -ENOMEM;
3244         }
3245
3246         swr->num_sge = 2;
3247         swr->sg_list = s_sge;
3248         swr->wr_id = tbl_idx;
3249         swr->opcode = IB_WR_SEND;
3250         swr->next = NULL;
3251         gsi_sah = rdev->gsi_ctx.gsi_sah;
3252         udwr.ah = &gsi_sah->ib_ah;
3253         udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3254         udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3255
3256         /* Post the data received in the send queue */
3257         rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3258
3259         return 0;
3260 }
3261
3262 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3263                                           struct bnxt_qplib_cqe *cqe)
3264 {
3265         wc->opcode = IB_WC_RECV;
3266         wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3267         wc->wc_flags |= IB_WC_GRH;
3268 }
3269
3270 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3271                                         u16 vlan_id)
3272 {
3273         /*
3274          * Check if the vlan is configured in the host. If not configured,
3275          * it can be a transparent VLAN, so don't report the vlan id.
3276          */
3277         if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3278                                       htons(ETH_P_8021Q), vlan_id))
3279                 return false;
3280         return true;
3281 }
3282
3283 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3284                                 u16 *vid, u8 *sl)
3285 {
3286         bool ret = false;
3287         u32 metadata;
3288         u16 tpid;
3289
3290         metadata = orig_cqe->raweth_qp1_metadata;
3291         if (orig_cqe->raweth_qp1_flags2 &
3292                 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3293                 tpid = ((metadata &
3294                          CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3295                          CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3296                 if (tpid == ETH_P_8021Q) {
3297                         *vid = metadata &
3298                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3299                         *sl = (metadata &
3300                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3301                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3302                         ret = true;
3303                 }
3304         }
3305
3306         return ret;
3307 }
3308
3309 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3310                                       struct bnxt_qplib_cqe *cqe)
3311 {
3312         wc->opcode = IB_WC_RECV;
3313         wc->status = __rc_to_ib_wc_status(cqe->status);
3314
3315         if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3316                 wc->wc_flags |= IB_WC_WITH_IMM;
3317         if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3318                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3319         if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3320             (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3321                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3322 }
3323
3324 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3325                                              struct ib_wc *wc,
3326                                              struct bnxt_qplib_cqe *cqe)
3327 {
3328         struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3329         struct bnxt_re_qp *gsi_qp = NULL;
3330         struct bnxt_qplib_cqe *orig_cqe = NULL;
3331         struct bnxt_re_sqp_entries *sqp_entry = NULL;
3332         int nw_type;
3333         u32 tbl_idx;
3334         u16 vlan_id;
3335         u8 sl;
3336
3337         tbl_idx = cqe->wr_id;
3338
3339         sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3340         gsi_qp = sqp_entry->qp1_qp;
3341         orig_cqe = &sqp_entry->cqe;
3342
3343         wc->wr_id = sqp_entry->wrid;
3344         wc->byte_len = orig_cqe->length;
3345         wc->qp = &gsi_qp->ib_qp;
3346
3347         wc->ex.imm_data = orig_cqe->immdata;
3348         wc->src_qp = orig_cqe->src_qp;
3349         memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3350         if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3351                 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3352                         wc->vlan_id = vlan_id;
3353                         wc->sl = sl;
3354                         wc->wc_flags |= IB_WC_WITH_VLAN;
3355                 }
3356         }
3357         wc->port_num = 1;
3358         wc->vendor_err = orig_cqe->status;
3359
3360         wc->opcode = IB_WC_RECV;
3361         wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3362         wc->wc_flags |= IB_WC_GRH;
3363
3364         nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3365                                             orig_cqe->raweth_qp1_flags2);
3366         if (nw_type >= 0) {
3367                 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3368                 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3369         }
3370 }
3371
3372 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3373                                       struct ib_wc *wc,
3374                                       struct bnxt_qplib_cqe *cqe)
3375 {
3376         u8 nw_type;
3377
3378         wc->opcode = IB_WC_RECV;
3379         wc->status = __rc_to_ib_wc_status(cqe->status);
3380
3381         if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3382                 wc->wc_flags |= IB_WC_WITH_IMM;
3383         /* report only on GSI QP for Thor */
3384         if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3385                 wc->wc_flags |= IB_WC_GRH;
3386                 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3387                 wc->wc_flags |= IB_WC_WITH_SMAC;
3388                 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3389                         wc->vlan_id = (cqe->cfa_meta & 0xFFF);
3390                         if (wc->vlan_id < 0x1000)
3391                                 wc->wc_flags |= IB_WC_WITH_VLAN;
3392                 }
3393                 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3394                            CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3395                 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3396                 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3397         }
3399 }
3400
3401 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3402 {
3403         struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3404         unsigned long flags;
3405         int rc = 0;
3406
3407         spin_lock_irqsave(&qp->sq_lock, flags);
3408
3409         rc = bnxt_re_bind_fence_mw(lib_qp);
3410         if (!rc) {
3411                 lib_qp->sq.phantom_wqe_cnt++;
3412                 ibdev_dbg(&qp->rdev->ibdev,
3413                           "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3414                           lib_qp->id, lib_qp->sq.hwq.prod,
3415                           HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3416                           lib_qp->sq.phantom_wqe_cnt);
3417         }
3418
3419         spin_unlock_irqrestore(&qp->sq_lock, flags);
3420         return rc;
3421 }
3422
3423 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3424 {
3425         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3426         struct bnxt_re_qp *qp, *sh_qp;
3427         struct bnxt_qplib_cqe *cqe;
3428         int i, ncqe, budget;
3429         struct bnxt_qplib_q *sq;
3430         struct bnxt_qplib_qp *lib_qp;
3431         u32 tbl_idx;
3432         struct bnxt_re_sqp_entries *sqp_entry = NULL;
3433         unsigned long flags;
3434
3435         spin_lock_irqsave(&cq->cq_lock, flags);
3436         budget = min_t(u32, num_entries, cq->max_cql);
3437         num_entries = budget;
3438         if (!cq->cql) {
3439                 ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3440                 goto exit;
3441         }
3442         cqe = &cq->cql[0];
3443         while (budget) {
3444                 lib_qp = NULL;
3445                 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3446                 if (lib_qp) {
3447                         sq = &lib_qp->sq;
3448                         if (sq->send_phantom) {
3449                                 qp = container_of(lib_qp,
3450                                                   struct bnxt_re_qp, qplib_qp);
3451                                 if (send_phantom_wqe(qp) == -ENOMEM)
3452                                         ibdev_err(&cq->rdev->ibdev,
3453                                                   "Phantom failed! Scheduled to send again\n");
3454                                 else
3455                                         sq->send_phantom = false;
3456                         }
3457                 }
3458                 if (ncqe < budget)
3459                         ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3460                                                               cqe + ncqe,
3461                                                               budget - ncqe);
3462
3463                 if (!ncqe)
3464                         break;
3465
3466                 for (i = 0; i < ncqe; i++, cqe++) {
3467                         /* Transcribe each qplib_cqe back to an ib_wc */
3468                         memset(wc, 0, sizeof(*wc));
3469
3470                         wc->wr_id = cqe->wr_id;
3471                         wc->byte_len = cqe->length;
3472                         qp = container_of
3473                                 ((struct bnxt_qplib_qp *)
3474                                  (unsigned long)(cqe->qp_handle),
3475                                  struct bnxt_re_qp, qplib_qp);
3476                         if (!qp) {
3477                                 ibdev_err(&cq->rdev->ibdev, "POLL CQ : bad QP handle");
3478                                 continue;
3479                         }
3480                         wc->qp = &qp->ib_qp;
3481                         wc->ex.imm_data = cqe->immdata;
3482                         wc->src_qp = cqe->src_qp;
3483                         memcpy(wc->smac, cqe->smac, ETH_ALEN);
3484                         wc->port_num = 1;
3485                         wc->vendor_err = cqe->status;
3486
3487                         switch (cqe->opcode) {
3488                         case CQ_BASE_CQE_TYPE_REQ:
3489                                 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3490                                 if (sh_qp &&
3491                                     qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3492                                         /* Handle this completion with
3493                                          * the stored completion
3494                                          */
3495                                         memset(wc, 0, sizeof(*wc));
3496                                         continue;
3497                                 }
3498                                 bnxt_re_process_req_wc(wc, cqe);
3499                                 break;
3500                         case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3501                                 if (!cqe->status) {
3502                                         int rc = 0;
3503
3504                                         rc = bnxt_re_process_raw_qp_pkt_rx
3505                                                                 (qp, cqe);
3506                                         if (!rc) {
3507                                                 memset(wc, 0, sizeof(*wc));
3508                                                 continue;
3509                                         }
3510                                         cqe->status = -1;
3511                                 }
3512                                 /* Errors need not be looped back.
3513                                  * But change the wr_id to the one
3514                                  * stored in the table
3515                                  */
3516                                 tbl_idx = cqe->wr_id;
3517                                 sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3518                                 wc->wr_id = sqp_entry->wrid;
3519                                 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3520                                 break;
3521                         case CQ_BASE_CQE_TYPE_RES_RC:
3522                                 bnxt_re_process_res_rc_wc(wc, cqe);
3523                                 break;
3524                         case CQ_BASE_CQE_TYPE_RES_UD:
3525                                 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3526                                 if (sh_qp &&
3527                                     qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3528                                         /* Handle this completion with
3529                                          * the stored completion
3530                                          */
3531                                         if (cqe->status) {
3532                                                 continue;
3533                                         } else {
3534                                                 bnxt_re_process_res_shadow_qp_wc
3535                                                                 (qp, wc, cqe);
3536                                                 break;
3537                                         }
3538                                 }
3539                                 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3540                                 break;
3541                         default:
3542                                 ibdev_err(&cq->rdev->ibdev,
3543                                           "POLL CQ : type 0x%x not handled",
3544                                           cqe->opcode);
3545                                 continue;
3546                         }
3547                         wc++;
3548                         budget--;
3549                 }
3550         }
3551 exit:
3552         spin_unlock_irqrestore(&cq->cq_lock, flags);
3553         return num_entries - budget;
3554 }
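
/*
 * Example (illustrative sketch, not part of this driver): draining
 * completions. The entry point above serves in-kernel consumers; a
 * userspace process uses the equivalent libibverbs call, serviced by the
 * provider library, but the draining pattern is the same. Note that this
 * driver caps a single poll at cq->max_cql entries, so callers should
 * loop until zero is returned rather than assume one call empties the CQ.
 *
 *	#include <stdio.h>
 *	#include <infiniband/verbs.h>
 *
 *	struct ibv_wc wc[16];
 *	int i, n;
 *
 *	do {
 *		n = ibv_poll_cq(cq, 16, wc);	// 'cq' assumed to exist
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status != IBV_WC_SUCCESS)
 *				fprintf(stderr, "wr %llu failed: %s\n",
 *					(unsigned long long)wc[i].wr_id,
 *					ibv_wc_status_str(wc[i].status));
 *		}
 *	} while (n > 0);
 */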
3555
3556 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3557                           enum ib_cq_notify_flags ib_cqn_flags)
3558 {
3559         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3560         int type = 0, rc = 0;
3561         unsigned long flags;
3562
3563         spin_lock_irqsave(&cq->cq_lock, flags);
3564         /* Trigger on the very next completion */
3565         if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3566                 type = DBC_DBC_TYPE_CQ_ARMALL;
3567         /* Trigger on the next solicited completion */
3568         else if (ib_cqn_flags & IB_CQ_SOLICITED)
3569                 type = DBC_DBC_TYPE_CQ_ARMSE;
3570
3571         /* Poll to see if there are missed events */
3572         if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3573             !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3574                 rc = 1;
3575                 goto exit;
3576         }
3577         bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3578
3579 exit:
3580         spin_unlock_irqrestore(&cq->cq_lock, flags);
3581         return rc;
3582 }
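
/*
 * Example (illustrative sketch, not part of this driver): the arm-and-drain
 * pattern this verb supports. Re-arming before the final drain closes the
 * window in which a completion could arrive unnoticed; the
 * IB_CQ_REPORT_MISSED_EVENTS handling above (returning 1) exists for the
 * same race. Assumes 'cq' was created with a completion 'channel'.
 *
 *	#include <infiniband/verbs.h>
 *
 *	struct ibv_cq *evt_cq;
 *	void *cq_ctx;
 *
 *	ibv_req_notify_cq(cq, 0);			// arm for any completion
 *	ibv_get_cq_event(channel, &evt_cq, &cq_ctx);	// block for the event
 *	ibv_ack_cq_events(evt_cq, 1);
 *	ibv_req_notify_cq(evt_cq, 0);			// re-arm before draining
 *	// ...then poll until empty, as sketched after bnxt_re_poll_cq()...
 */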
3583
3584 /* Memory Regions */
3585 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3586 {
3587         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3588         struct bnxt_re_dev *rdev = pd->rdev;
3589         struct bnxt_re_mr *mr;
3590         int rc;
3591
3592         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3593         if (!mr)
3594                 return ERR_PTR(-ENOMEM);
3595
3596         mr->rdev = rdev;
3597         mr->qplib_mr.pd = &pd->qplib_pd;
3598         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3599         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3600
3601         /* Allocate and register 0 as the address */
3602         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3603         if (rc)
3604                 goto fail;
3605
3606         mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3607         mr->qplib_mr.total_size = -1; /* Infinite length */
3608         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
3609                                PAGE_SIZE);
3610         if (rc)
3611                 goto fail_mr;
3612
3613         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3614         if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3615                                IB_ACCESS_REMOTE_ATOMIC))
3616                 mr->ib_mr.rkey = mr->ib_mr.lkey;
3617         atomic_inc(&rdev->mr_count);
3618
3619         return &mr->ib_mr;
3620
3621 fail_mr:
3622         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3623 fail:
3624         kfree(mr);
3625         return ERR_PTR(rc);
3626 }
3627
3628 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3629 {
3630         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3631         struct bnxt_re_dev *rdev = mr->rdev;
3632         int rc;
3633
3634         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3635         if (rc) {
3636                 ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3637                 return rc;
3638         }
3639
3640         if (mr->pages) {
3641                 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3642                                                         &mr->qplib_frpl);
3643                 kfree(mr->pages);
3644                 mr->npages = 0;
3645                 mr->pages = NULL;
3646         }
3647         ib_umem_release(mr->ib_umem);
3648
3649         kfree(mr);
3650         atomic_dec(&rdev->mr_count);
3651         return rc;
3652 }
3653
3654 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3655 {
3656         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3657
3658         if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3659                 return -ENOMEM;
3660
3661         mr->pages[mr->npages++] = addr;
3662         return 0;
3663 }
3664
3665 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3666                       unsigned int *sg_offset)
3667 {
3668         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3669
3670         mr->npages = 0;
3671         return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3672 }
3673
3674 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3675                                u32 max_num_sg)
3676 {
3677         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3678         struct bnxt_re_dev *rdev = pd->rdev;
3679         struct bnxt_re_mr *mr = NULL;
3680         int rc;
3681
3682         if (type != IB_MR_TYPE_MEM_REG) {
3683                 ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
3684                 return ERR_PTR(-EINVAL);
3685         }
3686         if (max_num_sg > MAX_PBL_LVL_1_PGS)
3687                 return ERR_PTR(-EINVAL);
3688
3689         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3690         if (!mr)
3691                 return ERR_PTR(-ENOMEM);
3692
3693         mr->rdev = rdev;
3694         mr->qplib_mr.pd = &pd->qplib_pd;
3695         mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3696         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3697
3698         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3699         if (rc)
3700                 goto bail;
3701
3702         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3703         mr->ib_mr.rkey = mr->ib_mr.lkey;
3704
3705         mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3706         if (!mr->pages) {
3707                 rc = -ENOMEM;
3708                 goto fail;
3709         }
3710         rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3711                                                  &mr->qplib_frpl, max_num_sg);
3712         if (rc) {
3713                 ibdev_err(&rdev->ibdev,
3714                           "Failed to allocate HW FR page list");
3715                 goto fail_mr;
3716         }
3717
3718         atomic_inc(&rdev->mr_count);
3719         return &mr->ib_mr;
3720
3721 fail_mr:
3722         kfree(mr->pages);
3723 fail:
3724         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3725 bail:
3726         kfree(mr);
3727         return ERR_PTR(rc);
3728 }
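
/*
 * Example (illustrative sketch, not part of this driver): how an in-kernel
 * ULP would exercise the two routines above. ib_map_mr_sg() walks the
 * scatterlist through bnxt_re_set_page() to fill mr->pages, and the
 * IB_WR_REG_MR work request is then translated by bnxt_re_build_reg_wqe()
 * on the post-send path. 'my_fastreg' is a hypothetical helper; error
 * unwinding (ib_dereg_mr) is elided.
 *
 *	static int my_fastreg(struct ib_pd *pd, struct ib_qp *qp,
 *			      struct scatterlist *sg, int nents)
 *	{
 *		const struct ib_send_wr *bad_wr;
 *		struct ib_reg_wr reg_wr = {};
 *		struct ib_mr *mr;
 *		int n;
 *
 *		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *		if (IS_ERR(mr))
 *			return PTR_ERR(mr);
 *
 *		n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *		if (n < nents)
 *			return -EINVAL;	// could not map every SG entry
 *
 *		reg_wr.wr.opcode = IB_WR_REG_MR;
 *		reg_wr.mr = mr;
 *		reg_wr.key = mr->rkey;
 *		reg_wr.access = IB_ACCESS_LOCAL_WRITE |
 *				IB_ACCESS_REMOTE_READ;
 *		return ib_post_send(qp, &reg_wr.wr, &bad_wr);
 *	}
 */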
3729
3730 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3731                                struct ib_udata *udata)
3732 {
3733         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3734         struct bnxt_re_dev *rdev = pd->rdev;
3735         struct bnxt_re_mw *mw;
3736         int rc;
3737
3738         mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3739         if (!mw)
3740                 return ERR_PTR(-ENOMEM);
3741         mw->rdev = rdev;
3742         mw->qplib_mw.pd = &pd->qplib_pd;
3743
3744         mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3745                                CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3746                                CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3747         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3748         if (rc) {
3749                 ibdev_err(&rdev->ibdev, "Allocate MW failed!");
3750                 goto fail;
3751         }
3752         mw->ib_mw.rkey = mw->qplib_mw.rkey;
3753
3754         atomic_inc(&rdev->mw_count);
3755         return &mw->ib_mw;
3756
3757 fail:
3758         kfree(mw);
3759         return ERR_PTR(rc);
3760 }
3761
3762 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3763 {
3764         struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3765         struct bnxt_re_dev *rdev = mw->rdev;
3766         int rc;
3767
3768         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3769         if (rc) {
3770                 ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
3771                 return rc;
3772         }
3773
3774         kfree(mw);
3775         atomic_dec(&rdev->mw_count);
3776         return rc;
3777 }
3778
3779 /* uverbs */
3780 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3781                                   u64 virt_addr, int mr_access_flags,
3782                                   struct ib_udata *udata)
3783 {
3784         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3785         struct bnxt_re_dev *rdev = pd->rdev;
3786         struct bnxt_re_mr *mr;
3787         struct ib_umem *umem;
3788         unsigned long page_size;
3789         int umem_pgs, rc;
3790
3791         if (length > BNXT_RE_MAX_MR_SIZE) {
3792                 ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported: %lld\n",
3793                           length, BNXT_RE_MAX_MR_SIZE);
3794                 return ERR_PTR(-ENOMEM);
3795         }
3796
3797         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3798         if (!mr)
3799                 return ERR_PTR(-ENOMEM);
3800
3801         mr->rdev = rdev;
3802         mr->qplib_mr.pd = &pd->qplib_pd;
3803         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3804         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3805
3806         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3807         if (rc) {
3808                 ibdev_err(&rdev->ibdev, "Failed to allocate MR");
3809                 goto free_mr;
3810         }
3811         /* The fixed portion of the rkey is the same as the lkey */
3812         mr->ib_mr.rkey = mr->qplib_mr.rkey;
3813
3814         umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
3815         if (IS_ERR(umem)) {
3816                 ibdev_err(&rdev->ibdev, "Failed to get umem");
3817                 rc = -EFAULT;
3818                 goto free_mrw;
3819         }
3820         mr->ib_umem = umem;
3821
3822         mr->qplib_mr.va = virt_addr;
3823         page_size = ib_umem_find_best_pgsz(
3824                 umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr);
3825         if (!page_size) {
3826                 ibdev_err(&rdev->ibdev, "umem page size unsupported!");
3827                 rc = -EFAULT;
3828                 goto free_umem;
3829         }
3830         mr->qplib_mr.total_size = length;
3831
3832         umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
3833         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
3834                                umem_pgs, page_size);
3835         if (rc) {
3836                 ibdev_err(&rdev->ibdev, "Failed to register user MR");
3837                 goto free_umem;
3838         }
3839
3840         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3841         mr->ib_mr.rkey = mr->qplib_mr.lkey;
3842         atomic_inc(&rdev->mr_count);
3843
3844         return &mr->ib_mr;
3845 free_umem:
3846         ib_umem_release(umem);
3847 free_mrw:
3848         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3849 free_mr:
3850         kfree(mr);
3851         return ERR_PTR(rc);
3852 }
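
/*
 * Example (illustrative sketch, not part of this driver): the userspace
 * call that lands here. ibv_reg_mr() pins 'len' bytes starting at 'buf',
 * after which the code above selects the best HW page size (4K or 2M)
 * with ib_umem_find_best_pgsz(). 'pd', 'buf' and 'len' are assumed.
 *
 *	#include <infiniband/verbs.h>
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_READ |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *	// mr->lkey feeds local SGEs; mr->rkey is handed to the peer
 *	// for RDMA reads/writes into this region.
 */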
3853
3854 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
3855 {
3856         struct ib_device *ibdev = ctx->device;
3857         struct bnxt_re_ucontext *uctx =
3858                 container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
3859         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3860         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3861         struct bnxt_re_uctx_resp resp;
3862         u32 chip_met_rev_num = 0;
3863         int rc;
3864
3865         ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
3866
3867         if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3868                 ibdev_dbg(ibdev, "ABI version %u is not supported by the device (expects %d)",
3869                           ibdev->ops.uverbs_abi_ver, BNXT_RE_ABI_VERSION);
3870                 return -EPERM;
3871         }
3872
3873         uctx->rdev = rdev;
3874
3875         uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3876         if (!uctx->shpg) {
3877                 rc = -ENOMEM;
3878                 goto fail;
3879         }
3880         spin_lock_init(&uctx->sh_lock);
3881
3882         resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
3883         chip_met_rev_num = rdev->chip_ctx->chip_num;
3884         chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
3885                              BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
3886         chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
3887                              BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
3888         resp.chip_id0 = chip_met_rev_num;
3889         /* Future extension of chip info */
3890         resp.chip_id1 = 0;
3891         /* Temp, use xa_alloc instead */
3892         resp.dev_id = rdev->en_dev->pdev->devfn;
3893         resp.max_qp = rdev->qplib_ctx.qpc_count;
3894         resp.pg_size = PAGE_SIZE;
3895         resp.cqe_sz = sizeof(struct cq_base);
3896         resp.max_cqd = dev_attr->max_cq_wqes;
3897         resp.rsvd    = 0;
3898
3899         rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
3900         if (rc) {
3901                 ibdev_err(ibdev, "Failed to copy user context");
3902                 rc = -EFAULT;
3903                 goto cfail;
3904         }
3905
3906         return 0;
3907 cfail:
3908         free_page((unsigned long)uctx->shpg);
3909         uctx->shpg = NULL;
3910 fail:
3911         return rc;
3912 }
3913
3914 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3915 {
3916         struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3917                                                    struct bnxt_re_ucontext,
3918                                                    ib_uctx);
3919
3920         struct bnxt_re_dev *rdev = uctx->rdev;
3921
3922         if (uctx->shpg)
3923                 free_page((unsigned long)uctx->shpg);
3924
3925         if (uctx->dpi.dbr) {
3926                 /* Free the DPI (it was allocated when the application
3927                  * created its first PD) and mark the context dpi as NULL.
3928                  */
3929                 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3930                                        &rdev->qplib_res.dpi_tbl, &uctx->dpi);
3931                 uctx->dpi.dbr = NULL;
3932         }
3933 }
3934
3935 /* Map a doorbell (DPI) page or the shared page into the user application */
3936 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3937 {
3938         struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3939                                                    struct bnxt_re_ucontext,
3940                                                    ib_uctx);
3941         struct bnxt_re_dev *rdev = uctx->rdev;
3942         u64 pfn;
3943
3944         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3945                 return -EINVAL;
3946
3947         if (vma->vm_pgoff) {
3948                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3949                 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3950                                        PAGE_SIZE, vma->vm_page_prot)) {
3951                         ibdev_err(&rdev->ibdev, "Failed to map DPI");
3952                         return -EAGAIN;
3953                 }
3954         } else {
3955                 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3956                 if (remap_pfn_range(vma, vma->vm_start,
3957                                     pfn, PAGE_SIZE, vma->vm_page_prot)) {
3958                         ibdev_err(&rdev->ibdev, "Failed to map shared page");
3959                         return -EAGAIN;
3960                 }
3961         }
3962
3963         return 0;
3964 }
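
/*
 * Example (illustrative sketch, not part of this driver): how a userspace
 * provider library would consume the handler above. An offset of 0 maps
 * the shared page; a non-zero page offset is treated as the PFN of a
 * doorbell (DPI) page. The real offset comes from the kernel's ucontext/PD
 * responses, so 'dbr_pgoff' below is a placeholder.
 *
 *	#include <sys/mman.h>
 *
 *	// cmd_fd is the open uverbs device fd held by the library
 *	void *shpg = mmap(NULL, pg_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, cmd_fd, 0);
 *	void *dbr  = mmap(NULL, pg_size, PROT_WRITE, MAP_SHARED,
 *			  cmd_fd, dbr_pgoff * pg_size);
 */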