1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: IB Verbs interpreter
37  */
38
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
51
52 #include "bnxt_ulp.h"
53
54 #include "roce_hsi.h"
55 #include "qplib_res.h"
56 #include "qplib_sp.h"
57 #include "qplib_fp.h"
58 #include "qplib_rcfw.h"
59
60 #include "bnxt_re.h"
61 #include "ib_verbs.h"
62 #include <rdma/bnxt_re-abi.h>
63
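/* Helpers to translate between the IB verbs access-flag bits and the
 * BNXT_QPLIB_ACCESS_* bit-mask consumed by the qplib/firmware layer.
 */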
64 static int __from_ib_access_flags(int iflags)
65 {
66         int qflags = 0;
67
68         if (iflags & IB_ACCESS_LOCAL_WRITE)
69                 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70         if (iflags & IB_ACCESS_REMOTE_READ)
71                 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72         if (iflags & IB_ACCESS_REMOTE_WRITE)
73                 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74         if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75                 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76         if (iflags & IB_ACCESS_MW_BIND)
77                 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78         if (iflags & IB_ZERO_BASED)
79                 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80         if (iflags & IB_ACCESS_ON_DEMAND)
81                 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82         return qflags;
83 }
84
85 static enum ib_access_flags __to_ib_access_flags(int qflags)
86 {
87         enum ib_access_flags iflags = 0;
88
89         if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90                 iflags |= IB_ACCESS_LOCAL_WRITE;
91         if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92                 iflags |= IB_ACCESS_REMOTE_WRITE;
93         if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94                 iflags |= IB_ACCESS_REMOTE_READ;
95         if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96                 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97         if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98                 iflags |= IB_ACCESS_MW_BIND;
99         if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100                 iflags |= IB_ZERO_BASED;
101         if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102                 iflags |= IB_ACCESS_ON_DEMAND;
103         return iflags;
104 }
105
106 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107                              struct bnxt_qplib_sge *sg_list, int num)
108 {
109         int i, total = 0;
110
111         for (i = 0; i < num; i++) {
112                 sg_list[i].addr = ib_sg_list[i].addr;
113                 sg_list[i].lkey = ib_sg_list[i].lkey;
114                 sg_list[i].size = ib_sg_list[i].length;
115                 total += sg_list[i].size;
116         }
117         return total;
118 }
119
120 /* Device */
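/* Return the netdev backing this IB device; a reference is taken with
 * dev_hold() under rcu_read_lock(), so the caller must drop it.
 */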
121 struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122 {
123         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124         struct net_device *netdev = NULL;
125
126         rcu_read_lock();
127         if (rdev)
128                 netdev = rdev->netdev;
129         if (netdev)
130                 dev_hold(netdev);
131
132         rcu_read_unlock();
133         return netdev;
134 }
135
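/* Report device capabilities from the attributes cached in rdev->dev_attr;
 * features the HW does not support are reported as zero.
 */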
136 int bnxt_re_query_device(struct ib_device *ibdev,
137                          struct ib_device_attr *ib_attr,
138                          struct ib_udata *udata)
139 {
140         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142
143         memset(ib_attr, 0, sizeof(*ib_attr));
144         memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
145                min(sizeof(dev_attr->fw_ver),
146                    sizeof(ib_attr->fw_ver)));
147         bnxt_qplib_get_guid(rdev->netdev->dev_addr,
148                             (u8 *)&ib_attr->sys_image_guid);
149         ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
150         ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
151
152         ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
153         ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
154         ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
155         ib_attr->max_qp = dev_attr->max_qp;
156         ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
157         ib_attr->device_cap_flags =
158                                     IB_DEVICE_CURR_QP_STATE_MOD
159                                     | IB_DEVICE_RC_RNR_NAK_GEN
160                                     | IB_DEVICE_SHUTDOWN_PORT
161                                     | IB_DEVICE_SYS_IMAGE_GUID
162                                     | IB_DEVICE_LOCAL_DMA_LKEY
163                                     | IB_DEVICE_RESIZE_MAX_WR
164                                     | IB_DEVICE_PORT_ACTIVE_EVENT
165                                     | IB_DEVICE_N_NOTIFY_CQ
166                                     | IB_DEVICE_MEM_WINDOW
167                                     | IB_DEVICE_MEM_WINDOW_TYPE_2B
168                                     | IB_DEVICE_MEM_MGT_EXTENSIONS;
169         ib_attr->max_sge = dev_attr->max_qp_sges;
170         ib_attr->max_sge_rd = dev_attr->max_qp_sges;
171         ib_attr->max_cq = dev_attr->max_cq;
172         ib_attr->max_cqe = dev_attr->max_cq_wqes;
173         ib_attr->max_mr = dev_attr->max_mr;
174         ib_attr->max_pd = dev_attr->max_pd;
175         ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
176         ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
177         if (dev_attr->is_atomic) {
178                 ib_attr->atomic_cap = IB_ATOMIC_HCA;
179                 ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
180         }
181
182         ib_attr->max_ee_rd_atom = 0;
183         ib_attr->max_res_rd_atom = 0;
184         ib_attr->max_ee_init_rd_atom = 0;
185         ib_attr->max_ee = 0;
186         ib_attr->max_rdd = 0;
187         ib_attr->max_mw = dev_attr->max_mw;
188         ib_attr->max_raw_ipv6_qp = 0;
189         ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
190         ib_attr->max_mcast_grp = 0;
191         ib_attr->max_mcast_qp_attach = 0;
192         ib_attr->max_total_mcast_qp_attach = 0;
193         ib_attr->max_ah = dev_attr->max_ah;
194
195         ib_attr->max_fmr = 0;
196         ib_attr->max_map_per_fmr = 0;
197
198         ib_attr->max_srq = dev_attr->max_srq;
199         ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
200         ib_attr->max_srq_sge = dev_attr->max_srq_sges;
201
202         ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
203
204         ib_attr->max_pkeys = 1;
205         ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
206         return 0;
207 }
208
209 int bnxt_re_modify_device(struct ib_device *ibdev,
210                           int device_modify_mask,
211                           struct ib_device_modify *device_modify)
212 {
213         switch (device_modify_mask) {
214         case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
215                 /* Modifying the GUID requires modification of the GID table */
216                 /* GUID should be made READ-ONLY */
217                 break;
218         case IB_DEVICE_MODIFY_NODE_DESC:
219                 /* Node Desc should be made READ-ONLY */
220                 break;
221         default:
222                 break;
223         }
224         return 0;
225 }
226
227 /* Port */
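/* Port state is derived from the netdev: the port is reported ACTIVE only
 * when the interface is running and carrier is up.
 */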
228 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
229                        struct ib_port_attr *port_attr)
230 {
231         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
232         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
233
234         memset(port_attr, 0, sizeof(*port_attr));
235
236         if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
237                 port_attr->state = IB_PORT_ACTIVE;
238                 port_attr->phys_state = 5;
239         } else {
240                 port_attr->state = IB_PORT_DOWN;
241                 port_attr->phys_state = 3;
242         }
243         port_attr->max_mtu = IB_MTU_4096;
244         port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
245         port_attr->gid_tbl_len = dev_attr->max_sgid;
246         port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
247                                     IB_PORT_DEVICE_MGMT_SUP |
248                                     IB_PORT_VENDOR_CLASS_SUP |
249                                     IB_PORT_IP_BASED_GIDS;
250
251         port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
252         port_attr->bad_pkey_cntr = 0;
253         port_attr->qkey_viol_cntr = 0;
254         port_attr->pkey_tbl_len = dev_attr->max_pkey;
255         port_attr->lid = 0;
256         port_attr->sm_lid = 0;
257         port_attr->lmc = 0;
258         port_attr->max_vl_num = 4;
259         port_attr->sm_sl = 0;
260         port_attr->subnet_timeout = 0;
261         port_attr->init_type_reply = 0;
262         port_attr->active_speed = rdev->active_speed;
263         port_attr->active_width = rdev->active_width;
264
265         return 0;
266 }
267
268 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
269                                struct ib_port_immutable *immutable)
270 {
271         struct ib_port_attr port_attr;
272
273         if (bnxt_re_query_port(ibdev, port_num, &port_attr))
274                 return -EINVAL;
275
276         immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
277         immutable->gid_tbl_len = port_attr.gid_tbl_len;
278         immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
279         immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
280         immutable->max_mad_size = IB_MGMT_MAD_SIZE;
281         return 0;
282 }
283
284 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
285 {
286         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
287
288         snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
289                  rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
290                  rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
291 }
292
293 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
294                        u16 index, u16 *pkey)
295 {
296         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
297
298         /* Ignore port_num */
299
300         memset(pkey, 0, sizeof(*pkey));
301         return bnxt_qplib_get_pkey(&rdev->qplib_res,
302                                    &rdev->qplib_res.pkey_tbl, index, pkey);
303 }
304
305 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
306                       int index, union ib_gid *gid)
307 {
308         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
309         int rc = 0;
310
311         /* Ignore port_num */
312         memset(gid, 0, sizeof(*gid));
313         rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
314                                  &rdev->qplib_res.sgid_tbl, index,
315                                  (struct bnxt_qplib_gid *)gid);
316         return rc;
317 }
318
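/* Drop one reference on the per-index GID context; the SGID is removed
 * from HW only when the last reference goes away.
 */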
319 int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
320                     unsigned int index, void **context)
321 {
322         int rc = 0;
323         struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
324         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
325         struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
326         struct bnxt_qplib_gid *gid_to_del;
327
328         /* Delete the entry from the hardware */
329         ctx = *context;
330         if (!ctx)
331                 return -EINVAL;
332
333         if (sgid_tbl && sgid_tbl->active) {
334                 if (ctx->idx >= sgid_tbl->max)
335                         return -EINVAL;
336                 gid_to_del = &sgid_tbl->tbl[ctx->idx];
337                 /* DEL_GID is called in WQ context (netdevice_event_work_handler)
338                  * or via the ib_unregister_device path. In the former case QP1
339                  * may not have been destroyed yet; just return, as FW needs the
340                  * entry to be present and will fail its deletion otherwise.
341                  * We could get invoked again after QP1 is destroyed, OR get an
342                  * ADD_GID call with a different GID value for the same index,
343                  * where we would issue a MODIFY_GID cmd to update the entry -- TBD
344                  */
345                 if (ctx->idx == 0 &&
346                     rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
347                     ctx->refcnt == 1 && rdev->qp1_sqp) {
348                         dev_dbg(rdev_to_dev(rdev),
349                                 "Trying to delete GID0 while QP1 is alive\n");
350                         return -EFAULT;
351                 }
352                 ctx->refcnt--;
353                 if (!ctx->refcnt) {
354                         rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
355                         if (rc) {
356                                 dev_err(rdev_to_dev(rdev),
357                                         "Failed to remove GID: %#x", rc);
358                         } else {
359                                 ctx_tbl = sgid_tbl->ctx;
360                                 ctx_tbl[ctx->idx] = NULL;
361                                 kfree(ctx);
362                         }
363                 }
364         } else {
365                 return -EINVAL;
366         }
367         return rc;
368 }
369
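/* Add an SGID to the HW table. If the entry already exists (the stack may
 * add the same GID more than once, e.g. for RoCE v1 and v2), just take
 * another reference on the existing context.
 */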
370 int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
371                     unsigned int index, const union ib_gid *gid,
372                     const struct ib_gid_attr *attr, void **context)
373 {
374         int rc;
375         u32 tbl_idx = 0;
376         u16 vlan_id = 0xFFFF;
377         struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
378         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
379         struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
380
381         if ((attr->ndev) && is_vlan_dev(attr->ndev))
382                 vlan_id = vlan_dev_vlan_id(attr->ndev);
383
384         rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
385                                  rdev->qplib_res.netdev->dev_addr,
386                                  vlan_id, true, &tbl_idx);
387         if (rc == -EALREADY) {
388                 ctx_tbl = sgid_tbl->ctx;
389                 ctx_tbl[tbl_idx]->refcnt++;
390                 *context = ctx_tbl[tbl_idx];
391                 return 0;
392         }
393
394         if (rc < 0) {
395                 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
396                 return rc;
397         }
398
399         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
400         if (!ctx)
401                 return -ENOMEM;
402         ctx_tbl = sgid_tbl->ctx;
403         ctx->idx = tbl_idx;
404         ctx->refcnt = 1;
405         ctx_tbl[tbl_idx] = ctx;
406         *context = ctx;
407
408         return rc;
409 }
410
411 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
412                                             u8 port_num)
413 {
414         return IB_LINK_LAYER_ETHERNET;
415 }
416
417 #define BNXT_RE_FENCE_PBL_SIZE  DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
418
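/* Fence infrastructure: each kernel PD carries a small DMA-mapped buffer
 * registered as an MR, a type-1 MW over it, and a pre-built BIND_MW WQE
 * (flagged with UC_FENCE) that bnxt_re_bind_fence_mw() can post on a QP's
 * SQ to act as a fence.
 */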
419 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
420 {
421         struct bnxt_re_fence_data *fence = &pd->fence;
422         struct ib_mr *ib_mr = &fence->mr->ib_mr;
423         struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
424
425         memset(wqe, 0, sizeof(*wqe));
426         wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
427         wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
428         wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
429         wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
430         wqe->bind.zero_based = false;
431         wqe->bind.parent_l_key = ib_mr->lkey;
432         wqe->bind.va = (u64)(unsigned long)fence->va;
433         wqe->bind.length = fence->size;
434         wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
435         wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
436
437         /* Save the initial rkey in fence structure for now;
438          * wqe->bind.r_key will be set at (re)bind time.
439          */
440         fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
441 }
442
443 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
444 {
445         struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
446                                              qplib_qp);
447         struct ib_pd *ib_pd = qp->ib_qp.pd;
448         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
449         struct bnxt_re_fence_data *fence = &pd->fence;
450         struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
451         struct bnxt_qplib_swqe wqe;
452         int rc;
453
454         memcpy(&wqe, fence_wqe, sizeof(wqe));
455         wqe.bind.r_key = fence->bind_rkey;
456         fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
457
458         dev_dbg(rdev_to_dev(qp->rdev),
459                 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
460                 wqe.bind.r_key, qp->qplib_qp.id, pd);
461         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
462         if (rc) {
463                 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
464                 return rc;
465         }
466         bnxt_qplib_post_send_db(&qp->qplib_qp);
467
468         return rc;
469 }
470
471 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
472 {
473         struct bnxt_re_fence_data *fence = &pd->fence;
474         struct bnxt_re_dev *rdev = pd->rdev;
475         struct device *dev = &rdev->en_dev->pdev->dev;
476         struct bnxt_re_mr *mr = fence->mr;
477
478         if (fence->mw) {
479                 bnxt_re_dealloc_mw(fence->mw);
480                 fence->mw = NULL;
481         }
482         if (mr) {
483                 if (mr->ib_mr.rkey)
484                         bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
485                                              true);
486                 if (mr->ib_mr.lkey)
487                         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
488                 kfree(mr);
489                 fence->mr = NULL;
490         }
491         if (fence->dma_addr) {
492                 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
493                                  DMA_BIDIRECTIONAL);
494                 fence->dma_addr = 0;
495         }
496 }
497
498 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
499 {
500         int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
501         struct bnxt_re_fence_data *fence = &pd->fence;
502         struct bnxt_re_dev *rdev = pd->rdev;
503         struct device *dev = &rdev->en_dev->pdev->dev;
504         struct bnxt_re_mr *mr = NULL;
505         dma_addr_t dma_addr = 0;
506         struct ib_mw *mw;
507         u64 pbl_tbl;
508         int rc;
509
510         dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
511                                   DMA_BIDIRECTIONAL);
512         rc = dma_mapping_error(dev, dma_addr);
513         if (rc) {
514                 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
515                 rc = -EIO;
516                 fence->dma_addr = 0;
517                 goto fail;
518         }
519         fence->dma_addr = dma_addr;
520
521         /* Allocate a MR */
522         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
523         if (!mr) {
524                 rc = -ENOMEM;
525                 goto fail;
526         }
527         fence->mr = mr;
528         mr->rdev = rdev;
529         mr->qplib_mr.pd = &pd->qplib_pd;
530         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
531         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
532         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
533         if (rc) {
534                 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
535                 goto fail;
536         }
537
538         /* Register MR */
539         mr->ib_mr.lkey = mr->qplib_mr.lkey;
540         mr->qplib_mr.va = (u64)(unsigned long)fence->va;
541         mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
542         pbl_tbl = dma_addr;
543         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
544                                BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
545         if (rc) {
546                 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
547                 goto fail;
548         }
549         mr->ib_mr.rkey = mr->qplib_mr.rkey;
550
551         /* Create a fence MW only for kernel consumers */
552         mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
553         if (IS_ERR(mw)) {
554                 dev_err(rdev_to_dev(rdev),
555                         "Failed to create fence-MW for PD: %p\n", pd);
556                 rc = PTR_ERR(mw);
557                 goto fail;
558         }
559         fence->mw = mw;
560
561         bnxt_re_create_fence_wqe(pd);
562         return 0;
563
564 fail:
565         bnxt_re_destroy_fence_mr(pd);
566         return rc;
567 }
568
569 /* Protection Domains */
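/* PD teardown releases the fence MR/MW first, then frees the HW PD. */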
570 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
571 {
572         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
573         struct bnxt_re_dev *rdev = pd->rdev;
574         int rc;
575
576         bnxt_re_destroy_fence_mr(pd);
577
578         if (pd->qplib_pd.id) {
579                 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
580                                            &rdev->qplib_res.pd_tbl,
581                                            &pd->qplib_pd);
582                 if (rc)
583                         dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
584         }
585
586         kfree(pd);
587         return 0;
588 }
589
590 struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
591                                struct ib_ucontext *ucontext,
592                                struct ib_udata *udata)
593 {
594         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
595         struct bnxt_re_ucontext *ucntx = container_of(ucontext,
596                                                       struct bnxt_re_ucontext,
597                                                       ib_uctx);
598         struct bnxt_re_pd *pd;
599         int rc;
600
601         pd = kzalloc(sizeof(*pd), GFP_KERNEL);
602         if (!pd)
603                 return ERR_PTR(-ENOMEM);
604
605         pd->rdev = rdev;
606         if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
607                 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
608                 rc = -ENOMEM;
609                 goto fail;
610         }
611
612         if (udata) {
613                 struct bnxt_re_pd_resp resp;
614
615                 if (!ucntx->dpi.dbr) {
616                         /* Allocate the DPI in alloc_pd so that ibv_devinfo
617                          * and related applications do not fail when DPIs
618                          * are depleted.
619                          */
620                         if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
621                                                  &ucntx->dpi, ucntx)) {
622                                 rc = -ENOMEM;
623                                 goto dbfail;
624                         }
625                 }
626
627                 resp.pdid = pd->qplib_pd.id;
628                 /* Still allow mapping this DBR to the new user PD. */
629                 resp.dpi = ucntx->dpi.dpi;
630                 resp.dbr = (u64)ucntx->dpi.umdbr;
631
632                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
633                 if (rc) {
634                         dev_err(rdev_to_dev(rdev),
635                                 "Failed to copy user response\n");
636                         goto dbfail;
637                 }
638         }
639
640         if (!udata)
641                 if (bnxt_re_create_fence_mr(pd))
642                         dev_warn(rdev_to_dev(rdev),
643                                  "Failed to create Fence-MR\n");
644         return &pd->ib_pd;
645 dbfail:
646         (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
647                                     &pd->qplib_pd);
648 fail:
649         kfree(pd);
650         return ERR_PTR(rc);
651 }
652
653 /* Address Handles */
654 int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
655 {
656         struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
657         struct bnxt_re_dev *rdev = ah->rdev;
658         int rc;
659
660         rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
661         if (rc) {
662                 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
663                 return rc;
664         }
665         kfree(ah);
666         return 0;
667 }
668
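/* Create an address handle. A GRH is mandatory since RoCE addressing is
 * GID based; for user AHs the AH id (AVID) is also written to the
 * context's shared page so user space can retrieve it.
 */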
669 struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
670                                 struct rdma_ah_attr *ah_attr,
671                                 struct ib_udata *udata)
672 {
673         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
674         struct bnxt_re_dev *rdev = pd->rdev;
675         struct bnxt_re_ah *ah;
676         const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
677         int rc;
678         u8 nw_type;
679
680         struct ib_gid_attr sgid_attr;
681
682         if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
683                 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
684                 return ERR_PTR(-EINVAL);
685         }
686         ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
687         if (!ah)
688                 return ERR_PTR(-ENOMEM);
689
690         ah->rdev = rdev;
691         ah->qplib_ah.pd = &pd->qplib_pd;
692
693         /* Supply the configuration for the HW */
694         memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
695                sizeof(union ib_gid));
696         /*
697          * If RoCE V2 is enabled, the stack will have two entries for
698          * each GID. Avoid this duplicate entry in HW by dividing
699          * the GID index by 2 for RoCE V2.
700          */
701         ah->qplib_ah.sgid_index = grh->sgid_index / 2;
702         ah->qplib_ah.host_sgid_index = grh->sgid_index;
703         ah->qplib_ah.traffic_class = grh->traffic_class;
704         ah->qplib_ah.flow_label = grh->flow_label;
705         ah->qplib_ah.hop_limit = grh->hop_limit;
706         ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
707         if (ib_pd->uobject &&
708             !rdma_is_multicast_addr((struct in6_addr *)
709                                     grh->dgid.raw) &&
710             !rdma_link_local_addr((struct in6_addr *)
711                                   grh->dgid.raw)) {
712                 union ib_gid sgid;
713
714                 rc = ib_get_cached_gid(&rdev->ibdev, 1,
715                                        grh->sgid_index, &sgid,
716                                        &sgid_attr);
717                 if (rc) {
718                         dev_err(rdev_to_dev(rdev),
719                                 "Failed to query gid at index %d",
720                                 grh->sgid_index);
721                         goto fail;
722                 }
723                 if (sgid_attr.ndev)
724                         dev_put(sgid_attr.ndev);
725                 /* Get network header type for this GID */
726                 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
727                 switch (nw_type) {
728                 case RDMA_NETWORK_IPV4:
729                         ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
730                         break;
731                 case RDMA_NETWORK_IPV6:
732                         ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
733                         break;
734                 default:
735                         ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
736                         break;
737                 }
738         }
739
740         memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
741         rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
742         if (rc) {
743                 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
744                 goto fail;
745         }
746
747         /* Write AVID to shared page. */
748         if (ib_pd->uobject) {
749                 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
750                 struct bnxt_re_ucontext *uctx;
751                 unsigned long flag;
752                 u32 *wrptr;
753
754                 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
755                 spin_lock_irqsave(&uctx->sh_lock, flag);
756                 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
757                 *wrptr = ah->qplib_ah.id;
758                 wmb(); /* make sure cache is updated. */
759                 spin_unlock_irqrestore(&uctx->sh_lock, flag);
760         }
761
762         return &ah->ib_ah;
763
764 fail:
765         kfree(ah);
766         return ERR_PTR(rc);
767 }
768
769 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
770 {
771         return 0;
772 }
773
774 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
775 {
776         struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
777
778         ah_attr->type = ib_ah->type;
779         rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
780         memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
781         rdma_ah_set_grh(ah_attr, NULL, 0,
782                         ah->qplib_ah.host_sgid_index,
783                         0, ah->qplib_ah.traffic_class);
784         rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
785         rdma_ah_set_port_num(ah_attr, 1);
786         rdma_ah_set_static_rate(ah_attr, 0);
787         return 0;
788 }
789
790 /* Queue Pairs */
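/* Destroying the GSI QP also tears down the shadow QP and the shadow-QP AH
 * that were created alongside it.
 */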
791 int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
792 {
793         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
794         struct bnxt_re_dev *rdev = qp->rdev;
795         int rc;
796
797         bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
798         bnxt_qplib_del_flush_qp(&qp->qplib_qp);
799         rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
800         if (rc) {
801                 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
802                 return rc;
803         }
804         if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
805                 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
806                                            &rdev->sqp_ah->qplib_ah);
807                 if (rc) {
808                         dev_err(rdev_to_dev(rdev),
809                                 "Failed to destroy HW AH for shadow QP");
810                         return rc;
811                 }
812
813                 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
814                 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
815                                            &rdev->qp1_sqp->qplib_qp);
816                 if (rc) {
817                         dev_err(rdev_to_dev(rdev),
818                                 "Failed to destroy Shadow QP");
819                         return rc;
820                 }
821                 mutex_lock(&rdev->qp_lock);
822                 list_del(&rdev->qp1_sqp->list);
823                 atomic_dec(&rdev->qp_count);
824                 mutex_unlock(&rdev->qp_lock);
825
826                 kfree(rdev->sqp_ah);
827                 kfree(rdev->qp1_sqp);
828                 rdev->qp1_sqp = NULL;
829                 rdev->sqp_ah = NULL;
830         }
831
832         if (!IS_ERR_OR_NULL(qp->rumem))
833                 ib_umem_release(qp->rumem);
834         if (!IS_ERR_OR_NULL(qp->sumem))
835                 ib_umem_release(qp->sumem);
836
837         mutex_lock(&rdev->qp_lock);
838         list_del(&qp->list);
839         atomic_dec(&rdev->qp_count);
840         mutex_unlock(&rdev->qp_lock);
841         kfree(qp);
842         return 0;
843 }
844
845 static u8 __from_ib_qp_type(enum ib_qp_type type)
846 {
847         switch (type) {
848         case IB_QPT_GSI:
849                 return CMDQ_CREATE_QP1_TYPE_GSI;
850         case IB_QPT_RC:
851                 return CMDQ_CREATE_QP_TYPE_RC;
852         case IB_QPT_UD:
853                 return CMDQ_CREATE_QP_TYPE_UD;
854         default:
855                 return IB_QPT_MAX;
856         }
857 }
858
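/* For user QPs the SQ/RQ buffers live in user memory: pin them with
 * ib_umem_get() and point the qplib queues at the resulting SG list.
 * RC QPs additionally map space for the PSN search area after the SQ.
 */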
859 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
860                                 struct bnxt_re_qp *qp, struct ib_udata *udata)
861 {
862         struct bnxt_re_qp_req ureq;
863         struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
864         struct ib_umem *umem;
865         int bytes = 0;
866         struct ib_ucontext *context = pd->ib_pd.uobject->context;
867         struct bnxt_re_ucontext *cntx = container_of(context,
868                                                      struct bnxt_re_ucontext,
869                                                      ib_uctx);
870         if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
871                 return -EFAULT;
872
873         bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
874         /* Consider mapping PSN search memory only for RC QPs. */
875         if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
876                 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
877         bytes = PAGE_ALIGN(bytes);
878         umem = ib_umem_get(context, ureq.qpsva, bytes,
879                            IB_ACCESS_LOCAL_WRITE, 1);
880         if (IS_ERR(umem))
881                 return PTR_ERR(umem);
882
883         qp->sumem = umem;
884         qplib_qp->sq.sglist = umem->sg_head.sgl;
885         qplib_qp->sq.nmap = umem->nmap;
886         qplib_qp->qp_handle = ureq.qp_handle;
887
888         if (!qp->qplib_qp.srq) {
889                 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
890                 bytes = PAGE_ALIGN(bytes);
891                 umem = ib_umem_get(context, ureq.qprva, bytes,
892                                    IB_ACCESS_LOCAL_WRITE, 1);
893                 if (IS_ERR(umem))
894                         goto rqfail;
895                 qp->rumem = umem;
896                 qplib_qp->rq.sglist = umem->sg_head.sgl;
897                 qplib_qp->rq.nmap = umem->nmap;
898         }
899
900         qplib_qp->dpi = &cntx->dpi;
901         return 0;
902 rqfail:
903         ib_umem_release(qp->sumem);
904         qp->sumem = NULL;
905         qplib_qp->sq.sglist = NULL;
906         qplib_qp->sq.nmap = 0;
907
908         return PTR_ERR(umem);
909 }
910
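/* AH used by the shadow QP: the DGID is the local GID at index 0 and the
 * DMAC is the port's own MAC, i.e. the AH points back at this port.
 */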
911 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
912                                 (struct bnxt_re_pd *pd,
913                                  struct bnxt_qplib_res *qp1_res,
914                                  struct bnxt_qplib_qp *qp1_qp)
915 {
916         struct bnxt_re_dev *rdev = pd->rdev;
917         struct bnxt_re_ah *ah;
918         union ib_gid sgid;
919         int rc;
920
921         ah = kzalloc(sizeof(*ah), GFP_KERNEL);
922         if (!ah)
923                 return NULL;
924
925         ah->rdev = rdev;
926         ah->qplib_ah.pd = &pd->qplib_pd;
927
928         rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
929         if (rc)
930                 goto fail;
931
932         /* Supply the DGID with the same data as the SGID */
933         memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
934                sizeof(union ib_gid));
935         ah->qplib_ah.sgid_index = 0;
936
937         ah->qplib_ah.traffic_class = 0;
938         ah->qplib_ah.flow_label = 0;
939         ah->qplib_ah.hop_limit = 1;
940         ah->qplib_ah.sl = 0;
941         /* Have DMAC same as SMAC */
942         ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
943
944         rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
945         if (rc) {
946                 dev_err(rdev_to_dev(rdev),
947                         "Failed to allocate HW AH for Shadow QP");
948                 goto fail;
949         }
950
951         return ah;
952
953 fail:
954         kfree(ah);
955         return NULL;
956 }
957
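/* Internal UD QP that shadows QP1: its SQ depth mirrors QP1's RQ depth,
 * it shares QP1's CQs and uses the privileged DPI.
 */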
958 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
959                                 (struct bnxt_re_pd *pd,
960                                  struct bnxt_qplib_res *qp1_res,
961                                  struct bnxt_qplib_qp *qp1_qp)
962 {
963         struct bnxt_re_dev *rdev = pd->rdev;
964         struct bnxt_re_qp *qp;
965         int rc;
966
967         qp = kzalloc(sizeof(*qp), GFP_KERNEL);
968         if (!qp)
969                 return NULL;
970
971         qp->rdev = rdev;
972
973         /* Initialize the shadow QP structure from the QP1 values */
974         ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
975
976         qp->qplib_qp.pd = &pd->qplib_pd;
977         qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
978         qp->qplib_qp.type = IB_QPT_UD;
979
980         qp->qplib_qp.max_inline_data = 0;
981         qp->qplib_qp.sig_type = true;
982
983         /* Shadow QP SQ depth should be same as QP1 RQ depth */
984         qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
985         qp->qplib_qp.sq.max_sge = 2;
986         /* Q full delta can be 1 since it is internal QP */
987         qp->qplib_qp.sq.q_full_delta = 1;
988
989         qp->qplib_qp.scq = qp1_qp->scq;
990         qp->qplib_qp.rcq = qp1_qp->rcq;
991
992         qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
993         qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
994         /* Q full delta can be 1 since it is internal QP */
995         qp->qplib_qp.rq.q_full_delta = 1;
996
997         qp->qplib_qp.mtu = qp1_qp->mtu;
998
999         qp->qplib_qp.sq_hdr_buf_size = 0;
1000         qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1001         qp->qplib_qp.dpi = &rdev->dpi_privileged;
1002
1003         rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1004         if (rc)
1005                 goto fail;
1006
1007         rdev->sqp_id = qp->qplib_qp.id;
1008
1009         spin_lock_init(&qp->sq_lock);
1010         INIT_LIST_HEAD(&qp->list);
1011         mutex_lock(&rdev->qp_lock);
1012         list_add_tail(&qp->list, &rdev->qp_list);
1013         atomic_inc(&rdev->qp_count);
1014         mutex_unlock(&rdev->qp_lock);
1015         return qp;
1016 fail:
1017         kfree(qp);
1018         return NULL;
1019 }
1020
1021 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1022                                 struct ib_qp_init_attr *qp_init_attr,
1023                                 struct ib_udata *udata)
1024 {
1025         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1026         struct bnxt_re_dev *rdev = pd->rdev;
1027         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1028         struct bnxt_re_qp *qp;
1029         struct bnxt_re_cq *cq;
1030         struct bnxt_re_srq *srq;
1031         int rc, entries;
1032
1033         if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1034             (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1035             (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1036             (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1037             (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1038                 return ERR_PTR(-EINVAL);
1039
1040         qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1041         if (!qp)
1042                 return ERR_PTR(-ENOMEM);
1043
1044         qp->rdev = rdev;
1045         ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1046         qp->qplib_qp.pd = &pd->qplib_pd;
1047         qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1048         qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1049         if (qp->qplib_qp.type == IB_QPT_MAX) {
1050                 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1051                         qp->qplib_qp.type);
1052                 rc = -EINVAL;
1053                 goto fail;
1054         }
1055         qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1056         qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1057                                   IB_SIGNAL_ALL_WR) ? true : false);
1058
1059         qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1060         if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1061                 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1062
1063         if (qp_init_attr->send_cq) {
1064                 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1065                                   ib_cq);
1066                 if (!cq) {
1067                         dev_err(rdev_to_dev(rdev), "Send CQ not found");
1068                         rc = -EINVAL;
1069                         goto fail;
1070                 }
1071                 qp->qplib_qp.scq = &cq->qplib_cq;
1072         }
1073
1074         if (qp_init_attr->recv_cq) {
1075                 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1076                                   ib_cq);
1077                 if (!cq) {
1078                         dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1079                         rc = -EINVAL;
1080                         goto fail;
1081                 }
1082                 qp->qplib_qp.rcq = &cq->qplib_cq;
1083         }
1084
1085         if (qp_init_attr->srq) {
1086                 srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
1087                                    ib_srq);
1088                 if (!srq) {
1089                         dev_err(rdev_to_dev(rdev), "SRQ not found");
1090                         rc = -EINVAL;
1091                         goto fail;
1092                 }
1093                 qp->qplib_qp.srq = &srq->qplib_srq;
1094                 qp->qplib_qp.rq.max_wqe = 0;
1095         } else {
1096                 /* Allocate 1 more than what's provided so posting max doesn't
1097                  * mean empty
1098                  */
1099                 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1100                 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1101                                                 dev_attr->max_qp_wqes + 1);
1102
1103                 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1104                                                 qp_init_attr->cap.max_recv_wr;
1105
1106                 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1107                 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1108                         qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1109         }
1110
1111         qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1112
1113         if (qp_init_attr->qp_type == IB_QPT_GSI) {
1114                 /* Allocate 1 more than what's provided */
1115                 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1116                 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1117                                                 dev_attr->max_qp_wqes + 1);
1118                 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1119                                                 qp_init_attr->cap.max_send_wr;
1120                 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1121                 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1122                         qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1123                 qp->qplib_qp.sq.max_sge++;
1124                 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1125                         qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1126
1127                 qp->qplib_qp.rq_hdr_buf_size =
1128                                         BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1129
1130                 qp->qplib_qp.sq_hdr_buf_size =
1131                                         BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1132                 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1133                 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1134                 if (rc) {
1135                         dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1136                         goto fail;
1137                 }
1138                 /* Create a shadow QP to handle the QP1 traffic */
1139                 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1140                                                          &qp->qplib_qp);
1141                 if (!rdev->qp1_sqp) {
1142                         rc = -EINVAL;
1143                         dev_err(rdev_to_dev(rdev),
1144                                 "Failed to create Shadow QP for QP1");
1145                         goto qp_destroy;
1146                 }
1147                 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1148                                                            &qp->qplib_qp);
1149                 if (!rdev->sqp_ah) {
1150                         bnxt_qplib_destroy_qp(&rdev->qplib_res,
1151                                               &rdev->qp1_sqp->qplib_qp);
1152                         rc = -EINVAL;
1153                         dev_err(rdev_to_dev(rdev),
1154                                 "Failed to create AH entry for ShadowQP");
1155                         goto qp_destroy;
1156                 }
1157
1158         } else {
1159                 /* Allocate 128 + 1 more than what's provided */
1160                 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1161                                              BNXT_QPLIB_RESERVED_QP_WRS + 1);
1162                 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1163                                                 dev_attr->max_qp_wqes +
1164                                                 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1165                 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1166
1167                 /*
1168                  * Reserve one slot for the Phantom WQE. The application can
1169                  * then post one extra entry, but this is allowed to avoid an
1170                  * unexpected queue-full condition.
1171                  */
1172
1173                 qp->qplib_qp.sq.q_full_delta -= 1;
1174
1175                 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1176                 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1177                 if (udata) {
1178                         rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1179                         if (rc)
1180                                 goto fail;
1181                 } else {
1182                         qp->qplib_qp.dpi = &rdev->dpi_privileged;
1183                 }
1184
1185                 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1186                 if (rc) {
1187                         dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1188                         goto fail;
1189                 }
1190         }
1191
1192         qp->ib_qp.qp_num = qp->qplib_qp.id;
1193         spin_lock_init(&qp->sq_lock);
1194         spin_lock_init(&qp->rq_lock);
1195
1196         if (udata) {
1197                 struct bnxt_re_qp_resp resp;
1198
1199                 resp.qpid = qp->ib_qp.qp_num;
1200                 resp.rsvd = 0;
1201                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1202                 if (rc) {
1203                         dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1204                         goto qp_destroy;
1205                 }
1206         }
1207         INIT_LIST_HEAD(&qp->list);
1208         mutex_lock(&rdev->qp_lock);
1209         list_add_tail(&qp->list, &rdev->qp_list);
1210         atomic_inc(&rdev->qp_count);
1211         mutex_unlock(&rdev->qp_lock);
1212
1213         return &qp->ib_qp;
1214 qp_destroy:
1215         bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1216 fail:
1217         kfree(qp);
1218         return ERR_PTR(rc);
1219 }
1220
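/* Converters between the IB verbs QP state / MTU enums and the firmware
 * CMDQ encodings used in the modify/query QP commands.
 */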
1221 static u8 __from_ib_qp_state(enum ib_qp_state state)
1222 {
1223         switch (state) {
1224         case IB_QPS_RESET:
1225                 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1226         case IB_QPS_INIT:
1227                 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1228         case IB_QPS_RTR:
1229                 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1230         case IB_QPS_RTS:
1231                 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1232         case IB_QPS_SQD:
1233                 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1234         case IB_QPS_SQE:
1235                 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1236         case IB_QPS_ERR:
1237         default:
1238                 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1239         }
1240 }
1241
1242 static enum ib_qp_state __to_ib_qp_state(u8 state)
1243 {
1244         switch (state) {
1245         case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1246                 return IB_QPS_RESET;
1247         case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1248                 return IB_QPS_INIT;
1249         case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1250                 return IB_QPS_RTR;
1251         case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1252                 return IB_QPS_RTS;
1253         case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1254                 return IB_QPS_SQD;
1255         case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1256                 return IB_QPS_SQE;
1257         case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1258         default:
1259                 return IB_QPS_ERR;
1260         }
1261 }
1262
1263 static u32 __from_ib_mtu(enum ib_mtu mtu)
1264 {
1265         switch (mtu) {
1266         case IB_MTU_256:
1267                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1268         case IB_MTU_512:
1269                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1270         case IB_MTU_1024:
1271                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1272         case IB_MTU_2048:
1273                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1274         case IB_MTU_4096:
1275                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1276         default:
1277                 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1278         }
1279 }
1280
1281 static enum ib_mtu __to_ib_mtu(u32 mtu)
1282 {
1283         switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1284         case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1285                 return IB_MTU_256;
1286         case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1287                 return IB_MTU_512;
1288         case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1289                 return IB_MTU_1024;
1290         case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1291                 return IB_MTU_2048;
1292         case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1293                 return IB_MTU_4096;
1294         default:
1295                 return IB_MTU_2048;
1296         }
1297 }
1298
1299 /* Shared Receive Queues */
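/* Each SRQ accounts for one unit of notification-queue (NQ) budget: it is
 * taken at create time and returned when the SRQ is destroyed.
 */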
1300 int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
1301 {
1302         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1303                                                ib_srq);
1304         struct bnxt_re_dev *rdev = srq->rdev;
1305         struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1306         struct bnxt_qplib_nq *nq = NULL;
1307         int rc;
1308
1309         if (qplib_srq->cq)
1310                 nq = qplib_srq->cq->nq;
1311         rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1312         if (rc) {
1313                 dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
1314                 return rc;
1315         }
1316
1317         if (srq->umem)
1318                 ib_umem_release(srq->umem);
1319         kfree(srq);
1320         atomic_dec(&rdev->srq_count);
1321         if (nq)
1322                 nq->budget--;
1323         return 0;
1324 }
1325
1326 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1327                                  struct bnxt_re_pd *pd,
1328                                  struct bnxt_re_srq *srq,
1329                                  struct ib_udata *udata)
1330 {
1331         struct bnxt_re_srq_req ureq;
1332         struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1333         struct ib_umem *umem;
1334         int bytes = 0;
1335         struct ib_ucontext *context = pd->ib_pd.uobject->context;
1336         struct bnxt_re_ucontext *cntx = container_of(context,
1337                                                      struct bnxt_re_ucontext,
1338                                                      ib_uctx);
1339         if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1340                 return -EFAULT;
1341
1342         bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1343         bytes = PAGE_ALIGN(bytes);
1344         umem = ib_umem_get(context, ureq.srqva, bytes,
1345                            IB_ACCESS_LOCAL_WRITE, 1);
1346         if (IS_ERR(umem))
1347                 return PTR_ERR(umem);
1348
1349         srq->umem = umem;
1350         qplib_srq->nmap = umem->nmap;
1351         qplib_srq->sglist = umem->sg_head.sgl;
1352         qplib_srq->srq_handle = ureq.srq_handle;
1353         qplib_srq->dpi = &cntx->dpi;
1354
1355         return 0;
1356 }
1357
1358 struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
1359                                   struct ib_srq_init_attr *srq_init_attr,
1360                                   struct ib_udata *udata)
1361 {
1362         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1363         struct bnxt_re_dev *rdev = pd->rdev;
1364         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1365         struct bnxt_re_srq *srq;
1366         struct bnxt_qplib_nq *nq = NULL;
1367         int rc, entries;
1368
1369         if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1370                 dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
1371                 rc = -EINVAL;
1372                 goto exit;
1373         }
1374
1375         if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1376                 rc = -ENOTSUPP;
1377                 goto exit;
1378         }
1379
1380         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1381         if (!srq) {
1382                 rc = -ENOMEM;
1383                 goto exit;
1384         }
1385         srq->rdev = rdev;
1386         srq->qplib_srq.pd = &pd->qplib_pd;
1387         srq->qplib_srq.dpi = &rdev->dpi_privileged;
1388         /* Allocate 1 more than what's provided so that posting the
1389          * maximum number of WRs doesn't make the queue appear empty
1390          */
1391         entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1392         if (entries > dev_attr->max_srq_wqes + 1)
1393                 entries = dev_attr->max_srq_wqes + 1;
1394
1395         srq->qplib_srq.max_wqe = entries;
1396         srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1397         srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1398         srq->srq_limit = srq_init_attr->attr.srq_limit;
1399         srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1400         nq = &rdev->nq[0];
1401
1402         if (udata) {
1403                 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1404                 if (rc)
1405                         goto fail;
1406         }
1407
1408         rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1409         if (rc) {
1410                 dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1411                 goto fail;
1412         }
1413
1414         if (udata) {
1415                 struct bnxt_re_srq_resp resp;
1416
1417                 resp.srqid = srq->qplib_srq.id;
1418                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1419                 if (rc) {
1420                         dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1421                         bnxt_qplib_destroy_srq(&rdev->qplib_res,
1422                                                &srq->qplib_srq);
1423                         goto exit;
1424                 }
1425         }
1426         if (nq)
1427                 nq->budget++;
1428         atomic_inc(&rdev->srq_count);
1429
1430         return &srq->ib_srq;
1431
1432 fail:
1433         if (srq->umem)
1434                 ib_umem_release(srq->umem);
1435         kfree(srq);
1436 exit:
1437         return ERR_PTR(rc);
1438 }
1439
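/* Only IB_SRQ_LIMIT is honoured here: SRQ resize (IB_SRQ_MAX_WR) is not
 * supported and is silently ignored.  The new limit is pushed to the
 * firmware and mirrored in srq->srq_limit on success.
 */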
1440 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1441                        enum ib_srq_attr_mask srq_attr_mask,
1442                        struct ib_udata *udata)
1443 {
1444         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1445                                                ib_srq);
1446         struct bnxt_re_dev *rdev = srq->rdev;
1447         int rc;
1448
1449         switch (srq_attr_mask) {
1450         case IB_SRQ_MAX_WR:
1451                 /* SRQ resize is not supported */
1452                 break;
1453         case IB_SRQ_LIMIT:
1454                 /* Change the SRQ threshold */
1455                 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1456                         return -EINVAL;
1457
1458                 srq->qplib_srq.threshold = srq_attr->srq_limit;
1459                 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1460                 if (rc) {
1461                         dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1462                         return rc;
1463                 }
1464                 /* On success, update the shadow */
1465                 srq->srq_limit = srq_attr->srq_limit;
1466                 /* No need to build and send a response back to udata */
1467                 break;
1468         default:
1469                 dev_err(rdev_to_dev(rdev),
1470                         "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1471                 return -EINVAL;
1472         }
1473         return 0;
1474 }
1475
1476 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1477 {
1478         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1479                                                ib_srq);
1480         struct bnxt_re_srq tsrq;
1481         struct bnxt_re_dev *rdev = srq->rdev;
1482         int rc;
1483
1484         /* Get live SRQ attr */
1485         tsrq.qplib_srq.id = srq->qplib_srq.id;
1486         rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1487         if (rc) {
1488                 dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1489                 return rc;
1490         }
1491         srq_attr->max_wr = srq->qplib_srq.max_wqe;
1492         srq_attr->max_sge = srq->qplib_srq.max_sge;
1493         srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1494
1495         return 0;
1496 }
1497
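/* Post a chain of receive WRs to the SRQ.  Each ib_recv_wr is transcribed
 * into a qplib SWQE under srq->lock; on the first failure *bad_wr is set to
 * the offending WR and posting stops.
 */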
1498 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
1499                           struct ib_recv_wr **bad_wr)
1500 {
1501         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1502                                                ib_srq);
1503         struct bnxt_qplib_swqe wqe;
1504         unsigned long flags;
1505         int rc = 0, payload_sz = 0;
1506
1507         spin_lock_irqsave(&srq->lock, flags);
1508         while (wr) {
1509                 /* Transcribe each ib_recv_wr to qplib_swqe */
1510                 wqe.num_sge = wr->num_sge;
1511                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
1512                                                wr->num_sge);
1513                 wqe.wr_id = wr->wr_id;
1514                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1515
1516                 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1517                 if (rc) {
1518                         *bad_wr = wr;
1519                         break;
1520                 }
1521                 wr = wr->next;
1522         }
1523         spin_unlock_irqrestore(&srq->lock, flags);
1524
1525         return rc;
1526 }
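
/* Mirror selected attribute changes made on the real QP1 onto the driver's
 * shadow QP (rdev->qp1_sqp): state, pkey index and SQ PSN are copied, while
 * the shadow QP keeps a fixed, driver-chosen QKEY.
 */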
1527 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1528                                     struct bnxt_re_qp *qp1_qp,
1529                                     int qp_attr_mask)
1530 {
1531         struct bnxt_re_qp *qp = rdev->qp1_sqp;
1532         int rc = 0;
1533
1534         if (qp_attr_mask & IB_QP_STATE) {
1535                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1536                 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1537         }
1538         if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1539                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1540                 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1541         }
1542
1543         if (qp_attr_mask & IB_QP_QKEY) {
1544                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1545                 /* Use a random QKEY */
1546                 qp->qplib_qp.qkey = 0x81818181;
1547         }
1548         if (qp_attr_mask & IB_QP_SQ_PSN) {
1549                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1550                 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1551         }
1552
1553         rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1554         if (rc)
1555                 dev_err(rdev_to_dev(rdev),
1556                         "Failed to modify Shadow QP for QP1");
1557         return rc;
1558 }
1559
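/* Translate the ib_qp_attr/qp_attr_mask pair into qplib modify flags and
 * issue a single MODIFY_QP firmware command.  State transitions are
 * validated with ib_modify_qp_is_ok(); kernel QPs moved to the error state
 * are also added to the flush list, and a change on a GSI QP is propagated
 * to the shadow QP.
 */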
1560 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1561                       int qp_attr_mask, struct ib_udata *udata)
1562 {
1563         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1564         struct bnxt_re_dev *rdev = qp->rdev;
1565         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1566         enum ib_qp_state curr_qp_state, new_qp_state;
1567         int rc, entries;
1568         int status;
1569         union ib_gid sgid;
1570         struct ib_gid_attr sgid_attr;
1571         u8 nw_type;
1572
1573         qp->qplib_qp.modify_flags = 0;
1574         if (qp_attr_mask & IB_QP_STATE) {
1575                 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1576                 new_qp_state = qp_attr->qp_state;
1577                 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1578                                         ib_qp->qp_type, qp_attr_mask,
1579                                         IB_LINK_LAYER_ETHERNET)) {
1580                         dev_err(rdev_to_dev(rdev),
1581                                 "Invalid attribute mask: %#x specified ",
1582                                 qp_attr_mask);
1583                         dev_err(rdev_to_dev(rdev),
1584                                 "for qpn: %#x type: %#x",
1585                                 ib_qp->qp_num, ib_qp->qp_type);
1586                         dev_err(rdev_to_dev(rdev),
1587                                 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1588                                 curr_qp_state, new_qp_state);
1589                         return -EINVAL;
1590                 }
1591                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1592                 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1593
1594                 if (!qp->sumem &&
1595                     qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1596                         dev_dbg(rdev_to_dev(rdev),
1597                                 "Move QP = %p to flush list\n",
1598                                 qp);
1599                         bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1600                 }
1601                 if (!qp->sumem &&
1602                     qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1603                         dev_dbg(rdev_to_dev(rdev),
1604                                 "Move QP = %p out of flush list\n",
1605                                 qp);
1606                         bnxt_qplib_del_flush_qp(&qp->qplib_qp);
1607                 }
1608         }
1609         if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1610                 qp->qplib_qp.modify_flags |=
1611                                 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1612                 qp->qplib_qp.en_sqd_async_notify = true;
1613         }
1614         if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1615                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1616                 qp->qplib_qp.access =
1617                         __from_ib_access_flags(qp_attr->qp_access_flags);
1618                 /* LOCAL_WRITE access must be set to allow RC receive */
1619                 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1620         }
1621         if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1622                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1623                 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1624         }
1625         if (qp_attr_mask & IB_QP_QKEY) {
1626                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1627                 qp->qplib_qp.qkey = qp_attr->qkey;
1628         }
1629         if (qp_attr_mask & IB_QP_AV) {
1630                 const struct ib_global_route *grh =
1631                         rdma_ah_read_grh(&qp_attr->ah_attr);
1632
1633                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1634                                      CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1635                                      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1636                                      CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1637                                      CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1638                                      CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1639                                      CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1640                 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1641                        sizeof(qp->qplib_qp.ah.dgid.data));
1642                 qp->qplib_qp.ah.flow_label = grh->flow_label;
1643                 /* If RoCE V2 is enabled, the stack will have two entries
1644                  * for each GID entry. Avoid this duplicate entry in HW by
1645                  * dividing the GID index by 2 for RoCE V2.
1646                  */
1647                 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1648                 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1649                 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1650                 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1651                 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1652                 ether_addr_copy(qp->qplib_qp.ah.dmac,
1653                                 qp_attr->ah_attr.roce.dmac);
1654
1655                 status = ib_get_cached_gid(&rdev->ibdev, 1,
1656                                            grh->sgid_index,
1657                                            &sgid, &sgid_attr);
1658                 if (!status && sgid_attr.ndev) {
1659                         memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1660                                ETH_ALEN);
1661                         dev_put(sgid_attr.ndev);
1662                         nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1663                                                          &sgid);
1664                         switch (nw_type) {
1665                         case RDMA_NETWORK_IPV4:
1666                                 qp->qplib_qp.nw_type =
1667                                         CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1668                                 break;
1669                         case RDMA_NETWORK_IPV6:
1670                                 qp->qplib_qp.nw_type =
1671                                         CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1672                                 break;
1673                         default:
1674                                 qp->qplib_qp.nw_type =
1675                                         CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1676                                 break;
1677                         }
1678                 }
1679         }
1680
1681         if (qp_attr_mask & IB_QP_PATH_MTU) {
1682                 qp->qplib_qp.modify_flags |=
1683                                 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1684                 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1685                 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1686         } else if (qp_attr->qp_state == IB_QPS_RTR) {
1687                 qp->qplib_qp.modify_flags |=
1688                         CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1689                 qp->qplib_qp.path_mtu =
1690                         __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1691                 qp->qplib_qp.mtu =
1692                         ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1693         }
1694
1695         if (qp_attr_mask & IB_QP_TIMEOUT) {
1696                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1697                 qp->qplib_qp.timeout = qp_attr->timeout;
1698         }
1699         if (qp_attr_mask & IB_QP_RETRY_CNT) {
1700                 qp->qplib_qp.modify_flags |=
1701                                 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1702                 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1703         }
1704         if (qp_attr_mask & IB_QP_RNR_RETRY) {
1705                 qp->qplib_qp.modify_flags |=
1706                                 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1707                 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1708         }
1709         if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1710                 qp->qplib_qp.modify_flags |=
1711                                 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1712                 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1713         }
1714         if (qp_attr_mask & IB_QP_RQ_PSN) {
1715                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1716                 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1717         }
1718         if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1719                 qp->qplib_qp.modify_flags |=
1720                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1721                 /* Cap the max_rd_atomic to device max */
1722                 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1723                                                    dev_attr->max_qp_rd_atom);
1724         }
1725         if (qp_attr_mask & IB_QP_SQ_PSN) {
1726                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1727                 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1728         }
1729         if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1730                 if (qp_attr->max_dest_rd_atomic >
1731                     dev_attr->max_qp_init_rd_atom) {
1732                         dev_err(rdev_to_dev(rdev),
1733                                 "max_dest_rd_atomic requested %d is > dev_max %d",
1734                                 qp_attr->max_dest_rd_atomic,
1735                                 dev_attr->max_qp_init_rd_atom);
1736                         return -EINVAL;
1737                 }
1738
1739                 qp->qplib_qp.modify_flags |=
1740                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1741                 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1742         }
1743         if (qp_attr_mask & IB_QP_CAP) {
1744                 qp->qplib_qp.modify_flags |=
1745                                 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1746                                 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1747                                 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1748                                 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1749                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1750                 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1751                     (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1752                     (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1753                     (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1754                     (qp_attr->cap.max_inline_data >=
1755                                                 dev_attr->max_inline_data)) {
1756                         dev_err(rdev_to_dev(rdev),
1757                                 "Create QP failed - max exceeded");
1758                         return -EINVAL;
1759                 }
1760                 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1761                 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1762                                                 dev_attr->max_qp_wqes + 1);
1763                 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1764                                                 qp_attr->cap.max_send_wr;
1765                 /*
1766                  * Reserve one slot for the phantom WQE. Some applications
1767                  * can post one extra entry in this case; allow it to avoid
1768                  * an unexpected queue-full condition.
1769                  */
1770                 qp->qplib_qp.sq.q_full_delta -= 1;
1771                 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1772                 if (qp->qplib_qp.rq.max_wqe) {
1773                         entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1774                         qp->qplib_qp.rq.max_wqe =
1775                                 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1776                         qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1777                                                        qp_attr->cap.max_recv_wr;
1778                         qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1779                 } else {
1780                         /* SRQ was used prior, just ignore the RQ caps */
1781                 }
1782         }
1783         if (qp_attr_mask & IB_QP_DEST_QPN) {
1784                 qp->qplib_qp.modify_flags |=
1785                                 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1786                 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1787         }
1788         rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1789         if (rc) {
1790                 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1791                 return rc;
1792         }
1793         if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1794                 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1795         return rc;
1796 }
1797
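/* Query the live QP attributes from firmware into a temporary qplib_qp and
 * convert them back to ib_qp_attr; the SQ/RQ capacities are reported from
 * the driver's cached values rather than from the firmware response.
 */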
1798 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1799                      int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1800 {
1801         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1802         struct bnxt_re_dev *rdev = qp->rdev;
1803         struct bnxt_qplib_qp *qplib_qp;
1804         int rc;
1805
1806         qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1807         if (!qplib_qp)
1808                 return -ENOMEM;
1809
1810         qplib_qp->id = qp->qplib_qp.id;
1811         qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1812
1813         rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1814         if (rc) {
1815                 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1816                 goto out;
1817         }
1818         qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1819         qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1820         qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1821         qp_attr->pkey_index = qplib_qp->pkey_index;
1822         qp_attr->qkey = qplib_qp->qkey;
1823         qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1824         rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1825                         qplib_qp->ah.host_sgid_index,
1826                         qplib_qp->ah.hop_limit,
1827                         qplib_qp->ah.traffic_class);
1828         rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1829         rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1830         ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1831         qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1832         qp_attr->timeout = qplib_qp->timeout;
1833         qp_attr->retry_cnt = qplib_qp->retry_cnt;
1834         qp_attr->rnr_retry = qplib_qp->rnr_retry;
1835         qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1836         qp_attr->rq_psn = qplib_qp->rq.psn;
1837         qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1838         qp_attr->sq_psn = qplib_qp->sq.psn;
1839         qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1840         qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1841                                                          IB_SIGNAL_REQ_WR;
1842         qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1843
1844         qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1845         qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1846         qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1847         qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1848         qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1849         qp_init_attr->cap = qp_attr->cap;
1850
1851 out:
1852         kfree(qplib_qp);
1853         return rc;
1854 }
1855
1856 /* Routine for sending QP1 packets for RoCE V1 and V2
1857  */
1858 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1859                                      struct ib_send_wr *wr,
1860                                      struct bnxt_qplib_swqe *wqe,
1861                                      int payload_size)
1862 {
1863         struct ib_device *ibdev = &qp->rdev->ibdev;
1864         struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1865                                              ib_ah);
1866         struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1867         struct bnxt_qplib_sge sge;
1868         union ib_gid sgid;
1869         u8 nw_type;
1870         u16 ether_type;
1871         struct ib_gid_attr sgid_attr;
1872         union ib_gid dgid;
1873         bool is_eth = false;
1874         bool is_vlan = false;
1875         bool is_grh = false;
1876         bool is_udp = false;
1877         u8 ip_version = 0;
1878         u16 vlan_id = 0xFFFF;
1879         void *buf;
1880         int i, rc = 0;
1881
1882         memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1883
1884         rc = ib_get_cached_gid(ibdev, 1,
1885                                qplib_ah->host_sgid_index, &sgid,
1886                                &sgid_attr);
1887         if (rc) {
1888                 dev_err(rdev_to_dev(qp->rdev),
1889                         "Failed to query gid at index %d",
1890                         qplib_ah->host_sgid_index);
1891                 return rc;
1892         }
1893         if (sgid_attr.ndev) {
1894                 if (is_vlan_dev(sgid_attr.ndev))
1895                         vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1896                 dev_put(sgid_attr.ndev);
1897         }
1898         /* Get network header type for this GID */
1899         nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1900         switch (nw_type) {
1901         case RDMA_NETWORK_IPV4:
1902                 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1903                 break;
1904         case RDMA_NETWORK_IPV6:
1905                 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1906                 break;
1907         default:
1908                 nw_type = BNXT_RE_ROCE_V1_PACKET;
1909                 break;
1910         }
1911         memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1912         is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1913         if (is_udp) {
1914                 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1915                         ip_version = 4;
1916                         ether_type = ETH_P_IP;
1917                 } else {
1918                         ip_version = 6;
1919                         ether_type = ETH_P_IPV6;
1920                 }
1921                 is_grh = false;
1922         } else {
1923                 ether_type = ETH_P_IBOE;
1924                 is_grh = true;
1925         }
1926
1927         is_eth = true;
1928         is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1929
1930         ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1931                           ip_version, is_udp, 0, &qp->qp1_hdr);
1932
1933         /* ETH */
1934         ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1935         ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1936
1937         /* For vlan, check the sgid for vlan existence */
1938
1939         if (!is_vlan) {
1940                 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1941         } else {
1942                 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1943                 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1944         }
1945
1946         if (is_grh || (ip_version == 6)) {
1947                 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1948                 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1949                        sizeof(sgid));
1950                 qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
1951         }
1952
1953         if (ip_version == 4) {
1954                 qp->qp1_hdr.ip4.tos = 0;
1955                 qp->qp1_hdr.ip4.id = 0;
1956                 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1957                 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1958
1959                 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1960                 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1961                 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1962         }
1963
1964         if (is_udp) {
1965                 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1966                 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1967                 qp->qp1_hdr.udp.csum = 0;
1968         }
1969
1970         /* BTH */
1971         if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1972                 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1973                 qp->qp1_hdr.immediate_present = 1;
1974         } else {
1975                 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1976         }
1977         if (wr->send_flags & IB_SEND_SOLICITED)
1978                 qp->qp1_hdr.bth.solicited_event = 1;
1979         /* pad_count */
1980         qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1981
1982         /* P_key for QP1 is for all members */
1983         qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1984         qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1985         qp->qp1_hdr.bth.ack_req = 0;
1986         qp->send_psn++;
1987         qp->send_psn &= BTH_PSN_MASK;
1988         qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1989         /* DETH */
1990         /* Use the privileged Q_Key for QP1 */
1991         qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1992         qp->qp1_hdr.deth.source_qpn = IB_QP1;
1993
1994         /* Pack the QP1 to the transmit buffer */
1995         buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1996         if (buf) {
1997                 ib_ud_header_pack(&qp->qp1_hdr, buf);
1998                 for (i = wqe->num_sge; i; i--) {
1999                         wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2000                         wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2001                         wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2002                 }
2003
2004                 /*
2005                  * Max header buf size for IPv6 RoCE V2 is 86,
2006                  * which is the same as the QP1 SQ header buffer.
2007                  * Header buf size for IPv4 RoCE V2 can be 66:
2008                  * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2009                  * Subtract 20 bytes from the QP1 SQ header buf size.
2010                  */
2011                 if (is_udp && ip_version == 4)
2012                         sge.size -= 20;
2013                 /*
2014                  * Max header buf size for RoCE V1 is 78:
2015                  * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2016                  * Subtract 8 bytes from the QP1 SQ header buf size.
2017                  */
2018                 if (!is_udp)
2019                         sge.size -= 8;
2020
2021                 /* Subtract 4 bytes for non vlan packets */
2022                 if (!is_vlan)
2023                         sge.size -= 4;
2024
2025                 wqe->sg_list[0].addr = sge.addr;
2026                 wqe->sg_list[0].lkey = sge.lkey;
2027                 wqe->sg_list[0].size = sge.size;
2028                 wqe->num_sge++;
2029
2030         } else {
2031                 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
2032                 rc = -ENOMEM;
2033         }
2034         return rc;
2035 }
2036
2037 /* The MAD layer only provides a recv SGE the size of
2038  * ib_grh + MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
2039  * nor RoCE iCRC.  The Cu+ solution must provide a buffer for the entire
2040  * receive packet (334 bytes) with no VLAN and then copy the GRH
2041  * and the MAD datagram out to the provided SGE.
2042  */
2043 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2044                                             struct ib_recv_wr *wr,
2045                                             struct bnxt_qplib_swqe *wqe,
2046                                             int payload_size)
2047 {
2048         struct bnxt_qplib_sge ref, sge;
2049         u32 rq_prod_index;
2050         struct bnxt_re_sqp_entries *sqp_entry;
2051
2052         rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2053
2054         if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2055                 return -ENOMEM;
2056
2057         /* Create 1 SGE to receive the entire
2058          * Ethernet packet
2059          */
2060         /* Save the reference from ULP */
2061         ref.addr = wqe->sg_list[0].addr;
2062         ref.lkey = wqe->sg_list[0].lkey;
2063         ref.size = wqe->sg_list[0].size;
2064
2065         sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2066
2067         /* SGE 1 */
2068         wqe->sg_list[0].addr = sge.addr;
2069         wqe->sg_list[0].lkey = sge.lkey;
2070         wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2071         sge.size -= wqe->sg_list[0].size;
2072
2073         sqp_entry->sge.addr = ref.addr;
2074         sqp_entry->sge.lkey = ref.lkey;
2075         sqp_entry->sge.size = ref.size;
2076         /* Store the wrid for reporting completion */
2077         sqp_entry->wrid = wqe->wr_id;
2078         /* Change the wqe->wr_id to the table index */
2079         wqe->wr_id = rq_prod_index;
2080         return 0;
2081 }
2082
2083 static int is_ud_qp(struct bnxt_re_qp *qp)
2084 {
2085         return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
2086 }
2087
2088 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2089                                   struct ib_send_wr *wr,
2090                                   struct bnxt_qplib_swqe *wqe)
2091 {
2092         struct bnxt_re_ah *ah = NULL;
2093
2094         if (is_ud_qp(qp)) {
2095                 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2096                 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2097                 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2098                 wqe->send.avid = ah->qplib_ah.id;
2099         }
2100         switch (wr->opcode) {
2101         case IB_WR_SEND:
2102                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2103                 break;
2104         case IB_WR_SEND_WITH_IMM:
2105                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2106                 wqe->send.imm_data = wr->ex.imm_data;
2107                 break;
2108         case IB_WR_SEND_WITH_INV:
2109                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2110                 wqe->send.inv_key = wr->ex.invalidate_rkey;
2111                 break;
2112         default:
2113                 return -EINVAL;
2114         }
2115         if (wr->send_flags & IB_SEND_SIGNALED)
2116                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2117         if (wr->send_flags & IB_SEND_FENCE)
2118                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2119         if (wr->send_flags & IB_SEND_SOLICITED)
2120                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2121         if (wr->send_flags & IB_SEND_INLINE)
2122                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2123
2124         return 0;
2125 }
2126
2127 static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
2128                                   struct bnxt_qplib_swqe *wqe)
2129 {
2130         switch (wr->opcode) {
2131         case IB_WR_RDMA_WRITE:
2132                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2133                 break;
2134         case IB_WR_RDMA_WRITE_WITH_IMM:
2135                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2136                 wqe->rdma.imm_data = wr->ex.imm_data;
2137                 break;
2138         case IB_WR_RDMA_READ:
2139                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2140                 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2141                 break;
2142         default:
2143                 return -EINVAL;
2144         }
2145         wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2146         wqe->rdma.r_key = rdma_wr(wr)->rkey;
2147         if (wr->send_flags & IB_SEND_SIGNALED)
2148                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2149         if (wr->send_flags & IB_SEND_FENCE)
2150                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2151         if (wr->send_flags & IB_SEND_SOLICITED)
2152                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2153         if (wr->send_flags & IB_SEND_INLINE)
2154                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2155
2156         return 0;
2157 }
2158
2159 static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
2160                                     struct bnxt_qplib_swqe *wqe)
2161 {
2162         switch (wr->opcode) {
2163         case IB_WR_ATOMIC_CMP_AND_SWP:
2164                 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2165                 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2166                 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2167                 break;
2168         case IB_WR_ATOMIC_FETCH_AND_ADD:
2169                 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2170                 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2171                 break;
2172         default:
2173                 return -EINVAL;
2174         }
2175         wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2176         wqe->atomic.r_key = atomic_wr(wr)->rkey;
2177         if (wr->send_flags & IB_SEND_SIGNALED)
2178                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2179         if (wr->send_flags & IB_SEND_FENCE)
2180                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2181         if (wr->send_flags & IB_SEND_SOLICITED)
2182                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2183         return 0;
2184 }
2185
2186 static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
2187                                  struct bnxt_qplib_swqe *wqe)
2188 {
2189         wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2190         wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2191
2192         if (wr->send_flags & IB_SEND_SIGNALED)
2193                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2194         if (wr->send_flags & IB_SEND_FENCE)
2195                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2196         if (wr->send_flags & IB_SEND_SOLICITED)
2197                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2198
2199         return 0;
2200 }
2201
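/* Build a fast-registration (REG_MR) WQE from an ib_reg_wr: point the WQE
 * at the MR's page list and PBL, translate the IB access flags into
 * SQ_FR_PMR access-control bits, and program the key, length, page size and
 * IOVA.
 */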
2202 static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
2203                                  struct bnxt_qplib_swqe *wqe)
2204 {
2205         struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2206         struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2207         int access = wr->access;
2208
2209         wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2210         wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2211         wqe->frmr.page_list = mr->pages;
2212         wqe->frmr.page_list_len = mr->npages;
2213         wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2214         wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2215
2216         if (wr->wr.send_flags & IB_SEND_FENCE)
2217                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2218         if (wr->wr.send_flags & IB_SEND_SIGNALED)
2219                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2220
2221         if (access & IB_ACCESS_LOCAL_WRITE)
2222                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2223         if (access & IB_ACCESS_REMOTE_READ)
2224                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2225         if (access & IB_ACCESS_REMOTE_WRITE)
2226                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2227         if (access & IB_ACCESS_REMOTE_ATOMIC)
2228                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2229         if (access & IB_ACCESS_MW_BIND)
2230                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2231
2232         wqe->frmr.l_key = wr->key;
2233         wqe->frmr.length = wr->mr->length;
2234         wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2235         wqe->frmr.va = wr->mr->iova;
2236         return 0;
2237 }
2238
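/* Copy the payload of an inline send directly into the WQE's inline data
 * area, bounded by BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH; returns the total
 * number of bytes copied, or -EINVAL if the SGEs exceed the inline limit.
 */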
2239 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2240                                     struct ib_send_wr *wr,
2241                                     struct bnxt_qplib_swqe *wqe)
2242 {
2243         /* Copy the inline data to the data field */
2244         u8 *in_data;
2245         u32 i, sge_len;
2246         void *sge_addr;
2247
2248         in_data = wqe->inline_data;
2249         for (i = 0; i < wr->num_sge; i++) {
2250                 sge_addr = (void *)(unsigned long)
2251                                 wr->sg_list[i].addr;
2252                 sge_len = wr->sg_list[i].length;
2253
2254                 if ((sge_len + wqe->inline_len) >
2255                     BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2256                         dev_err(rdev_to_dev(rdev),
2257                                 "Inline data size requested > supported value");
2258                         return -EINVAL;
2259                 }
2260                 sge_len = wr->sg_list[i].length;
2261
2262                 memcpy(in_data, sge_addr, sge_len);
2263                 in_data += wr->sg_list[i].length;
2264                 wqe->inline_len += wr->sg_list[i].length;
2265         }
2266         return wqe->inline_len;
2267 }
2268
2269 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2270                                    struct ib_send_wr *wr,
2271                                    struct bnxt_qplib_swqe *wqe)
2272 {
2273         int payload_sz = 0;
2274
2275         if (wr->send_flags & IB_SEND_INLINE)
2276                 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2277         else
2278                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2279                                                wqe->num_sge);
2280
2281         return payload_sz;
2282 }
2283
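/* After BNXT_RE_UD_QP_HW_STALL WQEs have been posted on a UD, GSI or raw
 * Ethertype QP, re-issue a modify-to-RTS and reset the WQE counter; this
 * appears to work around a hardware stall on such QPs.
 */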
2284 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2285 {
2286         if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2287              qp->ib_qp.qp_type == IB_QPT_GSI ||
2288              qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2289              qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2290                 int qp_attr_mask;
2291                 struct ib_qp_attr qp_attr;
2292
2293                 qp_attr_mask = IB_QP_STATE;
2294                 qp_attr.qp_state = IB_QPS_RTS;
2295                 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2296                 qp->qplib_qp.wqe_cnt = 0;
2297         }
2298 }
2299
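/* Post a chain of send WRs on the driver-internal shadow QP.  There is no
 * bad_wr reporting on this path: the first failure is logged and posting
 * stops, and the SQ doorbell is rung once at the end.
 */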
2300 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2301                                        struct bnxt_re_qp *qp,
2302                                 struct ib_send_wr *wr)
2303 {
2304         struct bnxt_qplib_swqe wqe;
2305         int rc = 0, payload_sz = 0;
2306         unsigned long flags;
2307
2308         spin_lock_irqsave(&qp->sq_lock, flags);
2309         memset(&wqe, 0, sizeof(wqe));
2310         while (wr) {
2311                 /* House keeping */
2312                 memset(&wqe, 0, sizeof(wqe));
2313
2314                 /* Common */
2315                 wqe.num_sge = wr->num_sge;
2316                 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2317                         dev_err(rdev_to_dev(rdev),
2318                                 "Limit exceeded for Send SGEs");
2319                         rc = -EINVAL;
2320                         goto bad;
2321                 }
2322
2323                 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2324                 if (payload_sz < 0) {
2325                         rc = -EINVAL;
2326                         goto bad;
2327                 }
2328                 wqe.wr_id = wr->wr_id;
2329
2330                 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2331
2332                 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2333                 if (!rc)
2334                         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2335 bad:
2336                 if (rc) {
2337                         dev_err(rdev_to_dev(rdev),
2338                                 "Post send failed opcode = %#x rc = %d",
2339                                 wr->opcode, rc);
2340                         break;
2341                 }
2342                 wr = wr->next;
2343         }
2344         bnxt_qplib_post_send_db(&qp->qplib_qp);
2345         bnxt_ud_qp_hw_stall_workaround(qp);
2346         spin_unlock_irqrestore(&qp->sq_lock, flags);
2347         return rc;
2348 }
2349
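/* Main ib_post_send entry point: each WR is converted to a qplib SWQE under
 * qp->sq_lock, GSI sends get the QP1 RoCE header built via
 * bnxt_re_build_qp1_send_v2(), and the SQ doorbell is rung once after the
 * whole chain has been posted.
 */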
2350 int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2351                       struct ib_send_wr **bad_wr)
2352 {
2353         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2354         struct bnxt_qplib_swqe wqe;
2355         int rc = 0, payload_sz = 0;
2356         unsigned long flags;
2357
2358         spin_lock_irqsave(&qp->sq_lock, flags);
2359         while (wr) {
2360                 /* House keeping */
2361                 memset(&wqe, 0, sizeof(wqe));
2362
2363                 /* Common */
2364                 wqe.num_sge = wr->num_sge;
2365                 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2366                         dev_err(rdev_to_dev(qp->rdev),
2367                                 "Limit exceeded for Send SGEs");
2368                         rc = -EINVAL;
2369                         goto bad;
2370                 }
2371
2372                 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2373                 if (payload_sz < 0) {
2374                         rc = -EINVAL;
2375                         goto bad;
2376                 }
2377                 wqe.wr_id = wr->wr_id;
2378
2379                 switch (wr->opcode) {
2380                 case IB_WR_SEND:
2381                 case IB_WR_SEND_WITH_IMM:
2382                         if (ib_qp->qp_type == IB_QPT_GSI) {
2383                                 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2384                                                                payload_sz);
2385                                 if (rc)
2386                                         goto bad;
2387                                 wqe.rawqp1.lflags |=
2388                                         SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2389                         }
2390                         switch (wr->send_flags) {
2391                         case IB_SEND_IP_CSUM:
2392                                 wqe.rawqp1.lflags |=
2393                                         SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2394                                 break;
2395                         default:
2396                                 break;
2397                         }
2398                         /* Fall thru to build the wqe */
2399                 case IB_WR_SEND_WITH_INV:
2400                         rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2401                         break;
2402                 case IB_WR_RDMA_WRITE:
2403                 case IB_WR_RDMA_WRITE_WITH_IMM:
2404                 case IB_WR_RDMA_READ:
2405                         rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2406                         break;
2407                 case IB_WR_ATOMIC_CMP_AND_SWP:
2408                 case IB_WR_ATOMIC_FETCH_AND_ADD:
2409                         rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2410                         break;
2411                 case IB_WR_RDMA_READ_WITH_INV:
2412                         dev_err(rdev_to_dev(qp->rdev),
2413                                 "RDMA Read with Invalidate is not supported");
2414                         rc = -EINVAL;
2415                         goto bad;
2416                 case IB_WR_LOCAL_INV:
2417                         rc = bnxt_re_build_inv_wqe(wr, &wqe);
2418                         break;
2419                 case IB_WR_REG_MR:
2420                         rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2421                         break;
2422                 default:
2423                         /* Unsupported WRs */
2424                         dev_err(rdev_to_dev(qp->rdev),
2425                                 "WR (%#x) is not supported", wr->opcode);
2426                         rc = -EINVAL;
2427                         goto bad;
2428                 }
2429                 if (!rc)
2430                         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2431 bad:
2432                 if (rc) {
2433                         dev_err(rdev_to_dev(qp->rdev),
2434                                 "post_send failed op:%#x qps = %#x rc = %d\n",
2435                                 wr->opcode, qp->qplib_qp.state, rc);
2436                         *bad_wr = wr;
2437                         break;
2438                 }
2439                 wr = wr->next;
2440         }
2441         bnxt_qplib_post_send_db(&qp->qplib_qp);
2442         bnxt_ud_qp_hw_stall_workaround(qp);
2443         spin_unlock_irqrestore(&qp->sq_lock, flags);
2444
2445         return rc;
2446 }
2447
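/* Post receives on the driver-internal shadow QP.  Unlike the ULP-facing
 * post_recv path there is no bad_wr reporting; the RQ doorbell is rung only
 * if the whole chain was posted successfully.
 */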
2448 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2449                                        struct bnxt_re_qp *qp,
2450                                        struct ib_recv_wr *wr)
2451 {
2452         struct bnxt_qplib_swqe wqe;
2453         int rc = 0;
2454
2455         memset(&wqe, 0, sizeof(wqe));
2456         while (wr) {
2457                 /* House keeping */
2458                 memset(&wqe, 0, sizeof(wqe));
2459
2460                 /* Common */
2461                 wqe.num_sge = wr->num_sge;
2462                 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2463                         dev_err(rdev_to_dev(rdev),
2464                                 "Limit exceeded for Receive SGEs");
2465                         rc = -EINVAL;
2466                         break;
2467                 }
2468                 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2469                 wqe.wr_id = wr->wr_id;
2470                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2471
2472                 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2473                 if (rc)
2474                         break;
2475
2476                 wr = wr->next;
2477         }
2478         if (!rc)
2479                 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2480         return rc;
2481 }
2482
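/* Main ib_post_recv entry point: WRs are posted under qp->rq_lock, GSI QPs
 * get the QP1 shadow-receive treatment, and the RQ doorbell is rung every
 * BNXT_RE_RQ_WQE_THRESHOLD WQEs and once more for any remainder.
 */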
2483 int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2484                       struct ib_recv_wr **bad_wr)
2485 {
2486         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2487         struct bnxt_qplib_swqe wqe;
2488         int rc = 0, payload_sz = 0;
2489         unsigned long flags;
2490         u32 count = 0;
2491
2492         spin_lock_irqsave(&qp->rq_lock, flags);
2493         while (wr) {
2494                 /* House keeping */
2495                 memset(&wqe, 0, sizeof(wqe));
2496
2497                 /* Common */
2498                 wqe.num_sge = wr->num_sge;
2499                 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2500                         dev_err(rdev_to_dev(qp->rdev),
2501                                 "Limit exceeded for Receive SGEs");
2502                         rc = -EINVAL;
2503                         *bad_wr = wr;
2504                         break;
2505                 }
2506
2507                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2508                                                wr->num_sge);
2509                 wqe.wr_id = wr->wr_id;
2510                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2511
2512                 if (ib_qp->qp_type == IB_QPT_GSI)
2513                         rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2514                                                               payload_sz);
2515                 if (!rc)
2516                         rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2517                 if (rc) {
2518                         *bad_wr = wr;
2519                         break;
2520                 }
2521
2522                 /* Ring the DB if the number of RQEs posted reaches a threshold */
2523                 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2524                         bnxt_qplib_post_recv_db(&qp->qplib_qp);
2525                         count = 0;
2526                 }
2527
2528                 wr = wr->next;
2529         }
2530
2531         if (count)
2532                 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2533
2534         spin_unlock_irqrestore(&qp->rq_lock, flags);
2535
2536         return rc;
2537 }
2538
2539 /* Completion Queues */
2540 int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2541 {
2542         int rc;
2543         struct bnxt_re_cq *cq;
2544         struct bnxt_qplib_nq *nq;
2545         struct bnxt_re_dev *rdev;
2546
2547         cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2548         rdev = cq->rdev;
2549         nq = cq->qplib_cq.nq;
2550
2551         rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2552         if (rc) {
2553                 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2554                 return rc;
2555         }
2556         if (!IS_ERR_OR_NULL(cq->umem))
2557                 ib_umem_release(cq->umem);
2558
2559         atomic_dec(&rdev->cq_count);
2560         nq->budget--;
2561         kfree(cq->cql);
2562         kfree(cq);
2563
2564         return 0;
2565 }
2566
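/* Create a CQ.  For user CQs the ring memory is pinned from the address
 * supplied in bnxt_re_cq_req; kernel CQs get a bounce array (cq->cql) for
 * polled CQEs.  The CQ is bound to one of the NQs chosen round-robin via
 * nq_alloc_cnt, and that NQ's budget is bumped.
 */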
2567 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2568                                 const struct ib_cq_init_attr *attr,
2569                                 struct ib_ucontext *context,
2570                                 struct ib_udata *udata)
2571 {
2572         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2573         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2574         struct bnxt_re_cq *cq = NULL;
2575         int rc, entries;
2576         int cqe = attr->cqe;
2577         struct bnxt_qplib_nq *nq = NULL;
2578         unsigned int nq_alloc_cnt;
2579
2580         /* Validate CQ fields */
2581         if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2582                 dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2583                 return ERR_PTR(-EINVAL);
2584         }
2585         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2586         if (!cq)
2587                 return ERR_PTR(-ENOMEM);
2588
2589         cq->rdev = rdev;
2590         cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2591
2592         entries = roundup_pow_of_two(cqe + 1);
2593         if (entries > dev_attr->max_cq_wqes + 1)
2594                 entries = dev_attr->max_cq_wqes + 1;
2595
2596         if (context) {
2597                 struct bnxt_re_cq_req req;
2598                 struct bnxt_re_ucontext *uctx = container_of
2599                                                 (context,
2600                                                  struct bnxt_re_ucontext,
2601                                                  ib_uctx);
2602                 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2603                         rc = -EFAULT;
2604                         goto fail;
2605                 }
2606
2607                 cq->umem = ib_umem_get(context, req.cq_va,
2608                                        entries * sizeof(struct cq_base),
2609                                        IB_ACCESS_LOCAL_WRITE, 1);
2610                 if (IS_ERR(cq->umem)) {
2611                         rc = PTR_ERR(cq->umem);
2612                         goto fail;
2613                 }
2614                 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2615                 cq->qplib_cq.nmap = cq->umem->nmap;
2616                 cq->qplib_cq.dpi = &uctx->dpi;
2617         } else {
2618                 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2619                 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2620                                   GFP_KERNEL);
2621                 if (!cq->cql) {
2622                         rc = -ENOMEM;
2623                         goto fail;
2624                 }
2625
2626                 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2627                 cq->qplib_cq.sghead = NULL;
2628                 cq->qplib_cq.nmap = 0;
2629         }
2630         /*
2631          * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
2632          * used for getting the NQ index.
2633          */
2634         nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2635         nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2636         cq->qplib_cq.max_wqe = entries;
2637         cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2638         cq->qplib_cq.nq = nq;
2639
2640         rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2641         if (rc) {
2642                 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2643                 goto fail;
2644         }
2645
2646         cq->ib_cq.cqe = entries;
2647         cq->cq_period = cq->qplib_cq.period;
2648         nq->budget++;
2649
2650         atomic_inc(&rdev->cq_count);
2651
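             /* For a user CQ, return the CQ id, tail index and phase to the
              * library via the ABI response.
              */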
2652         if (context) {
2653                 struct bnxt_re_cq_resp resp;
2654
2655                 resp.cqid = cq->qplib_cq.id;
2656                 resp.tail = cq->qplib_cq.hwq.cons;
2657                 resp.phase = cq->qplib_cq.period;
2658                 resp.rsvd = 0;
2659                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2660                 if (rc) {
2661                         dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2662                         bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2663                         goto c2fail;
2664                 }
2665         }
2666
2667         return &cq->ib_cq;
2668
2669 c2fail:
2670         if (context)
2671                 ib_umem_release(cq->umem);
2672 fail:
2673         kfree(cq->cql);
2674         kfree(cq);
2675         return ERR_PTR(rc);
2676 }
2677
2678 static u8 __req_to_ib_wc_status(u8 qstatus)
2679 {
2680         switch (qstatus) {
2681         case CQ_REQ_STATUS_OK:
2682                 return IB_WC_SUCCESS;
2683         case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2684                 return IB_WC_BAD_RESP_ERR;
2685         case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2686                 return IB_WC_LOC_LEN_ERR;
2687         case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2688                 return IB_WC_LOC_QP_OP_ERR;
2689         case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2690                 return IB_WC_LOC_PROT_ERR;
2691         case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2692                 return IB_WC_GENERAL_ERR;
2693         case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2694                 return IB_WC_REM_INV_REQ_ERR;
2695         case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2696                 return IB_WC_REM_ACCESS_ERR;
2697         case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2698                 return IB_WC_REM_OP_ERR;
2699         case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2700                 return IB_WC_RNR_RETRY_EXC_ERR;
2701         case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2702                 return IB_WC_RETRY_EXC_ERR;
2703         case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2704                 return IB_WC_WR_FLUSH_ERR;
2705         default:
2706                 return IB_WC_GENERAL_ERR;
2707         }
2708         return 0;
2709 }
2710
2711 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2712 {
2713         switch (qstatus) {
2714         case CQ_RES_RAWETH_QP1_STATUS_OK:
2715                 return IB_WC_SUCCESS;
2716         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2717                 return IB_WC_LOC_ACCESS_ERR;
2718         case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2719                 return IB_WC_LOC_LEN_ERR;
2720         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2721                 return IB_WC_LOC_PROT_ERR;
2722         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2723                 return IB_WC_LOC_QP_OP_ERR;
2724         case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2725                 return IB_WC_GENERAL_ERR;
2726         case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2727                 return IB_WC_WR_FLUSH_ERR;
2728         case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2729                 return IB_WC_WR_FLUSH_ERR;
2730         default:
2731                 return IB_WC_GENERAL_ERR;
2732         }
2733 }
2734
2735 static u8 __rc_to_ib_wc_status(u8 qstatus)
2736 {
2737         switch (qstatus) {
2738         case CQ_RES_RC_STATUS_OK:
2739                 return IB_WC_SUCCESS;
2740         case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2741                 return IB_WC_LOC_ACCESS_ERR;
2742         case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2743                 return IB_WC_LOC_LEN_ERR;
2744         case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2745                 return IB_WC_LOC_PROT_ERR;
2746         case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2747                 return IB_WC_LOC_QP_OP_ERR;
2748         case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2749                 return IB_WC_GENERAL_ERR;
2750         case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2751                 return IB_WC_REM_INV_REQ_ERR;
2752         case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2753                 return IB_WC_WR_FLUSH_ERR;
2754         case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2755                 return IB_WC_WR_FLUSH_ERR;
2756         default:
2757                 return IB_WC_GENERAL_ERR;
2758         }
2759 }
2760
2761 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2762 {
2763         switch (cqe->type) {
2764         case BNXT_QPLIB_SWQE_TYPE_SEND:
2765                 wc->opcode = IB_WC_SEND;
2766                 break;
2767         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2768                 wc->opcode = IB_WC_SEND;
2769                 wc->wc_flags |= IB_WC_WITH_IMM;
2770                 break;
2771         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2772                 wc->opcode = IB_WC_SEND;
2773                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2774                 break;
2775         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2776                 wc->opcode = IB_WC_RDMA_WRITE;
2777                 break;
2778         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2779                 wc->opcode = IB_WC_RDMA_WRITE;
2780                 wc->wc_flags |= IB_WC_WITH_IMM;
2781                 break;
2782         case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2783                 wc->opcode = IB_WC_RDMA_READ;
2784                 break;
2785         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2786                 wc->opcode = IB_WC_COMP_SWAP;
2787                 break;
2788         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2789                 wc->opcode = IB_WC_FETCH_ADD;
2790                 break;
2791         case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2792                 wc->opcode = IB_WC_LOCAL_INV;
2793                 break;
2794         case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2795                 wc->opcode = IB_WC_REG_MR;
2796                 break;
2797         default:
2798                 wc->opcode = IB_WC_SEND;
2799                 break;
2800         }
2801
2802         wc->status = __req_to_ib_wc_status(cqe->status);
2803 }
2804
2805 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2806                                      u16 raweth_qp1_flags2)
2807 {
2808         bool is_ipv6 = false, is_ipv4 = false;
2809
2810         /* raweth_qp1_flags Bit 9-6 indicates itype */
2811         if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2812             != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2813                 return -1;
2814
2815         if (raweth_qp1_flags2 &
2816             CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2817             raweth_qp1_flags2 &
2818             CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2819                 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0 - v4, 1 - v6 */
2820                 (raweth_qp1_flags2 &
2821                  CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2822                         (is_ipv6 = true) : (is_ipv4 = true);
2823                 return ((is_ipv6) ?
2824                          BNXT_RE_ROCEV2_IPV6_PACKET :
2825                          BNXT_RE_ROCEV2_IPV4_PACKET);
2826         } else {
2827                 return BNXT_RE_ROCE_V1_PACKET;
2828         }
2829 }
2830
2831 static int bnxt_re_to_ib_nw_type(int nw_type)
2832 {
2833         u8 nw_hdr_type = 0xFF;
2834
2835         switch (nw_type) {
2836         case BNXT_RE_ROCE_V1_PACKET:
2837                 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2838                 break;
2839         case BNXT_RE_ROCEV2_IPV4_PACKET:
2840                 nw_hdr_type = RDMA_NETWORK_IPV4;
2841                 break;
2842         case BNXT_RE_ROCEV2_IPV6_PACKET:
2843                 nw_hdr_type = RDMA_NETWORK_IPV6;
2844                 break;
2845         }
2846         return nw_hdr_type;
2847 }
2848
2849 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2850                                        void *rq_hdr_buf)
2851 {
2852         u8 *tmp_buf = NULL;
2853         struct ethhdr *eth_hdr;
2854         u16 eth_type;
2855         bool rc = false;
2856
2857         tmp_buf = (u8 *)rq_hdr_buf;
2858         /*
2859          * If the destination MAC is not the same as the interface MAC,
2860          * this could be a loopback or multicast address; check whether
2861          * it is a loopback packet.
2862          */
2863         if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2864                 tmp_buf += 4;
2865                 /* Check the ether type */
2866                 eth_hdr = (struct ethhdr *)tmp_buf;
2867                 eth_type = ntohs(eth_hdr->h_proto);
2868                 switch (eth_type) {
2869                 case ETH_P_IBOE:
2870                         rc = true;
2871                         break;
2872                 case ETH_P_IP:
2873                 case ETH_P_IPV6: {
2874                         u32 len;
2875                         struct udphdr *udp_hdr;
2876
2877                         len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2878                                                       sizeof(struct ipv6hdr));
2879                         tmp_buf += sizeof(struct ethhdr) + len;
2880                         udp_hdr = (struct udphdr *)tmp_buf;
2881                         if (ntohs(udp_hdr->dest) ==
2882                                     ROCE_V2_UDP_DPORT)
2883                                 rc = true;
2884                         break;
2885                         }
2886                 default:
2887                         break;
2888                 }
2889         }
2890
2891         return rc;
2892 }
2893
2894 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2895                                          struct bnxt_qplib_cqe *cqe)
2896 {
2897         struct bnxt_re_dev *rdev = qp1_qp->rdev;
2898         struct bnxt_re_sqp_entries *sqp_entry = NULL;
2899         struct bnxt_re_qp *qp = rdev->qp1_sqp;
2900         struct ib_send_wr *swr;
2901         struct ib_ud_wr udwr;
2902         struct ib_recv_wr rwr;
2903         int pkt_type = 0;
2904         u32 tbl_idx;
2905         void *rq_hdr_buf;
2906         dma_addr_t rq_hdr_buf_map;
2907         dma_addr_t shrq_hdr_buf_map;
2908         u32 offset = 0;
2909         u32 skip_bytes = 0;
2910         struct ib_sge s_sge[2];
2911         struct ib_sge r_sge[2];
2912         int rc;
2913
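             /* Relay the raw QP1 packet through the shadow QP: stash the
              * original CQE, post a receive buffer on the shadow QP and then
              * send the received headers and payload to it, so the packet
              * surfaces later as a regular UD completion reported against
              * the GSI QP.
              */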
2914         memset(&udwr, 0, sizeof(udwr));
2915         memset(&rwr, 0, sizeof(rwr));
2916         memset(&s_sge, 0, sizeof(s_sge));
2917         memset(&r_sge, 0, sizeof(r_sge));
2918
2919         swr = &udwr.wr;
2920         tbl_idx = cqe->wr_id;
2921
2922         rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2923                         (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2924         rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2925                                                           tbl_idx);
2926
2927         /* Shadow QP header buffer */
2928         shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2929                                                             tbl_idx);
2930         sqp_entry = &rdev->sqp_tbl[tbl_idx];
2931
2932         /* Store this cqe */
2933         memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2934         sqp_entry->qp1_qp = qp1_qp;
2935
2936         /* Find packet type from the cqe */
2937
2938         pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2939                                              cqe->raweth_qp1_flags2);
2940         if (pkt_type < 0) {
2941                 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2942                 return -EINVAL;
2943         }
2944
2945         /* Adjust the offset for the user buffer and post in the rq */
2946
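             /* A RoCEv2 IPv4 packet carries a 20-byte IP header in place of
              * the 40-byte GRH, so shift the receive buffer by the difference.
              */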
2947         if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2948                 offset = 20;
2949
2950         /*
2951          * QP1 loopback packet has 4 bytes of internal header before
2952          * ether header. Skip these four bytes.
2953          */
2954         if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2955                 skip_bytes = 4;
2956
2957         /* First send SGE. Skip the ether header */
2958         s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2959                         + skip_bytes;
2960         s_sge[0].lkey = 0xFFFFFFFF;
2961         s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2962                                 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2963
2964         /* Second Send SGE */
2965         s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2966                         BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2967         if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2968                 s_sge[1].addr += 8;
2969         s_sge[1].lkey = 0xFFFFFFFF;
2970         s_sge[1].length = 256;
2971
2972         /* First recv SGE */
2973
2974         r_sge[0].addr = shrq_hdr_buf_map;
2975         r_sge[0].lkey = 0xFFFFFFFF;
2976         r_sge[0].length = 40;
2977
2978         r_sge[1].addr = sqp_entry->sge.addr + offset;
2979         r_sge[1].lkey = sqp_entry->sge.lkey;
2980         r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2981
2982         /* Create receive work request */
2983         rwr.num_sge = 2;
2984         rwr.sg_list = r_sge;
2985         rwr.wr_id = tbl_idx;
2986         rwr.next = NULL;
2987
2988         rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2989         if (rc) {
2990                 dev_err(rdev_to_dev(rdev),
2991                         "Failed to post Rx buffers to shadow QP");
2992                 return -ENOMEM;
2993         }
2994
2995         swr->num_sge = 2;
2996         swr->sg_list = s_sge;
2997         swr->wr_id = tbl_idx;
2998         swr->opcode = IB_WR_SEND;
2999         swr->next = NULL;
3000
3001         udwr.ah = &rdev->sqp_ah->ib_ah;
3002         udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
3003         udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
3004
3005         /* Post the received data in the send queue */
3006         rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
3007
3008         return 0;
3009 }
3010
3011 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3012                                           struct bnxt_qplib_cqe *cqe)
3013 {
3014         wc->opcode = IB_WC_RECV;
3015         wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3016         wc->wc_flags |= IB_WC_GRH;
3017 }
3018
3019 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3020                                 u16 *vid, u8 *sl)
3021 {
3022         bool ret = false;
3023         u32 metadata;
3024         u16 tpid;
3025
3026         metadata = orig_cqe->raweth_qp1_metadata;
3027         if (orig_cqe->raweth_qp1_flags2 &
3028                 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3029                 tpid = ((metadata &
3030                          CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3031                          CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3032                 if (tpid == ETH_P_8021Q) {
3033                         *vid = metadata &
3034                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3035                         *sl = (metadata &
3036                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3037                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3038                         ret = true;
3039                 }
3040         }
3041
3042         return ret;
3043 }
3044
3045 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3046                                       struct bnxt_qplib_cqe *cqe)
3047 {
3048         wc->opcode = IB_WC_RECV;
3049         wc->status = __rc_to_ib_wc_status(cqe->status);
3050
3051         if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3052                 wc->wc_flags |= IB_WC_WITH_IMM;
3053         if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3054                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3055         if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3056             (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3057                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3058 }
3059
3060 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3061                                              struct ib_wc *wc,
3062                                              struct bnxt_qplib_cqe *cqe)
3063 {
3064         struct bnxt_re_dev *rdev = qp->rdev;
3065         struct bnxt_re_qp *qp1_qp = NULL;
3066         struct bnxt_qplib_cqe *orig_cqe = NULL;
3067         struct bnxt_re_sqp_entries *sqp_entry = NULL;
3068         int nw_type;
3069         u32 tbl_idx;
3070         u16 vlan_id;
3071         u8 sl;
3072
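             /* Recover the original raw QP1 completion that was stashed when
              * the packet was relayed through the shadow QP, and report it
              * against the real QP1.
              */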
3073         tbl_idx = cqe->wr_id;
3074
3075         sqp_entry = &rdev->sqp_tbl[tbl_idx];
3076         qp1_qp = sqp_entry->qp1_qp;
3077         orig_cqe = &sqp_entry->cqe;
3078
3079         wc->wr_id = sqp_entry->wrid;
3080         wc->byte_len = orig_cqe->length;
3081         wc->qp = &qp1_qp->ib_qp;
3082
3083         wc->ex.imm_data = orig_cqe->immdata;
3084         wc->src_qp = orig_cqe->src_qp;
3085         memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3086         if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3087                 wc->vlan_id = vlan_id;
3088                 wc->sl = sl;
3089                 wc->wc_flags |= IB_WC_WITH_VLAN;
3090         }
3091         wc->port_num = 1;
3092         wc->vendor_err = orig_cqe->status;
3093
3094         wc->opcode = IB_WC_RECV;
3095         wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3096         wc->wc_flags |= IB_WC_GRH;
3097
3098         nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3099                                             orig_cqe->raweth_qp1_flags2);
3100         if (nw_type >= 0) {
3101                 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3102                 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3103         }
3104 }
3105
3106 static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
3107                                       struct bnxt_qplib_cqe *cqe)
3108 {
3109         wc->opcode = IB_WC_RECV;
3110         wc->status = __rc_to_ib_wc_status(cqe->status);
3111
3112         if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3113                 wc->wc_flags |= IB_WC_WITH_IMM;
3114         if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3115                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3116         if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3117             (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3118                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3119 }
3120
3121 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3122 {
3123         struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3124         unsigned long flags;
3125         int rc = 0;
3126
3127         spin_lock_irqsave(&qp->sq_lock, flags);
3128
3129         rc = bnxt_re_bind_fence_mw(lib_qp);
3130         if (!rc) {
3131                 lib_qp->sq.phantom_wqe_cnt++;
3132                 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3133                         "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3134                         lib_qp->id, lib_qp->sq.hwq.prod,
3135                         HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3136                         lib_qp->sq.phantom_wqe_cnt);
3137         }
3138
3139         spin_unlock_irqrestore(&qp->sq_lock, flags);
3140         return rc;
3141 }
3142
3143 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3144 {
3145         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3146         struct bnxt_re_qp *qp;
3147         struct bnxt_qplib_cqe *cqe;
3148         int i, ncqe, budget;
3149         struct bnxt_qplib_q *sq;
3150         struct bnxt_qplib_qp *lib_qp;
3151         u32 tbl_idx;
3152         struct bnxt_re_sqp_entries *sqp_entry = NULL;
3153         unsigned long flags;
3154
3155         spin_lock_irqsave(&cq->cq_lock, flags);
3156         budget = min_t(u32, num_entries, cq->max_cql);
3157         num_entries = budget;
3158         if (!cq->cql) {
3159                 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3160                 goto exit;
3161         }
3162         cqe = &cq->cql[0];
3163         while (budget) {
3164                 lib_qp = NULL;
3165                 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
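                     /* The qplib layer sets send_phantom when it needs an
                      * extra (phantom) WQE on this SQ; post one via a fence
                      * MW bind.
                      */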
3166                 if (lib_qp) {
3167                         sq = &lib_qp->sq;
3168                         if (sq->send_phantom) {
3169                                 qp = container_of(lib_qp,
3170                                                   struct bnxt_re_qp, qplib_qp);
3171                                 if (send_phantom_wqe(qp) == -ENOMEM)
3172                                         dev_err(rdev_to_dev(cq->rdev),
3173                                                 "Phantom failed! Scheduled to send again\n");
3174                                 else
3175                                         sq->send_phantom = false;
3176                         }
3177                 }
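                     /* Pick up completions queued on the flush list for QPs
                      * that have been moved to the error state.
                      */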
3178                 if (ncqe < budget)
3179                         ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3180                                                               cqe + ncqe,
3181                                                               budget - ncqe);
3182
3183                 if (!ncqe)
3184                         break;
3185
3186                 for (i = 0; i < ncqe; i++, cqe++) {
3187                         /* Transcribe each qplib_wqe back to ib_wc */
3188                         memset(wc, 0, sizeof(*wc));
3189
3190                         wc->wr_id = cqe->wr_id;
3191                         wc->byte_len = cqe->length;
3192                         qp = container_of
3193                                 ((struct bnxt_qplib_qp *)
3194                                  (unsigned long)(cqe->qp_handle),
3195                                  struct bnxt_re_qp, qplib_qp);
3196                         if (!qp) {
3197                                 dev_err(rdev_to_dev(cq->rdev),
3198                                         "POLL CQ : bad QP handle");
3199                                 continue;
3200                         }
3201                         wc->qp = &qp->ib_qp;
3202                         wc->ex.imm_data = cqe->immdata;
3203                         wc->src_qp = cqe->src_qp;
3204                         memcpy(wc->smac, cqe->smac, ETH_ALEN);
3205                         wc->port_num = 1;
3206                         wc->vendor_err = cqe->status;
3207
3208                         switch (cqe->opcode) {
3209                         case CQ_BASE_CQE_TYPE_REQ:
3210                                 if (qp->qplib_qp.id ==
3211                                     qp->rdev->qp1_sqp->qplib_qp.id) {
3212                                         /* Handle this completion with
3213                                          * the stored completion
3214                                          */
3215                                         memset(wc, 0, sizeof(*wc));
3216                                         continue;
3217                                 }
3218                                 bnxt_re_process_req_wc(wc, cqe);
3219                                 break;
3220                         case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3221                                 if (!cqe->status) {
3222                                         int rc = 0;
3223
3224                                         rc = bnxt_re_process_raw_qp_pkt_rx
3225                                                                 (qp, cqe);
3226                                         if (!rc) {
3227                                                 memset(wc, 0, sizeof(*wc));
3228                                                 continue;
3229                                         }
3230                                         cqe->status = -1;
3231                                 }
3232                                 /* Errors need not be looped back.
3233                                  * But change the wr_id to the one
3234                                  * stored in the table
3235                                  */
3236                                 tbl_idx = cqe->wr_id;
3237                                 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3238                                 wc->wr_id = sqp_entry->wrid;
3239                                 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3240                                 break;
3241                         case CQ_BASE_CQE_TYPE_RES_RC:
3242                                 bnxt_re_process_res_rc_wc(wc, cqe);
3243                                 break;
3244                         case CQ_BASE_CQE_TYPE_RES_UD:
3245                                 if (qp->qplib_qp.id ==
3246                                     qp->rdev->qp1_sqp->qplib_qp.id) {
3247                                         /* Handle this completion with
3248                                          * the stored completion
3249                                          */
3250                                         if (cqe->status) {
3251                                                 continue;
3252                                         } else {
3253                                                 bnxt_re_process_res_shadow_qp_wc
3254                                                                 (qp, wc, cqe);
3255                                                 break;
3256                                         }
3257                                 }
3258                                 bnxt_re_process_res_ud_wc(wc, cqe);
3259                                 break;
3260                         default:
3261                                 dev_err(rdev_to_dev(cq->rdev),
3262                                         "POLL CQ : type 0x%x not handled",
3263                                         cqe->opcode);
3264                                 continue;
3265                         }
3266                         wc++;
3267                         budget--;
3268                 }
3269         }
3270 exit:
3271         spin_unlock_irqrestore(&cq->cq_lock, flags);
3272         return num_entries - budget;
3273 }
3274
3275 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3276                           enum ib_cq_notify_flags ib_cqn_flags)
3277 {
3278         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3279         int type = 0, rc = 0;
3280         unsigned long flags;
3281
3282         spin_lock_irqsave(&cq->cq_lock, flags);
3283         /* Trigger on the very next completion */
3284         if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3285                 type = DBR_DBR_TYPE_CQ_ARMALL;
3286         /* Trigger on the next solicited completion */
3287         else if (ib_cqn_flags & IB_CQ_SOLICITED)
3288                 type = DBR_DBR_TYPE_CQ_ARMSE;
3289
3290         /* Poll to see if there are missed events */
3291         if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3292             !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3293                 rc = 1;
3294                 goto exit;
3295         }
3296         bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3297
3298 exit:
3299         spin_unlock_irqrestore(&cq->cq_lock, flags);
3300         return rc;
3301 }
3302
3303 /* Memory Regions */
3304 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3305 {
3306         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3307         struct bnxt_re_dev *rdev = pd->rdev;
3308         struct bnxt_re_mr *mr;
3309         u64 pbl = 0;
3310         int rc;
3311
3312         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3313         if (!mr)
3314                 return ERR_PTR(-ENOMEM);
3315
3316         mr->rdev = rdev;
3317         mr->qplib_mr.pd = &pd->qplib_pd;
3318         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3319         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3320
3321         /* Allocate and register 0 as the address */
3322         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3323         if (rc)
3324                 goto fail;
3325
3326         mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3327         mr->qplib_mr.total_size = -1; /* Infinite length */
3328         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3329                                PAGE_SIZE);
3330         if (rc)
3331                 goto fail_mr;
3332
3333         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3334         if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3335                                IB_ACCESS_REMOTE_ATOMIC))
3336                 mr->ib_mr.rkey = mr->ib_mr.lkey;
3337         atomic_inc(&rdev->mr_count);
3338
3339         return &mr->ib_mr;
3340
3341 fail_mr:
3342         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3343 fail:
3344         kfree(mr);
3345         return ERR_PTR(rc);
3346 }
3347
3348 int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3349 {
3350         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3351         struct bnxt_re_dev *rdev = mr->rdev;
3352         int rc;
3353
3354         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3355         if (rc)
3356                 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3357
3358         if (mr->pages) {
3359                 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3360                                                         &mr->qplib_frpl);
3361                 kfree(mr->pages);
3362                 mr->npages = 0;
3363                 mr->pages = NULL;
3364         }
3365         if (!IS_ERR_OR_NULL(mr->ib_umem))
3366                 ib_umem_release(mr->ib_umem);
3367
3368         kfree(mr);
3369         atomic_dec(&rdev->mr_count);
3370         return rc;
3371 }
3372
3373 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3374 {
3375         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3376
3377         if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3378                 return -ENOMEM;
3379
3380         mr->pages[mr->npages++] = addr;
3381         return 0;
3382 }
3383
3384 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3385                       unsigned int *sg_offset)
3386 {
3387         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3388
3389         mr->npages = 0;
3390         return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3391 }
3392
3393 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3394                                u32 max_num_sg)
3395 {
3396         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3397         struct bnxt_re_dev *rdev = pd->rdev;
3398         struct bnxt_re_mr *mr = NULL;
3399         int rc;
3400
3401         if (type != IB_MR_TYPE_MEM_REG) {
3402                 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3403                 return ERR_PTR(-EINVAL);
3404         }
3405         if (max_num_sg > MAX_PBL_LVL_1_PGS)
3406                 return ERR_PTR(-EINVAL);
3407
3408         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3409         if (!mr)
3410                 return ERR_PTR(-ENOMEM);
3411
3412         mr->rdev = rdev;
3413         mr->qplib_mr.pd = &pd->qplib_pd;
3414         mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3415         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3416
3417         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3418         if (rc)
3419                 goto bail;
3420
3421         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3422         mr->ib_mr.rkey = mr->ib_mr.lkey;
3423
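             /* Allocate the page array and the HW fast-register page list,
              * each sized for up to max_num_sg entries.
              */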
3424         mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3425         if (!mr->pages) {
3426                 rc = -ENOMEM;
3427                 goto fail;
3428         }
3429         rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3430                                                  &mr->qplib_frpl, max_num_sg);
3431         if (rc) {
3432                 dev_err(rdev_to_dev(rdev),
3433                         "Failed to allocate HW FR page list");
3434                 goto fail_mr;
3435         }
3436
3437         atomic_inc(&rdev->mr_count);
3438         return &mr->ib_mr;
3439
3440 fail_mr:
3441         kfree(mr->pages);
3442 fail:
3443         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3444 bail:
3445         kfree(mr);
3446         return ERR_PTR(rc);
3447 }
3448
3449 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3450                                struct ib_udata *udata)
3451 {
3452         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3453         struct bnxt_re_dev *rdev = pd->rdev;
3454         struct bnxt_re_mw *mw;
3455         int rc;
3456
3457         mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3458         if (!mw)
3459                 return ERR_PTR(-ENOMEM);
3460         mw->rdev = rdev;
3461         mw->qplib_mw.pd = &pd->qplib_pd;
3462
3463         mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3464                                CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3465                                CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3466         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3467         if (rc) {
3468                 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3469                 goto fail;
3470         }
3471         mw->ib_mw.rkey = mw->qplib_mw.rkey;
3472
3473         atomic_inc(&rdev->mw_count);
3474         return &mw->ib_mw;
3475
3476 fail:
3477         kfree(mw);
3478         return ERR_PTR(rc);
3479 }
3480
3481 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3482 {
3483         struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3484         struct bnxt_re_dev *rdev = mw->rdev;
3485         int rc;
3486
3487         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3488         if (rc) {
3489                 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3490                 return rc;
3491         }
3492
3493         kfree(mw);
3494         atomic_dec(&rdev->mw_count);
3495         return rc;
3496 }
3497
3498 static int bnxt_re_page_size_ok(int page_shift)
3499 {
3500         switch (page_shift) {
3501         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3502         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3503         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3504         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3505         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3506         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3507         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3508         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3509                 return 1;
3510         default:
3511                 return 0;
3512         }
3513 }
3514
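     /* Populate the PBL with page-aligned DMA addresses taken from the umem
      * scatterlist and return the number of entries filled.
      */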
3515 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3516                              int page_shift)
3517 {
3518         u64 *pbl_tbl = pbl_tbl_orig;
3519         u64 paddr;
3520         u64 page_mask = (1ULL << page_shift) - 1;
3521         int i, pages;
3522         struct scatterlist *sg;
3523         int entry;
3524
3525         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3526                 pages = sg_dma_len(sg) >> PAGE_SHIFT;
3527                 for (i = 0; i < pages; i++) {
3528                         paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
3529                         if (pbl_tbl == pbl_tbl_orig)
3530                                 *pbl_tbl++ = paddr & ~page_mask;
3531                         else if ((paddr & page_mask) == 0)
3532                                 *pbl_tbl++ = paddr;
3533                 }
3534         }
3535         return pbl_tbl - pbl_tbl_orig;
3536 }
3537
3538 /* uverbs */
3539 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3540                                   u64 virt_addr, int mr_access_flags,
3541                                   struct ib_udata *udata)
3542 {
3543         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3544         struct bnxt_re_dev *rdev = pd->rdev;
3545         struct bnxt_re_mr *mr;
3546         struct ib_umem *umem;
3547         u64 *pbl_tbl = NULL;
3548         int umem_pgs, page_shift, rc;
3549
3550         if (length > BNXT_RE_MAX_MR_SIZE) {
3551                 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported: %ld\n",
3552                         length, BNXT_RE_MAX_MR_SIZE);
3553                 return ERR_PTR(-ENOMEM);
3554         }
3555
3556         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3557         if (!mr)
3558                 return ERR_PTR(-ENOMEM);
3559
3560         mr->rdev = rdev;
3561         mr->qplib_mr.pd = &pd->qplib_pd;
3562         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3563         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3564
3565         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3566         if (rc) {
3567                 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3568                 goto free_mr;
3569         }
3570         /* The fixed portion of the rkey is the same as the lkey */
3571         mr->ib_mr.rkey = mr->qplib_mr.rkey;
3572
3573         umem = ib_umem_get(ib_pd->uobject->context, start, length,
3574                            mr_access_flags, 0);
3575         if (IS_ERR(umem)) {
3576                 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3577                 rc = -EFAULT;
3578                 goto free_mrw;
3579         }
3580         mr->ib_umem = umem;
3581
3582         mr->qplib_mr.va = virt_addr;
3583         umem_pgs = ib_umem_page_count(umem);
3584         if (!umem_pgs) {
3585                 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3586                 rc = -EINVAL;
3587                 goto free_umem;
3588         }
3589         mr->qplib_mr.total_size = length;
3590
3591         pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3592         if (!pbl_tbl) {
3593                 rc = -ENOMEM;
3594                 goto free_umem;
3595         }
3596
3597         page_shift = umem->page_shift;
3598
3599         if (!bnxt_re_page_size_ok(page_shift)) {
3600                 dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3601                 rc = -EFAULT;
3602                 goto fail;
3603         }
3604
3605         if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
3606                 dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
3607                         length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3608                 rc = -EINVAL;
3609                 goto fail;
3610         }
3611         if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
3612                 page_shift = BNXT_RE_PAGE_SHIFT_2M;
3613                 dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
3614                          1 << page_shift);
3615         }
3616
3617         /* Map umem buf ptrs to the PBL */
3618         umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3619         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3620                                umem_pgs, false, 1 << page_shift);
3621         if (rc) {
3622                 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3623                 goto fail;
3624         }
3625
3626         kfree(pbl_tbl);
3627
3628         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3629         mr->ib_mr.rkey = mr->qplib_mr.lkey;
3630         atomic_inc(&rdev->mr_count);
3631
3632         return &mr->ib_mr;
3633 fail:
3634         kfree(pbl_tbl);
3635 free_umem:
3636         ib_umem_release(umem);
3637 free_mrw:
3638         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3639 free_mr:
3640         kfree(mr);
3641         return ERR_PTR(rc);
3642 }
3643
3644 struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3645                                            struct ib_udata *udata)
3646 {
3647         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3648         struct bnxt_re_uctx_resp resp;
3649         struct bnxt_re_ucontext *uctx;
3650         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3651         int rc;
3652
3653         dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3654                 ibdev->uverbs_abi_ver);
3655
3656         if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3657                 dev_dbg(rdev_to_dev(rdev), "ABI version mismatch, device supports %d",
3658                         BNXT_RE_ABI_VERSION);
3659                 return ERR_PTR(-EPERM);
3660         }
3661
3662         uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3663         if (!uctx)
3664                 return ERR_PTR(-ENOMEM);
3665
3666         uctx->rdev = rdev;
3667
3668         uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3669         if (!uctx->shpg) {
3670                 rc = -ENOMEM;
3671                 goto fail;
3672         }
3673         spin_lock_init(&uctx->sh_lock);
3674
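             /* Report device identity and limits that the user library needs
              * to size its resources.
              */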
3675         resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3676         resp.max_qp = rdev->qplib_ctx.qpc_count;
3677         resp.pg_size = PAGE_SIZE;
3678         resp.cqe_sz = sizeof(struct cq_base);
3679         resp.max_cqd = dev_attr->max_cq_wqes;
3680         resp.rsvd    = 0;
3681
3682         rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3683         if (rc) {
3684                 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3685                 rc = -EFAULT;
3686                 goto cfail;
3687         }
3688
3689         return &uctx->ib_uctx;
3690 cfail:
3691         free_page((unsigned long)uctx->shpg);
3692         uctx->shpg = NULL;
3693 fail:
3694         kfree(uctx);
3695         return ERR_PTR(rc);
3696 }
3697
3698 int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3699 {
3700         struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3701                                                    struct bnxt_re_ucontext,
3702                                                    ib_uctx);
3703
3704         struct bnxt_re_dev *rdev = uctx->rdev;
3705         int rc = 0;
3706
3707         if (uctx->shpg)
3708                 free_page((unsigned long)uctx->shpg);
3709
3710         if (uctx->dpi.dbr) {
3711                 /* Free the DPI that was allocated when the application
3712                  * created its first PD, and mark the context DPI as NULL.
3713                  */
3714                 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3715                                             &rdev->qplib_res.dpi_tbl,
3716                                             &uctx->dpi);
3717                 if (rc)
3718                         dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3719                 /* Don't fail, continue */
3720                 uctx->dpi.dbr = NULL;
3721         }
3722
3723         kfree(uctx);
3724         return 0;
3725 }
3726
3727 /* Helper function to mmap the virtual memory from user app */
3728 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3729 {
3730         struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3731                                                    struct bnxt_re_ucontext,
3732                                                    ib_uctx);
3733         struct bnxt_re_dev *rdev = uctx->rdev;
3734         u64 pfn;
3735
3736         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3737                 return -EINVAL;
3738
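             /* A non-zero offset maps the DPI (doorbell) page as uncached I/O
              * memory; offset zero maps the kernel's shared page.
              */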
3739         if (vma->vm_pgoff) {
3740                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3741                 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3742                                        PAGE_SIZE, vma->vm_page_prot)) {
3743                         dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3744                         return -EAGAIN;
3745                 }
3746         } else {
3747                 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3748                 if (remap_pfn_range(vma, vma->vm_start,
3749                                     pfn, PAGE_SIZE, vma->vm_page_prot)) {
3750                         dev_err(rdev_to_dev(rdev),
3751                                 "Failed to map shared page");
3752                         return -EAGAIN;
3753                 }
3754         }
3755
3756         return 0;
3757 }