/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>

static int __from_ib_access_flags(int iflags)
{
        int qflags = 0;

        if (iflags & IB_ACCESS_LOCAL_WRITE)
                qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
        if (iflags & IB_ACCESS_REMOTE_READ)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
        if (iflags & IB_ACCESS_REMOTE_WRITE)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
        if (iflags & IB_ACCESS_REMOTE_ATOMIC)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
        if (iflags & IB_ACCESS_MW_BIND)
                qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
        if (iflags & IB_ZERO_BASED)
                qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
        if (iflags & IB_ACCESS_ON_DEMAND)
                qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
        return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
        enum ib_access_flags iflags = 0;

        if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
                iflags |= IB_ACCESS_LOCAL_WRITE;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
                iflags |= IB_ACCESS_REMOTE_WRITE;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
                iflags |= IB_ACCESS_REMOTE_READ;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
                iflags |= IB_ACCESS_REMOTE_ATOMIC;
        if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
                iflags |= IB_ACCESS_MW_BIND;
        if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
                iflags |= IB_ZERO_BASED;
        if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
                iflags |= IB_ACCESS_ON_DEMAND;
        return iflags;
}
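
/*
 * Illustrative sketch (not part of the driver): the two helpers above are
 * inverses over the flag bits both sides support, so a round trip through
 * the qplib representation preserves the verbs flags. For example:
 *
 *      int qflags = __from_ib_access_flags(IB_ACCESS_LOCAL_WRITE |
 *                                          IB_ACCESS_REMOTE_READ);
 *      // qflags == BNXT_QPLIB_ACCESS_LOCAL_WRITE |
 *      //           BNXT_QPLIB_ACCESS_REMOTE_READ
 *      WARN_ON(__to_ib_access_flags(qflags) !=
 *              (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ));
 */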

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
                             struct bnxt_qplib_sge *sg_list, int num)
{
        int i, total = 0;

        for (i = 0; i < num; i++) {
                sg_list[i].addr = ib_sg_list[i].addr;
                sg_list[i].lkey = ib_sg_list[i].lkey;
                sg_list[i].size = ib_sg_list[i].length;
                total += sg_list[i].size;
        }
        return total;
}
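
/*
 * Illustrative usage (hypothetical values, not driver code): a caller
 * posting a send with two SGEs would convert them for the qplib layer
 * like this; "wqe", "buf_dma" and "mr_lkey" are assumptions here.
 *
 *      struct ib_sge sges[2] = {
 *              { .addr = buf_dma,      .lkey = mr_lkey, .length = 64 },
 *              { .addr = buf_dma + 64, .lkey = mr_lkey, .length = 64 },
 *      };
 *      wqe.num_sge = 2;
 *      // returns 128, the total payload length
 *      payload = bnxt_re_build_sgl(sges, wqe.sg_list, wqe.num_sge);
 */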

/* Device */
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct net_device *netdev = NULL;

        rcu_read_lock();
        if (rdev)
                netdev = rdev->netdev;
        if (netdev)
                dev_hold(netdev);

        rcu_read_unlock();
        return netdev;
}

int bnxt_re_query_device(struct ib_device *ibdev,
                         struct ib_device_attr *ib_attr,
                         struct ib_udata *udata)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        memset(ib_attr, 0, sizeof(*ib_attr));
        memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
               min(sizeof(dev_attr->fw_ver),
                   sizeof(ib_attr->fw_ver)));
        bnxt_qplib_get_guid(rdev->netdev->dev_addr,
                            (u8 *)&ib_attr->sys_image_guid);
        ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
        ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;

        ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
        ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
        ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
        ib_attr->max_qp = dev_attr->max_qp;
        ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
        ib_attr->device_cap_flags =
                                    IB_DEVICE_CURR_QP_STATE_MOD
                                    | IB_DEVICE_RC_RNR_NAK_GEN
                                    | IB_DEVICE_SHUTDOWN_PORT
                                    | IB_DEVICE_SYS_IMAGE_GUID
                                    | IB_DEVICE_LOCAL_DMA_LKEY
                                    | IB_DEVICE_RESIZE_MAX_WR
                                    | IB_DEVICE_PORT_ACTIVE_EVENT
                                    | IB_DEVICE_N_NOTIFY_CQ
                                    | IB_DEVICE_MEM_WINDOW
                                    | IB_DEVICE_MEM_WINDOW_TYPE_2B
                                    | IB_DEVICE_MEM_MGT_EXTENSIONS;
        ib_attr->max_sge = dev_attr->max_qp_sges;
        ib_attr->max_sge_rd = dev_attr->max_qp_sges;
        ib_attr->max_cq = dev_attr->max_cq;
        ib_attr->max_cqe = dev_attr->max_cq_wqes;
        ib_attr->max_mr = dev_attr->max_mr;
        ib_attr->max_pd = dev_attr->max_pd;
        ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
        ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
        ib_attr->atomic_cap = IB_ATOMIC_NONE;
        ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;

        ib_attr->max_ee_rd_atom = 0;
        ib_attr->max_res_rd_atom = 0;
        ib_attr->max_ee_init_rd_atom = 0;
        ib_attr->max_ee = 0;
        ib_attr->max_rdd = 0;
        ib_attr->max_mw = dev_attr->max_mw;
        ib_attr->max_raw_ipv6_qp = 0;
        ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
        ib_attr->max_mcast_grp = 0;
        ib_attr->max_mcast_qp_attach = 0;
        ib_attr->max_total_mcast_qp_attach = 0;
        ib_attr->max_ah = dev_attr->max_ah;

        ib_attr->max_fmr = 0;
        ib_attr->max_map_per_fmr = 0;

        ib_attr->max_srq = dev_attr->max_srq;
        ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
        ib_attr->max_srq_sge = dev_attr->max_srq_sges;

        ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

        ib_attr->max_pkeys = 1;
        ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
        return 0;
}

int bnxt_re_modify_device(struct ib_device *ibdev,
                          int device_modify_mask,
                          struct ib_device_modify *device_modify)
{
        switch (device_modify_mask) {
        case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
                /* Modifying the GUID requires modifying the GID table */
                /* GUID should be treated as read-only */
                break;
        case IB_DEVICE_MODIFY_NODE_DESC:
                /* Node Desc should be treated as read-only */
                break;
        default:
                break;
        }
        return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
                       struct ib_port_attr *port_attr)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        memset(port_attr, 0, sizeof(*port_attr));

        if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
                port_attr->state = IB_PORT_ACTIVE;
                port_attr->phys_state = 5;
        } else {
                port_attr->state = IB_PORT_DOWN;
                port_attr->phys_state = 3;
        }
        port_attr->max_mtu = IB_MTU_4096;
        port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
        port_attr->gid_tbl_len = dev_attr->max_sgid;
        port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                                    IB_PORT_DEVICE_MGMT_SUP |
                                    IB_PORT_VENDOR_CLASS_SUP |
                                    IB_PORT_IP_BASED_GIDS;

        port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
        port_attr->bad_pkey_cntr = 0;
        port_attr->qkey_viol_cntr = 0;
        port_attr->pkey_tbl_len = dev_attr->max_pkey;
        port_attr->lid = 0;
        port_attr->sm_lid = 0;
        port_attr->lmc = 0;
        port_attr->max_vl_num = 4;
        port_attr->sm_sl = 0;
        port_attr->subnet_timeout = 0;
        port_attr->init_type_reply = 0;
        port_attr->active_speed = rdev->active_speed;
        port_attr->active_width = rdev->active_width;

        return 0;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr port_attr;

        if (bnxt_re_query_port(ibdev, port_num, &port_attr))
                return -EINVAL;

        immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
        immutable->gid_tbl_len = port_attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
        immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        return 0;
}

void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

        snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
                 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
                 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
                       u16 index, u16 *pkey)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

        /* Ignore port_num */

        memset(pkey, 0, sizeof(*pkey));
        return bnxt_qplib_get_pkey(&rdev->qplib_res,
                                   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
                      int index, union ib_gid *gid)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        int rc = 0;

        /* Ignore port_num */
        memset(gid, 0, sizeof(*gid));
        rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
                                 &rdev->qplib_res.sgid_tbl, index,
                                 (struct bnxt_qplib_gid *)gid);
        return rc;
}

int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
                    unsigned int index, void **context)
{
        int rc = 0;
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
        struct bnxt_qplib_gid *gid_to_del;

        /* Delete the entry from the hardware */
        ctx = *context;
        if (!ctx)
                return -EINVAL;

        if (sgid_tbl && sgid_tbl->active) {
                if (ctx->idx >= sgid_tbl->max)
                        return -EINVAL;
                gid_to_del = &sgid_tbl->tbl[ctx->idx];
                /* DEL_GID is called in WQ context (netdevice_event_work_handler)
                 * or via the ib_unregister_device path. In the former case QP1
                 * may not be destroyed yet, in which case just return, as FW
                 * needs that entry to be present and will fail its deletion.
                 * We could get invoked again after QP1 is destroyed OR get an
                 * ADD_GID call with a different GID value for the same index
                 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
                 */
                if (ctx->idx == 0 &&
                    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
                    ctx->refcnt == 1 && rdev->qp1_sqp) {
                        dev_dbg(rdev_to_dev(rdev),
                                "Trying to delete GID0 while QP1 is alive\n");
                        return -EFAULT;
                }
                ctx->refcnt--;
                if (!ctx->refcnt) {
                        rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
                        if (rc) {
                                dev_err(rdev_to_dev(rdev),
                                        "Failed to remove GID: %#x", rc);
                        } else {
                                ctx_tbl = sgid_tbl->ctx;
                                ctx_tbl[ctx->idx] = NULL;
                                kfree(ctx);
                        }
                }
        } else {
                return -EINVAL;
        }
        return rc;
}
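
/*
 * Illustrative note (not driver code): each SGID table slot carries a
 * bnxt_re_gid_ctx whose refcnt counts duplicate ADD_GID requests for the
 * same GID/index pair, so the hardware entry is freed only on the last
 * delete. A hypothetical sequence:
 *
 *      bnxt_re_add_gid(...);   // new entry, ctx->refcnt == 1
 *      bnxt_re_add_gid(...);   // same GID: -EALREADY path, refcnt == 2
 *      bnxt_re_del_gid(...);   // refcnt == 1, HW entry kept
 *      bnxt_re_del_gid(...);   // refcnt == 0, bnxt_qplib_del_sgid() + kfree(ctx)
 */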

int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
                    unsigned int index, const union ib_gid *gid,
                    const struct ib_gid_attr *attr, void **context)
{
        int rc;
        u32 tbl_idx = 0;
        u16 vlan_id = 0xFFFF;
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

        if ((attr->ndev) && is_vlan_dev(attr->ndev))
                vlan_id = vlan_dev_vlan_id(attr->ndev);

        rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
                                 rdev->qplib_res.netdev->dev_addr,
                                 vlan_id, true, &tbl_idx);
        if (rc == -EALREADY) {
                ctx_tbl = sgid_tbl->ctx;
                ctx_tbl[tbl_idx]->refcnt++;
                *context = ctx_tbl[tbl_idx];
                return 0;
        }

        if (rc < 0) {
                dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
                return rc;
        }

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx_tbl = sgid_tbl->ctx;
        ctx->idx = tbl_idx;
        ctx->refcnt = 1;
        ctx_tbl[tbl_idx] = ctx;
        *context = ctx;

        return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
                                            u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

#define BNXT_RE_FENCE_PBL_SIZE  DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct ib_mr *ib_mr = &fence->mr->ib_mr;
        struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

        memset(wqe, 0, sizeof(*wqe));
        wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
        wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
        wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
        wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
        wqe->bind.zero_based = false;
        wqe->bind.parent_l_key = ib_mr->lkey;
        wqe->bind.va = (u64)(unsigned long)fence->va;
        wqe->bind.length = fence->size;
        wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
        wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

        /* Save the initial rkey in fence structure for now;
         * wqe->bind.r_key will be set at (re)bind time.
         */
        fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
        struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
                                             qplib_qp);
        struct ib_pd *ib_pd = qp->ib_qp.pd;
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
        struct bnxt_qplib_swqe wqe;
        int rc;

        memcpy(&wqe, fence_wqe, sizeof(wqe));
        wqe.bind.r_key = fence->bind_rkey;
        fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

        dev_dbg(rdev_to_dev(qp->rdev),
                "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
                wqe.bind.r_key, qp->qplib_qp.id, pd);
        rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
        if (rc) {
                dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
                return rc;
        }
        bnxt_qplib_post_send_db(&qp->qplib_qp);

        return rc;
}
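
/*
 * Illustrative sketch (not driver code): every bind consumes the rkey that
 * was staged in fence->bind_rkey and immediately stages the next one with
 * ib_inc_rkey(), so consecutive fence binds on the same PD never reuse an
 * rkey. A hypothetical caller that needs the fence ahead of a data WQE
 * ("data_wqe" is an assumption) would do:
 *
 *      rc = bnxt_re_bind_fence_mw(&qp->qplib_qp);  // posts bind + rings DB
 *      if (!rc)
 *              rc = bnxt_qplib_post_send(&qp->qplib_qp, &data_wqe);
 */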

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct bnxt_re_dev *rdev = pd->rdev;
        struct device *dev = &rdev->en_dev->pdev->dev;
        struct bnxt_re_mr *mr = fence->mr;

        if (fence->mw) {
                bnxt_re_dealloc_mw(fence->mw);
                fence->mw = NULL;
        }
        if (mr) {
                if (mr->ib_mr.rkey)
                        bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
                                             true);
                if (mr->ib_mr.lkey)
                        bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
                kfree(mr);
                fence->mr = NULL;
        }
        if (fence->dma_addr) {
                dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
                                 DMA_BIDIRECTIONAL);
                fence->dma_addr = 0;
        }
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
        int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct bnxt_re_dev *rdev = pd->rdev;
        struct device *dev = &rdev->en_dev->pdev->dev;
        struct bnxt_re_mr *mr = NULL;
        dma_addr_t dma_addr = 0;
        struct ib_mw *mw;
        u64 pbl_tbl;
        int rc;

        dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
                                  DMA_BIDIRECTIONAL);
        rc = dma_mapping_error(dev, dma_addr);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
                rc = -EIO;
                fence->dma_addr = 0;
                goto fail;
        }
        fence->dma_addr = dma_addr;

        /* Allocate a MR */
        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr) {
                rc = -ENOMEM;
                goto fail;
        }
        fence->mr = mr;
        mr->rdev = rdev;
        mr->qplib_mr.pd = &pd->qplib_pd;
        mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
        mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
        rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
                goto fail;
        }

        /* Register MR */
        mr->ib_mr.lkey = mr->qplib_mr.lkey;
        mr->qplib_mr.va = (u64)(unsigned long)fence->va;
        mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
        pbl_tbl = dma_addr;
        rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
                               BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
                goto fail;
        }
        mr->ib_mr.rkey = mr->qplib_mr.rkey;

        /* Create a fence MW only for kernel consumers */
        mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
        if (IS_ERR(mw)) {
                dev_err(rdev_to_dev(rdev),
                        "Failed to create fence-MW for PD: %p\n", pd);
                rc = PTR_ERR(mw);
                goto fail;
        }
        fence->mw = mw;

        bnxt_re_create_fence_wqe(pd);
        return 0;

fail:
        bnxt_re_destroy_fence_mr(pd);
        return rc;
}

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        int rc;

        bnxt_re_destroy_fence_mr(pd);

        if (pd->qplib_pd.id) {
                rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
                                           &rdev->qplib_res.pd_tbl,
                                           &pd->qplib_pd);
                if (rc)
                        dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
        }

        kfree(pd);
        return 0;
}

struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
                               struct ib_ucontext *ucontext,
                               struct ib_udata *udata)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_re_ucontext *ucntx = container_of(ucontext,
                                                      struct bnxt_re_ucontext,
                                                      ib_uctx);
        struct bnxt_re_pd *pd;
        int rc;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        pd->rdev = rdev;
        if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
                dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
                rc = -ENOMEM;
                goto fail;
        }

        if (udata) {
                struct bnxt_re_pd_resp resp;

                if (!ucntx->dpi.dbr) {
                        /* Allocate the DPI here, in alloc_pd, so that
                         * ibv_devinfo and similar applications do not
                         * start failing once DPIs are depleted.
                         */
                        if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
                                                 &ucntx->dpi, ucntx)) {
                                rc = -ENOMEM;
                                goto dbfail;
                        }
                }

                resp.pdid = pd->qplib_pd.id;
                /* Still allow mapping this DBR to the new user PD. */
                resp.dpi = ucntx->dpi.dpi;
                resp.dbr = (u64)ucntx->dpi.umdbr;

                rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to copy user response\n");
                        goto dbfail;
                }
        }

        if (!udata)
                if (bnxt_re_create_fence_mr(pd))
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to create Fence-MR\n");
        return &pd->ib_pd;
dbfail:
        (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
                                    &pd->qplib_pd);
fail:
        kfree(pd);
        return ERR_PTR(rc);
}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
{
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
        struct bnxt_re_dev *rdev = ah->rdev;
        int rc;

        rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
                return rc;
        }
        kfree(ah);
        return 0;
}

struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
                                struct rdma_ah_attr *ah_attr,
                                struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_ah *ah;
        const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
        int rc;
        u8 nw_type;

        struct ib_gid_attr sgid_attr;

        if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
                dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
                return ERR_PTR(-EINVAL);
        }
        ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        ah->rdev = rdev;
        ah->qplib_ah.pd = &pd->qplib_pd;

        /* Supply the configuration for the HW */
        memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
               sizeof(union ib_gid));
        /*
         * If RoCE V2 is enabled, the stack maintains two entries for
         * each GID. Avoid this duplicate entry in HW by dividing the
         * GID index by 2 for RoCE V2.
         */
        ah->qplib_ah.sgid_index = grh->sgid_index / 2;
        ah->qplib_ah.host_sgid_index = grh->sgid_index;
        ah->qplib_ah.traffic_class = grh->traffic_class;
        ah->qplib_ah.flow_label = grh->flow_label;
        ah->qplib_ah.hop_limit = grh->hop_limit;
        ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
        if (ib_pd->uobject &&
            !rdma_is_multicast_addr((struct in6_addr *)
                                    grh->dgid.raw) &&
            !rdma_link_local_addr((struct in6_addr *)
                                  grh->dgid.raw)) {
                union ib_gid sgid;

                rc = ib_get_cached_gid(&rdev->ibdev, 1,
                                       grh->sgid_index, &sgid,
                                       &sgid_attr);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to query gid at index %d",
                                grh->sgid_index);
                        goto fail;
                }
                if (sgid_attr.ndev)
                        dev_put(sgid_attr.ndev);
                /* Get network header type for this GID */
                nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
                switch (nw_type) {
                case RDMA_NETWORK_IPV4:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
                        break;
                case RDMA_NETWORK_IPV6:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
                        break;
                default:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
                        break;
                }
        }

        memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
        rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
                goto fail;
        }

        /* Write AVID to shared page. */
        if (ib_pd->uobject) {
                struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
                struct bnxt_re_ucontext *uctx;
                unsigned long flag;
                u32 *wrptr;

                uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
                spin_lock_irqsave(&uctx->sh_lock, flag);
                wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
                *wrptr = ah->qplib_ah.id;
                wmb(); /* make sure cache is updated. */
                spin_unlock_irqrestore(&uctx->sh_lock, flag);
        }

        return &ah->ib_ah;

fail:
        kfree(ah);
        return ERR_PTR(rc);
}
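
/*
 * Illustrative note (not driver code): with RoCE v2 enabled the core GID
 * table interleaves a RoCE v1 and a RoCE v2 entry per address, so stack
 * indexes 0/1 both refer to HW entry 0, indexes 2/3 to HW entry 1, and
 * so on. Hypothetical values:
 *
 *      grh->sgid_index = 3;                    // RoCE v2 entry in the stack
 *      // ah->qplib_ah.sgid_index == 1         (3 / 2, what HW sees)
 *      // ah->qplib_ah.host_sgid_index == 3    (what the stack sees)
 */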

int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
        return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

        ah_attr->type = ib_ah->type;
        rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
        memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
        rdma_ah_set_grh(ah_attr, NULL, 0,
                        ah->qplib_ah.host_sgid_index,
                        0, ah->qplib_ah.traffic_class);
        rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
        rdma_ah_set_port_num(ah_attr, 1);
        rdma_ah_set_static_rate(ah_attr, 0);
        return 0;
}

static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
        __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->scq->cq_lock, flags);
        if (qp->rcq != qp->scq)
                spin_lock(&qp->rcq->cq_lock);
        else
                __acquire(&qp->rcq->cq_lock);

        return flags;
}

static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
                               unsigned long flags)
        __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
        if (qp->rcq != qp->scq)
                spin_unlock(&qp->rcq->cq_lock);
        else
                __release(&qp->rcq->cq_lock);
        spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
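
/*
 * Illustrative usage (mirrors bnxt_re_destroy_qp below): the pair takes
 * both CQ locks, taking each lock only once when the send and receive CQs
 * are the same object, so a QP can be cleaned without racing its CQs:
 *
 *      unsigned long flags;
 *
 *      flags = bnxt_re_lock_cqs(qp);
 *      bnxt_qplib_clean_qp(&qp->qplib_qp);
 *      bnxt_re_unlock_cqs(qp, flags);
 */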

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_re_dev *rdev = qp->rdev;
        int rc;
        unsigned int flags;

        bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
        rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
                return rc;
        }

        flags = bnxt_re_lock_cqs(qp);
        bnxt_qplib_clean_qp(&qp->qplib_qp);
        bnxt_re_unlock_cqs(qp, flags);
        bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

        if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
                rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
                                           &rdev->sqp_ah->qplib_ah);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to destroy HW AH for shadow QP");
                        return rc;
                }

                bnxt_qplib_clean_qp(&qp->qplib_qp);
                rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
                                           &rdev->qp1_sqp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to destroy Shadow QP");
                        return rc;
                }
                mutex_lock(&rdev->qp_lock);
                list_del(&rdev->qp1_sqp->list);
                atomic_dec(&rdev->qp_count);
                mutex_unlock(&rdev->qp_lock);

                kfree(rdev->sqp_ah);
                kfree(rdev->qp1_sqp);
                rdev->qp1_sqp = NULL;
                rdev->sqp_ah = NULL;
        }

        if (!IS_ERR_OR_NULL(qp->rumem))
                ib_umem_release(qp->rumem);
        if (!IS_ERR_OR_NULL(qp->sumem))
                ib_umem_release(qp->sumem);

        mutex_lock(&rdev->qp_lock);
        list_del(&qp->list);
        atomic_dec(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);
        kfree(qp);
        return 0;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_GSI:
                return CMDQ_CREATE_QP1_TYPE_GSI;
        case IB_QPT_RC:
                return CMDQ_CREATE_QP_TYPE_RC;
        case IB_QPT_UD:
                return CMDQ_CREATE_QP_TYPE_UD;
        default:
                return IB_QPT_MAX;
        }
}
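
/*
 * Illustrative note (not driver code): IB_QPT_MAX doubles as the "not
 * supported" sentinel, which bnxt_re_create_qp() checks below. E.g. a
 * hypothetical caller:
 *
 *      if (__from_ib_qp_type(IB_QPT_UC) == IB_QPT_MAX)
 *              return -EINVAL;         // UC is not supported by this HW
 */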

static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
                                struct bnxt_re_qp *qp, struct ib_udata *udata)
{
        struct bnxt_re_qp_req ureq;
        struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
        struct ib_umem *umem;
        int bytes = 0;
        struct ib_ucontext *context = pd->ib_pd.uobject->context;
        struct bnxt_re_ucontext *cntx = container_of(context,
                                                     struct bnxt_re_ucontext,
                                                     ib_uctx);
        if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                return -EFAULT;

        bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
        /* Consider mapping PSN search memory only for RC QPs. */
        if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
                bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
        bytes = PAGE_ALIGN(bytes);
        umem = ib_umem_get(context, ureq.qpsva, bytes,
                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(umem))
                return PTR_ERR(umem);

        qp->sumem = umem;
        qplib_qp->sq.sglist = umem->sg_head.sgl;
        qplib_qp->sq.nmap = umem->nmap;
        qplib_qp->qp_handle = ureq.qp_handle;

        if (!qp->qplib_qp.srq) {
                bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
                bytes = PAGE_ALIGN(bytes);
                umem = ib_umem_get(context, ureq.qprva, bytes,
                                   IB_ACCESS_LOCAL_WRITE, 1);
                if (IS_ERR(umem))
                        goto rqfail;
                qp->rumem = umem;
                qplib_qp->rq.sglist = umem->sg_head.sgl;
                qplib_qp->rq.nmap = umem->nmap;
        }

        qplib_qp->dpi = &cntx->dpi;
        return 0;
rqfail:
        ib_umem_release(qp->sumem);
        qp->sumem = NULL;
        qplib_qp->sq.sglist = NULL;
        qplib_qp->sq.nmap = 0;

        return PTR_ERR(umem);
}

static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
                                (struct bnxt_re_pd *pd,
                                 struct bnxt_qplib_res *qp1_res,
                                 struct bnxt_qplib_qp *qp1_qp)
{
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_ah *ah;
        union ib_gid sgid;
        int rc;

        ah = kzalloc(sizeof(*ah), GFP_KERNEL);
        if (!ah)
                return NULL;

        ah->rdev = rdev;
        ah->qplib_ah.pd = &pd->qplib_pd;

        rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
        if (rc)
                goto fail;

        /* Supply the dgid data same as sgid */
        memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
               sizeof(union ib_gid));
        ah->qplib_ah.sgid_index = 0;

        ah->qplib_ah.traffic_class = 0;
        ah->qplib_ah.flow_label = 0;
        ah->qplib_ah.hop_limit = 1;
        ah->qplib_ah.sl = 0;
        /* Have DMAC same as SMAC */
        ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

        rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev),
                        "Failed to allocate HW AH for Shadow QP");
                goto fail;
        }

        return ah;

fail:
        kfree(ah);
        return NULL;
}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
                                (struct bnxt_re_pd *pd,
                                 struct bnxt_qplib_res *qp1_res,
                                 struct bnxt_qplib_qp *qp1_qp)
{
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_qp *qp;
        int rc;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return NULL;

        qp->rdev = rdev;

        /* Initialize the shadow QP structure from the QP1 values */
        ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

        qp->qplib_qp.pd = &pd->qplib_pd;
        qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
        qp->qplib_qp.type = IB_QPT_UD;

        qp->qplib_qp.max_inline_data = 0;
        qp->qplib_qp.sig_type = true;

        /* Shadow QP SQ depth should be same as QP1 RQ depth */
        qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.sq.max_sge = 2;
        /* Q full delta can be 1 since it is internal QP */
        qp->qplib_qp.sq.q_full_delta = 1;

        qp->qplib_qp.scq = qp1_qp->scq;
        qp->qplib_qp.rcq = qp1_qp->rcq;

        qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
        /* Q full delta can be 1 since it is internal QP */
        qp->qplib_qp.rq.q_full_delta = 1;

        qp->qplib_qp.mtu = qp1_qp->mtu;

        qp->qplib_qp.sq_hdr_buf_size = 0;
        qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
        qp->qplib_qp.dpi = &rdev->dpi_privileged;

        rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
        if (rc)
                goto fail;

        rdev->sqp_id = qp->qplib_qp.id;

        spin_lock_init(&qp->sq_lock);
        INIT_LIST_HEAD(&qp->list);
        mutex_lock(&rdev->qp_lock);
        list_add_tail(&qp->list, &rdev->qp_list);
        atomic_inc(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);
        return qp;
fail:
        kfree(qp);
        return NULL;
}

struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
                                struct ib_qp_init_attr *qp_init_attr,
                                struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
        struct bnxt_re_qp *qp;
        struct bnxt_re_cq *cq;
        struct bnxt_re_srq *srq;
        int rc, entries;

        if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
            (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
            (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
            (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
            (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
                return ERR_PTR(-EINVAL);

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        qp->rdev = rdev;
        ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
        qp->qplib_qp.pd = &pd->qplib_pd;
        qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
        qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
        if (qp->qplib_qp.type == IB_QPT_MAX) {
                dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
                        qp->qplib_qp.type);
                rc = -EINVAL;
                goto fail;
        }
        qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
        qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
                                  IB_SIGNAL_ALL_WR) ? true : false);

        qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
        if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
                qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

        if (qp_init_attr->send_cq) {
                cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
                                  ib_cq);
                if (!cq) {
                        dev_err(rdev_to_dev(rdev), "Send CQ not found");
                        rc = -EINVAL;
                        goto fail;
                }
                qp->qplib_qp.scq = &cq->qplib_cq;
                qp->scq = cq;
        }

        if (qp_init_attr->recv_cq) {
                cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
                                  ib_cq);
                if (!cq) {
                        dev_err(rdev_to_dev(rdev), "Receive CQ not found");
                        rc = -EINVAL;
                        goto fail;
                }
                qp->qplib_qp.rcq = &cq->qplib_cq;
                qp->rcq = cq;
        }

        if (qp_init_attr->srq) {
                srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
                                   ib_srq);
                if (!srq) {
                        dev_err(rdev_to_dev(rdev), "SRQ not found");
                        rc = -EINVAL;
                        goto fail;
                }
                qp->qplib_qp.srq = &srq->qplib_srq;
                qp->qplib_qp.rq.max_wqe = 0;
        } else {
                /* Allocate 1 more than what's provided so posting max doesn't
                 * mean empty
                 */
                entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
                qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes + 1);

                qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
                                                qp_init_attr->cap.max_recv_wr;

                qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
                if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
        }

        qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

        if (qp_init_attr->qp_type == IB_QPT_GSI) {
                /* Allocate 1 more than what's provided */
                entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
                qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes + 1);
                qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
                                                qp_init_attr->cap.max_send_wr;
                qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
                if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
                qp->qplib_qp.sq.max_sge++;
                if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

                qp->qplib_qp.rq_hdr_buf_size =
                                        BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;

                qp->qplib_qp.sq_hdr_buf_size =
                                        BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
                qp->qplib_qp.dpi = &rdev->dpi_privileged;
                rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
                        goto fail;
                }
                /* Create a shadow QP to handle the QP1 traffic */
                rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
                                                         &qp->qplib_qp);
                if (!rdev->qp1_sqp) {
                        rc = -EINVAL;
                        dev_err(rdev_to_dev(rdev),
                                "Failed to create Shadow QP for QP1");
                        goto qp_destroy;
                }
                rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
                                                           &qp->qplib_qp);
                if (!rdev->sqp_ah) {
                        bnxt_qplib_destroy_qp(&rdev->qplib_res,
                                              &rdev->qp1_sqp->qplib_qp);
                        rc = -EINVAL;
                        dev_err(rdev_to_dev(rdev),
                                "Failed to create AH entry for ShadowQP");
                        goto qp_destroy;
                }

        } else {
                /* Allocate 128 + 1 more than what's provided */
                entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
                                             BNXT_QPLIB_RESERVED_QP_WRS + 1);
                qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes +
                                                BNXT_QPLIB_RESERVED_QP_WRS + 1);
                qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;

                /*
                 * Reserve one slot for the phantom WQE. The application can
                 * then post one extra entry, but allowing that avoids an
                 * unexpected queue-full condition.
                 */

                qp->qplib_qp.sq.q_full_delta -= 1;

                qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
                qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
                if (udata) {
                        rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
                        if (rc)
                                goto fail;
                } else {
                        qp->qplib_qp.dpi = &rdev->dpi_privileged;
                }

                rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
                        goto free_umem;
                }
        }

        qp->ib_qp.qp_num = qp->qplib_qp.id;
        spin_lock_init(&qp->sq_lock);
        spin_lock_init(&qp->rq_lock);

        if (udata) {
                struct bnxt_re_qp_resp resp;

                resp.qpid = qp->ib_qp.qp_num;
                resp.rsvd = 0;
                rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
                        goto qp_destroy;
                }
        }
        INIT_LIST_HEAD(&qp->list);
        mutex_lock(&rdev->qp_lock);
        list_add_tail(&qp->list, &rdev->qp_list);
        atomic_inc(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);

        return &qp->ib_qp;
qp_destroy:
        bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
        if (udata) {
                if (qp->rumem)
                        ib_umem_release(qp->rumem);
                if (qp->sumem)
                        ib_umem_release(qp->sumem);
        }
fail:
        kfree(qp);
        return ERR_PTR(rc);
}
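
/*
 * Illustrative arithmetic (not driver code) for the RQ sizing above,
 * assuming a hypothetical max_recv_wr of 256 and a large device limit:
 *
 *      entries         = roundup_pow_of_two(256 + 1);  // 512
 *      rq.max_wqe      = min(512, max_qp_wqes + 1);    // 512
 *      rq.q_full_delta = 512 - 256;                    // slack kept back
 *
 * so the ring is never filled to the last slot and "full" can be told
 * apart from "empty".
 */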

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return CMDQ_MODIFY_QP_NEW_STATE_RESET;
        case IB_QPS_INIT:
                return CMDQ_MODIFY_QP_NEW_STATE_INIT;
        case IB_QPS_RTR:
                return CMDQ_MODIFY_QP_NEW_STATE_RTR;
        case IB_QPS_RTS:
                return CMDQ_MODIFY_QP_NEW_STATE_RTS;
        case IB_QPS_SQD:
                return CMDQ_MODIFY_QP_NEW_STATE_SQD;
        case IB_QPS_SQE:
                return CMDQ_MODIFY_QP_NEW_STATE_SQE;
        case IB_QPS_ERR:
        default:
                return CMDQ_MODIFY_QP_NEW_STATE_ERR;
        }
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
        switch (state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RESET:
                return IB_QPS_RESET;
        case CMDQ_MODIFY_QP_NEW_STATE_INIT:
                return IB_QPS_INIT;
        case CMDQ_MODIFY_QP_NEW_STATE_RTR:
                return IB_QPS_RTR;
        case CMDQ_MODIFY_QP_NEW_STATE_RTS:
                return IB_QPS_RTS;
        case CMDQ_MODIFY_QP_NEW_STATE_SQD:
                return IB_QPS_SQD;
        case CMDQ_MODIFY_QP_NEW_STATE_SQE:
                return IB_QPS_SQE;
        case CMDQ_MODIFY_QP_NEW_STATE_ERR:
        default:
                return IB_QPS_ERR;
        }
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
        case IB_MTU_512:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
        case IB_MTU_1024:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
        case IB_MTU_2048:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
        case IB_MTU_4096:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
        default:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
        }
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
        switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
                return IB_MTU_256;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
                return IB_MTU_512;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
                return IB_MTU_1024;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
                return IB_MTU_2048;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
                return IB_MTU_4096;
        default:
                return IB_MTU_2048;
        }
}
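
/*
 * Illustrative note (not driver code): the MTU converters are inverses
 * for all values the hardware supports, e.g.
 *
 *      __to_ib_mtu(__from_ib_mtu(IB_MTU_1024)) == IB_MTU_1024
 *
 * while any unrecognized input on either side maps to the 2048-byte
 * encoding rather than an error.
 */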
1336
1337 /* Shared Receive Queues */
1338 int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
1339 {
1340         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1341                                                ib_srq);
1342         struct bnxt_re_dev *rdev = srq->rdev;
1343         struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1344         struct bnxt_qplib_nq *nq = NULL;
1345         int rc;
1346
1347         if (qplib_srq->cq)
1348                 nq = qplib_srq->cq->nq;
1349         rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1350         if (rc) {
1351                 dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
1352                 return rc;
1353         }
1354
1355         if (srq->umem)
1356                 ib_umem_release(srq->umem);
1357         kfree(srq);
1358         atomic_dec(&rdev->srq_count);
1359         if (nq)
1360                 nq->budget--;
1361         return 0;
1362 }
1363
1364 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1365                                  struct bnxt_re_pd *pd,
1366                                  struct bnxt_re_srq *srq,
1367                                  struct ib_udata *udata)
1368 {
1369         struct bnxt_re_srq_req ureq;
1370         struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1371         struct ib_umem *umem;
1372         int bytes = 0;
1373         struct ib_ucontext *context = pd->ib_pd.uobject->context;
1374         struct bnxt_re_ucontext *cntx = container_of(context,
1375                                                      struct bnxt_re_ucontext,
1376                                                      ib_uctx);
1377         if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1378                 return -EFAULT;
1379
1380         bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1381         bytes = PAGE_ALIGN(bytes);
1382         umem = ib_umem_get(context, ureq.srqva, bytes,
1383                            IB_ACCESS_LOCAL_WRITE, 1);
1384         if (IS_ERR(umem))
1385                 return PTR_ERR(umem);
1386
1387         srq->umem = umem;
1388         qplib_srq->nmap = umem->nmap;
1389         qplib_srq->sglist = umem->sg_head.sgl;
1390         qplib_srq->srq_handle = ureq.srq_handle;
1391         qplib_srq->dpi = &cntx->dpi;
1392
1393         return 0;
1394 }
1395
1396 struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
1397                                   struct ib_srq_init_attr *srq_init_attr,
1398                                   struct ib_udata *udata)
1399 {
1400         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1401         struct bnxt_re_dev *rdev = pd->rdev;
1402         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1403         struct bnxt_re_srq *srq;
1404         struct bnxt_qplib_nq *nq = NULL;
1405         int rc, entries;
1406
1407         if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1408                 dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
1409                 rc = -EINVAL;
1410                 goto exit;
1411         }
1412
1413         if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1414                 rc = -ENOTSUPP;
1415                 goto exit;
1416         }
1417
1418         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1419         if (!srq) {
1420                 rc = -ENOMEM;
1421                 goto exit;
1422         }
1423         srq->rdev = rdev;
1424         srq->qplib_srq.pd = &pd->qplib_pd;
1425         srq->qplib_srq.dpi = &rdev->dpi_privileged;
1426         /* Allocate one more entry than requested so that posting the
1427          * maximum number of WRs never makes the ring look empty
1428          */
1429         entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1430         if (entries > dev_attr->max_srq_wqes + 1)
1431                 entries = dev_attr->max_srq_wqes + 1;
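        /*
         * Worked example (illustrative): attr.max_wr = 256 gives
         * roundup_pow_of_two(257) = 512 ring entries, so the consumer can
         * post all 256 WQEs while full and empty stay distinguishable.
         */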
1432
1433         srq->qplib_srq.max_wqe = entries;
1434         srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1435         srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1436         srq->srq_limit = srq_init_attr->attr.srq_limit;
1437         srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1438         nq = &rdev->nq[0];
1439
1440         if (udata) {
1441                 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1442                 if (rc)
1443                         goto fail;
1444         }
1445
1446         rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1447         if (rc) {
1448                 dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1449                 goto fail;
1450         }
1451
1452         if (udata) {
1453                 struct bnxt_re_srq_resp resp;
1454
1455                 resp.srqid = srq->qplib_srq.id;
1456                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1457                 if (rc) {
1458                         dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1459                         bnxt_qplib_destroy_srq(&rdev->qplib_res,
1460                                                &srq->qplib_srq);
1461                         goto exit;
1462                 }
1463         }
1464         if (nq)
1465                 nq->budget++;
1466         atomic_inc(&rdev->srq_count);
1467
1468         return &srq->ib_srq;
1469
1470 fail:
1471         if (srq->umem)
1472                 ib_umem_release(srq->umem);
1473         kfree(srq);
1474 exit:
1475         return ERR_PTR(rc);
1476 }
1477
1478 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1479                        enum ib_srq_attr_mask srq_attr_mask,
1480                        struct ib_udata *udata)
1481 {
1482         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1483                                                ib_srq);
1484         struct bnxt_re_dev *rdev = srq->rdev;
1485         int rc;
1486
1487         switch (srq_attr_mask) {
1488         case IB_SRQ_MAX_WR:
1489                 /* SRQ resize is not supported */
1490                 break;
1491         case IB_SRQ_LIMIT:
1492                 /* Change the SRQ threshold */
1493                 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1494                         return -EINVAL;
1495
1496                 srq->qplib_srq.threshold = srq_attr->srq_limit;
1497                 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1498                 if (rc) {
1499                         dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1500                         return rc;
1501                 }
1502                 /* On success, update the shadow */
1503                 srq->srq_limit = srq_attr->srq_limit;
1504                 /* No need to build and send a response back to udata */
1505                 break;
1506         default:
1507                 dev_err(rdev_to_dev(rdev),
1508                         "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1509                 return -EINVAL;
1510         }
1511         return 0;
1512 }
1513
1514 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1515 {
1516         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1517                                                ib_srq);
1518         struct bnxt_re_srq tsrq;
1519         struct bnxt_re_dev *rdev = srq->rdev;
1520         int rc;
1521
1522         /* Get live SRQ attr */
1523         tsrq.qplib_srq.id = srq->qplib_srq.id;
1524         rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1525         if (rc) {
1526                 dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1527                 return rc;
1528         }
1529         srq_attr->max_wr = srq->qplib_srq.max_wqe;
1530         srq_attr->max_sge = srq->qplib_srq.max_sge;
1531         srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1532
1533         return 0;
1534 }
1535
1536 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
1537                           struct ib_recv_wr **bad_wr)
1538 {
1539         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1540                                                ib_srq);
1541         struct bnxt_qplib_swqe wqe;
1542         unsigned long flags;
1543         int rc = 0, payload_sz = 0;
1544
1545         spin_lock_irqsave(&srq->lock, flags);
1546         while (wr) {
1547                 /* Transcribe each ib_recv_wr to qplib_swqe */
1548                 wqe.num_sge = wr->num_sge;
1549                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
1550                                                wr->num_sge);
1551                 wqe.wr_id = wr->wr_id;
1552                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1553
1554                 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1555                 if (rc) {
1556                         *bad_wr = wr;
1557                         break;
1558                 }
1559                 wr = wr->next;
1560         }
1561         spin_unlock_irqrestore(&srq->lock, flags);
1562
1563         return rc;
1564 }
1565 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1566                                     struct bnxt_re_qp *qp1_qp,
1567                                     int qp_attr_mask)
1568 {
1569         struct bnxt_re_qp *qp = rdev->qp1_sqp;
1570         int rc = 0;
1571
1572         if (qp_attr_mask & IB_QP_STATE) {
1573                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1574                 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1575         }
1576         if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1577                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1578                 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1579         }
1580
1581         if (qp_attr_mask & IB_QP_QKEY) {
1582                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1583                 /* Using a random QKEY */
1584                 qp->qplib_qp.qkey = 0x81818181;
1585         }
1586         if (qp_attr_mask & IB_QP_SQ_PSN) {
1587                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1588                 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1589         }
1590
1591         rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1592         if (rc)
1593                 dev_err(rdev_to_dev(rdev),
1594                         "Failed to modify Shadow QP for QP1");
1595         return rc;
1596 }
1597
1598 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1599                       int qp_attr_mask, struct ib_udata *udata)
1600 {
1601         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1602         struct bnxt_re_dev *rdev = qp->rdev;
1603         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1604         enum ib_qp_state curr_qp_state, new_qp_state;
1605         int rc, entries;
1606         int status;
1607         union ib_gid sgid;
1608         struct ib_gid_attr sgid_attr;
1609         u8 nw_type;
1610
1611         qp->qplib_qp.modify_flags = 0;
1612         if (qp_attr_mask & IB_QP_STATE) {
1613                 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1614                 new_qp_state = qp_attr->qp_state;
1615                 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1616                                         ib_qp->qp_type, qp_attr_mask,
1617                                         IB_LINK_LAYER_ETHERNET)) {
1618                         dev_err(rdev_to_dev(rdev),
1619                                 "Invalid attribute mask: %#x specified ",
1620                                 qp_attr_mask);
1621                         dev_err(rdev_to_dev(rdev),
1622                                 "for qpn: %#x type: %#x",
1623                                 ib_qp->qp_num, ib_qp->qp_type);
1624                         dev_err(rdev_to_dev(rdev),
1625                                 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1626                                 curr_qp_state, new_qp_state);
1627                         return -EINVAL;
1628                 }
1629                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1630                 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1631
1632                 if (!qp->sumem &&
1633                     qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1634                         dev_dbg(rdev_to_dev(rdev),
1635                                 "Move QP = %p to flush list\n",
1636                                 qp);
1637                         bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1638                 }
1639                 if (!qp->sumem &&
1640                     qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1641                         dev_dbg(rdev_to_dev(rdev),
1642                                 "Move QP = %p out of flush list\n",
1643                                 qp);
1644                         bnxt_qplib_clean_qp(&qp->qplib_qp);
1645                 }
1646         }
1647         if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1648                 qp->qplib_qp.modify_flags |=
1649                                 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1650                 qp->qplib_qp.en_sqd_async_notify = true;
1651         }
1652         if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1653                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1654                 qp->qplib_qp.access =
1655                         __from_ib_access_flags(qp_attr->qp_access_flags);
1656                 /* LOCAL_WRITE access must be set to allow RC receive */
1657                 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1658         }
1659         if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1660                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1661                 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1662         }
1663         if (qp_attr_mask & IB_QP_QKEY) {
1664                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1665                 qp->qplib_qp.qkey = qp_attr->qkey;
1666         }
1667         if (qp_attr_mask & IB_QP_AV) {
1668                 const struct ib_global_route *grh =
1669                         rdma_ah_read_grh(&qp_attr->ah_attr);
1670
1671                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1672                                      CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1673                                      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1674                                      CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1675                                      CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1676                                      CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1677                                      CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1678                 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1679                        sizeof(qp->qplib_qp.ah.dgid.data));
1680                 qp->qplib_qp.ah.flow_label = grh->flow_label;
1681                 /* If RoCE V2 is enabled, the stack will have two entries
1682                  * for each GID entry. Avoid this duplicate entry in HW by
1683                  * dividing the GID index by 2 for RoCE V2
1684                  */
1685                 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1686                 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
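                /*
                 * Illustrative mapping (per the comment above): stack GID
                 * indices {0,1} are the v1/v2 flavours of one address and
                 * land on HW SGID index 0, {2,3} on index 1, and so on;
                 * host_sgid_index keeps the unhalved stack value for
                 * later queries.
                 */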
1687                 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1688                 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1689                 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1690                 ether_addr_copy(qp->qplib_qp.ah.dmac,
1691                                 qp_attr->ah_attr.roce.dmac);
1692
1693                 status = ib_get_cached_gid(&rdev->ibdev, 1,
1694                                            grh->sgid_index,
1695                                            &sgid, &sgid_attr);
1696                 if (!status && sgid_attr.ndev) {
1697                         memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1698                                ETH_ALEN);
1699                         dev_put(sgid_attr.ndev);
1700                         nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1701                                                          &sgid);
1702                         switch (nw_type) {
1703                         case RDMA_NETWORK_IPV4:
1704                                 qp->qplib_qp.nw_type =
1705                                         CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1706                                 break;
1707                         case RDMA_NETWORK_IPV6:
1708                                 qp->qplib_qp.nw_type =
1709                                         CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1710                                 break;
1711                         default:
1712                                 qp->qplib_qp.nw_type =
1713                                         CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1714                                 break;
1715                         }
1716                 }
1717         }
1718
1719         if (qp_attr_mask & IB_QP_PATH_MTU) {
1720                 qp->qplib_qp.modify_flags |=
1721                                 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1722                 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1723                 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1724         } else if (qp_attr->qp_state == IB_QPS_RTR) {
1725                 qp->qplib_qp.modify_flags |=
1726                         CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1727                 qp->qplib_qp.path_mtu =
1728                         __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1729                 qp->qplib_qp.mtu =
1730                         ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1731         }
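        /*
         * Illustrative default (assuming a standard 1500-byte Ethernet
         * netdev): iboe_get_mtu() deducts the RoCE transport headers and
         * yields IB_MTU_1024, which becomes the path MTU when none is
         * supplied at the RTR transition.
         */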
1732
1733         if (qp_attr_mask & IB_QP_TIMEOUT) {
1734                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1735                 qp->qplib_qp.timeout = qp_attr->timeout;
1736         }
1737         if (qp_attr_mask & IB_QP_RETRY_CNT) {
1738                 qp->qplib_qp.modify_flags |=
1739                                 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1740                 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1741         }
1742         if (qp_attr_mask & IB_QP_RNR_RETRY) {
1743                 qp->qplib_qp.modify_flags |=
1744                                 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1745                 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1746         }
1747         if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1748                 qp->qplib_qp.modify_flags |=
1749                                 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1750                 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1751         }
1752         if (qp_attr_mask & IB_QP_RQ_PSN) {
1753                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1754                 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1755         }
1756         if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1757                 qp->qplib_qp.modify_flags |=
1758                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1759                 /* Cap the max_rd_atomic to device max */
1760                 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1761                                                    dev_attr->max_qp_rd_atom);
1762         }
1763         if (qp_attr_mask & IB_QP_SQ_PSN) {
1764                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1765                 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1766         }
1767         if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1768                 if (qp_attr->max_dest_rd_atomic >
1769                     dev_attr->max_qp_init_rd_atom) {
1770                         dev_err(rdev_to_dev(rdev),
1771                                 "max_dest_rd_atomic requested %d is > dev_max %d",
1772                                 qp_attr->max_dest_rd_atomic,
1773                                 dev_attr->max_qp_init_rd_atom);
1774                         return -EINVAL;
1775                 }
1776
1777                 qp->qplib_qp.modify_flags |=
1778                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1779                 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1780         }
1781         if (qp_attr_mask & IB_QP_CAP) {
1782                 qp->qplib_qp.modify_flags |=
1783                                 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1784                                 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1785                                 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1786                                 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1787                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1788                 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1789                     (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1790                     (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1791                     (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1792                     (qp_attr->cap.max_inline_data >=
1793                                                 dev_attr->max_inline_data)) {
1794                         dev_err(rdev_to_dev(rdev),
1795                                 "Modify QP failed - max exceeded");
1796                         return -EINVAL;
1797                 }
1798                 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1799                 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1800                                                 dev_attr->max_qp_wqes + 1);
1801                 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1802                                                 qp_attr->cap.max_send_wr;
1803                 /*
1804                  * Reserve one slot for the phantom WQE. Some applications
1805                  * may post one extra entry; allowing for it avoids an
1806                  * unexpected queue-full condition
1807                  */
1808                 qp->qplib_qp.sq.q_full_delta -= 1;
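                /*
                 * Worked example (illustrative, assuming the device limit
                 * does not clamp): max_send_wr = 1000 rounds up to
                 * sq.max_wqe = 1024, so q_full_delta = 1024 - 1000 - 1 = 23
                 * slots are held back, one of them for the phantom WQE.
                 */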
1809                 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1810                 if (qp->qplib_qp.rq.max_wqe) {
1811                         entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1812                         qp->qplib_qp.rq.max_wqe =
1813                                 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1814                         qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1815                                                        qp_attr->cap.max_recv_wr;
1816                         qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1817                 } else {
1818                         /* SRQ was used earlier, just ignore the RQ caps */
1819                 }
1820         }
1821         if (qp_attr_mask & IB_QP_DEST_QPN) {
1822                 qp->qplib_qp.modify_flags |=
1823                                 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1824                 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1825         }
1826         rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1827         if (rc) {
1828                 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1829                 return rc;
1830         }
1831         if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1832                 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1833         return rc;
1834 }
1835
1836 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1837                      int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1838 {
1839         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1840         struct bnxt_re_dev *rdev = qp->rdev;
1841         struct bnxt_qplib_qp *qplib_qp;
1842         int rc;
1843
1844         qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1845         if (!qplib_qp)
1846                 return -ENOMEM;
1847
1848         qplib_qp->id = qp->qplib_qp.id;
1849         qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1850
1851         rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1852         if (rc) {
1853                 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1854                 goto out;
1855         }
1856         qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1857         qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1858         qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1859         qp_attr->pkey_index = qplib_qp->pkey_index;
1860         qp_attr->qkey = qplib_qp->qkey;
1861         qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1862         rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1863                         qplib_qp->ah.host_sgid_index,
1864                         qplib_qp->ah.hop_limit,
1865                         qplib_qp->ah.traffic_class);
1866         rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1867         rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1868         ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1869         qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1870         qp_attr->timeout = qplib_qp->timeout;
1871         qp_attr->retry_cnt = qplib_qp->retry_cnt;
1872         qp_attr->rnr_retry = qplib_qp->rnr_retry;
1873         qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1874         qp_attr->rq_psn = qplib_qp->rq.psn;
1875         qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1876         qp_attr->sq_psn = qplib_qp->sq.psn;
1877         qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1878         qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1879                                                          IB_SIGNAL_REQ_WR;
1880         qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1881
1882         qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1883         qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1884         qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1885         qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1886         qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1887         qp_init_attr->cap = qp_attr->cap;
1888
1889 out:
1890         kfree(qplib_qp);
1891         return rc;
1892 }
1893
1894 /* Routine for sending QP1 packets for RoCE V1 and V2
1895  */
1896 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1897                                      struct ib_send_wr *wr,
1898                                      struct bnxt_qplib_swqe *wqe,
1899                                      int payload_size)
1900 {
1901         struct ib_device *ibdev = &qp->rdev->ibdev;
1902         struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1903                                              ib_ah);
1904         struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1905         struct bnxt_qplib_sge sge;
1906         union ib_gid sgid;
1907         u8 nw_type;
1908         u16 ether_type;
1909         struct ib_gid_attr sgid_attr;
1910         union ib_gid dgid;
1911         bool is_eth = false;
1912         bool is_vlan = false;
1913         bool is_grh = false;
1914         bool is_udp = false;
1915         u8 ip_version = 0;
1916         u16 vlan_id = 0xFFFF;
1917         void *buf;
1918         int i, rc = 0;
1919
1920         memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1921
1922         rc = ib_get_cached_gid(ibdev, 1,
1923                                qplib_ah->host_sgid_index, &sgid,
1924                                &sgid_attr);
1925         if (rc) {
1926                 dev_err(rdev_to_dev(qp->rdev),
1927                         "Failed to query gid at index %d",
1928                         qplib_ah->host_sgid_index);
1929                 return rc;
1930         }
1931         if (sgid_attr.ndev) {
1932                 if (is_vlan_dev(sgid_attr.ndev))
1933                         vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1934                 dev_put(sgid_attr.ndev);
1935         }
1936         /* Get network header type for this GID */
1937         nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1938         switch (nw_type) {
1939         case RDMA_NETWORK_IPV4:
1940                 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1941                 break;
1942         case RDMA_NETWORK_IPV6:
1943                 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1944                 break;
1945         default:
1946                 nw_type = BNXT_RE_ROCE_V1_PACKET;
1947                 break;
1948         }
1949         memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1950         is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1951         if (is_udp) {
1952                 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1953                         ip_version = 4;
1954                         ether_type = ETH_P_IP;
1955                 } else {
1956                         ip_version = 6;
1957                         ether_type = ETH_P_IPV6;
1958                 }
1959                 is_grh = false;
1960         } else {
1961                 ether_type = ETH_P_IBOE;
1962                 is_grh = true;
1963         }
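        /*
         * For RoCE v2 the IPv4/IPv6 header carries what the GRH would on
         * the wire, so is_grh is only set for RoCE v1 frames (ethertype
         * ETH_P_IBOE).
         */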
1964
1965         is_eth = true;
1966         is_vlan = (vlan_id && (vlan_id < 0x1000));
1967
1968         ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1969                           ip_version, is_udp, 0, &qp->qp1_hdr);
1970
1971         /* ETH */
1972         ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1973         ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1974
1975         /* For vlan, check the sgid for vlan existence */
1976
1977         if (!is_vlan) {
1978                 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1979         } else {
1980                 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1981                 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1982         }
1983
1984         if (is_grh || (ip_version == 6)) {
1985                 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1986                 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1987                        sizeof(sgid));
1988                 qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
1989         }
1990
1991         if (ip_version == 4) {
1992                 qp->qp1_hdr.ip4.tos = 0;
1993                 qp->qp1_hdr.ip4.id = 0;
1994                 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1995                 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1996
1997                 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1998                 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1999                 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2000         }
2001
2002         if (is_udp) {
2003                 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2004                 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2005                 qp->qp1_hdr.udp.csum = 0;
2006         }
2007
2008         /* BTH */
2009         if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2010                 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2011                 qp->qp1_hdr.immediate_present = 1;
2012         } else {
2013                 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2014         }
2015         if (wr->send_flags & IB_SEND_SOLICITED)
2016                 qp->qp1_hdr.bth.solicited_event = 1;
2017         /* pad_count: pad the payload out to a 4-byte multiple */
2018         qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2019
2020         /* P_key for QP1 is for all members */
2021         qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2022         qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2023         qp->qp1_hdr.bth.ack_req = 0;
2024         qp->send_psn++;
2025         qp->send_psn &= BTH_PSN_MASK;
2026         qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
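        /* The BTH PSN field is 24 bits wide, hence the wrap at BTH_PSN_MASK. */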
2027         /* DETH */
2028         /* Use the privileged Q_Key for QP1 */
2029         qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2030         qp->qp1_hdr.deth.source_qpn = IB_QP1;
2031
2032         /* Pack the QP1 header into the transmit buffer */
2033         buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2034         if (buf) {
2035                 ib_ud_header_pack(&qp->qp1_hdr, buf);
2036                 for (i = wqe->num_sge; i; i--) {
2037                         wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2038                         wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2039                         wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2040                 }
2041
2042                 /*
2043                  * Max header buf size for IPV6 RoCE V2 is 86,
2044                  * which is the same as the QP1 SQ header buffer.
2045                  * Header buf size for IPV4 RoCE V2 can be 66:
2046                  * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2047                  * Subtract 20 bytes from the QP1 SQ header buf size
2048                  */
2049                 if (is_udp && ip_version == 4)
2050                         sge.size -= 20;
2051                 /*
2052                  * Max Header buf size for RoCE V1 is 78.
2053                  * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2054                  * Subtract 8 bytes from QP1 SQ header buf size
2055                  */
2056                 if (!is_udp)
2057                         sge.size -= 8;
2058
2059                 /* Subtract 4 bytes for non vlan packets */
2060                 if (!is_vlan)
2061                         sge.size -= 4;
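                /*
                 * Illustrative arithmetic: the 86-byte worst case is
                 * ETH(14) + VLAN(4) + IPv6(40) + UDP(8) + BTH(20), and the
                 * subtractions above recover the smaller IPv4 RoCE v2 (66)
                 * and RoCE v1 (78) headers from it.
                 */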
2062
2063                 wqe->sg_list[0].addr = sge.addr;
2064                 wqe->sg_list[0].lkey = sge.lkey;
2065                 wqe->sg_list[0].size = sge.size;
2066                 wqe->num_sge++;
2067
2068         } else {
2069                 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
2070                 rc = -ENOMEM;
2071         }
2072         return rc;
2073 }
2074
2075 /* The MAD layer only provides a recv SGE the size of
2076  * ib_grh + MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH,
2077  * nor RoCE iCRC.  The Cu+ solution must provide a buffer for the entire
2078  * receive packet (334 bytes) with no VLAN and then copy the GRH
2079  * and the MAD datagram out to the provided SGE.
2080  */
2081 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2082                                             struct ib_recv_wr *wr,
2083                                             struct bnxt_qplib_swqe *wqe,
2084                                             int payload_size)
2085 {
2086         struct bnxt_qplib_sge ref, sge;
2087         u32 rq_prod_index;
2088         struct bnxt_re_sqp_entries *sqp_entry;
2089
2090         rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2091
2092         if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2093                 return -ENOMEM;
2094
2095         /* Create 1 SGE to receive the entire
2096          * Ethernet packet
2097          */
2098         /* Save the reference from ULP */
2099         ref.addr = wqe->sg_list[0].addr;
2100         ref.lkey = wqe->sg_list[0].lkey;
2101         ref.size = wqe->sg_list[0].size;
2102
2103         sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2104
2105         /* SGE 1 */
2106         wqe->sg_list[0].addr = sge.addr;
2107         wqe->sg_list[0].lkey = sge.lkey;
2108         wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2109         sge.size -= wqe->sg_list[0].size;
2110
2111         sqp_entry->sge.addr = ref.addr;
2112         sqp_entry->sge.lkey = ref.lkey;
2113         sqp_entry->sge.size = ref.size;
2114         /* Store the wrid for reporting completion */
2115         sqp_entry->wrid = wqe->wr_id;
2116         /* change the wqe->wrid to table index */
2117         /* Change the wqe->wr_id to the table index */
2118         return 0;
2119 }
2120
2121 static int is_ud_qp(struct bnxt_re_qp *qp)
2122 {
2123         return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
2124 }
2125
2126 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2127                                   struct ib_send_wr *wr,
2128                                   struct bnxt_qplib_swqe *wqe)
2129 {
2130         struct bnxt_re_ah *ah = NULL;
2131
2132         if (is_ud_qp(qp)) {
2133                 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2134                 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2135                 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2136                 wqe->send.avid = ah->qplib_ah.id;
2137         }
2138         switch (wr->opcode) {
2139         case IB_WR_SEND:
2140                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2141                 break;
2142         case IB_WR_SEND_WITH_IMM:
2143                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2144                 wqe->send.imm_data = wr->ex.imm_data;
2145                 break;
2146         case IB_WR_SEND_WITH_INV:
2147                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2148                 wqe->send.inv_key = wr->ex.invalidate_rkey;
2149                 break;
2150         default:
2151                 return -EINVAL;
2152         }
2153         if (wr->send_flags & IB_SEND_SIGNALED)
2154                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2155         if (wr->send_flags & IB_SEND_FENCE)
2156                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2157         if (wr->send_flags & IB_SEND_SOLICITED)
2158                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2159         if (wr->send_flags & IB_SEND_INLINE)
2160                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2161
2162         return 0;
2163 }
2164
2165 static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
2166                                   struct bnxt_qplib_swqe *wqe)
2167 {
2168         switch (wr->opcode) {
2169         case IB_WR_RDMA_WRITE:
2170                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2171                 break;
2172         case IB_WR_RDMA_WRITE_WITH_IMM:
2173                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2174                 wqe->rdma.imm_data = wr->ex.imm_data;
2175                 break;
2176         case IB_WR_RDMA_READ:
2177                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2178                 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2179                 break;
2180         default:
2181                 return -EINVAL;
2182         }
2183         wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2184         wqe->rdma.r_key = rdma_wr(wr)->rkey;
2185         if (wr->send_flags & IB_SEND_SIGNALED)
2186                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2187         if (wr->send_flags & IB_SEND_FENCE)
2188                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2189         if (wr->send_flags & IB_SEND_SOLICITED)
2190                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2191         if (wr->send_flags & IB_SEND_INLINE)
2192                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2193
2194         return 0;
2195 }
2196
2197 static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
2198                                     struct bnxt_qplib_swqe *wqe)
2199 {
2200         switch (wr->opcode) {
2201         case IB_WR_ATOMIC_CMP_AND_SWP:
2202                 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2203                 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2204                 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2205                 break;
2206         case IB_WR_ATOMIC_FETCH_AND_ADD:
2207                 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2208                 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2209                 break;
2210         default:
2211                 return -EINVAL;
2212         }
2213         wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2214         wqe->atomic.r_key = atomic_wr(wr)->rkey;
2215         if (wr->send_flags & IB_SEND_SIGNALED)
2216                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2217         if (wr->send_flags & IB_SEND_FENCE)
2218                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2219         if (wr->send_flags & IB_SEND_SOLICITED)
2220                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2221         return 0;
2222 }
2223
2224 static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
2225                                  struct bnxt_qplib_swqe *wqe)
2226 {
2227         wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2228         wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2229
2230         if (wr->send_flags & IB_SEND_SIGNALED)
2231                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2232         if (wr->send_flags & IB_SEND_FENCE)
2233                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2234         if (wr->send_flags & IB_SEND_SOLICITED)
2235                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2236
2237         return 0;
2238 }
2239
2240 static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
2241                                  struct bnxt_qplib_swqe *wqe)
2242 {
2243         struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2244         struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2245         int access = wr->access;
2246
2247         wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2248         wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2249         wqe->frmr.page_list = mr->pages;
2250         wqe->frmr.page_list_len = mr->npages;
2251         wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2252         wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2253
2254         if (wr->wr.send_flags & IB_SEND_FENCE)
2255                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2256         if (wr->wr.send_flags & IB_SEND_SIGNALED)
2257                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2258
2259         if (access & IB_ACCESS_LOCAL_WRITE)
2260                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2261         if (access & IB_ACCESS_REMOTE_READ)
2262                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2263         if (access & IB_ACCESS_REMOTE_WRITE)
2264                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2265         if (access & IB_ACCESS_REMOTE_ATOMIC)
2266                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2267         if (access & IB_ACCESS_MW_BIND)
2268                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2269
2270         wqe->frmr.l_key = wr->key;
2271         wqe->frmr.length = wr->mr->length;
2272         wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2273         wqe->frmr.va = wr->mr->iova;
2274         return 0;
2275 }
2276
2277 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2278                                     struct ib_send_wr *wr,
2279                                     struct bnxt_qplib_swqe *wqe)
2280 {
2281         /* Copy the inline data to the data field */
2282         u8 *in_data;
2283         u32 i, sge_len;
2284         void *sge_addr;
2285
2286         in_data = wqe->inline_data;
2287         for (i = 0; i < wr->num_sge; i++) {
2288                 sge_addr = (void *)(unsigned long)
2289                                 wr->sg_list[i].addr;
2290                 sge_len = wr->sg_list[i].length;
2291
2292                 if ((sge_len + wqe->inline_len) >
2293                     BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2294                         dev_err(rdev_to_dev(rdev),
2295                                 "Inline data size requested > supported value");
2296                         return -EINVAL;
2297                 }
2299
2300                 memcpy(in_data, sge_addr, sge_len);
2301                 in_data += sge_len;
2302                 wqe->inline_len += sge_len;
2303         }
2304         return wqe->inline_len;
2305 }
2306
2307 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2308                                    struct ib_send_wr *wr,
2309                                    struct bnxt_qplib_swqe *wqe)
2310 {
2311         int payload_sz = 0;
2312
2313         if (wr->send_flags & IB_SEND_INLINE)
2314                 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2315         else
2316                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2317                                                wqe->num_sge);
2318
2319         return payload_sz;
2320 }
2321
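/*
 * Presumed rationale (not spelled out in this file): UD/GSI/raw-Ethernet
 * SQs on this HW can stall once BNXT_RE_UD_QP_HW_STALL WQEs have been
 * posted; re-asserting RTS via a modify-QP nudges the queue, after which
 * the WQE count restarts from zero.
 */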
2322 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2323 {
2324         if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2325              qp->ib_qp.qp_type == IB_QPT_GSI ||
2326              qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2327              qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2328                 int qp_attr_mask;
2329                 struct ib_qp_attr qp_attr;
2330
2331                 qp_attr_mask = IB_QP_STATE;
2332                 qp_attr.qp_state = IB_QPS_RTS;
2333                 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2334                 qp->qplib_qp.wqe_cnt = 0;
2335         }
2336 }
2337
2338 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2339                                        struct bnxt_re_qp *qp,
2340                                        struct ib_send_wr *wr)
2341 {
2342         struct bnxt_qplib_swqe wqe;
2343         int rc = 0, payload_sz = 0;
2344         unsigned long flags;
2345
2346         spin_lock_irqsave(&qp->sq_lock, flags);
2348         while (wr) {
2349                 /* House keeping */
2350                 memset(&wqe, 0, sizeof(wqe));
2351
2352                 /* Common */
2353                 wqe.num_sge = wr->num_sge;
2354                 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2355                         dev_err(rdev_to_dev(rdev),
2356                                 "Limit exceeded for Send SGEs");
2357                         rc = -EINVAL;
2358                         goto bad;
2359                 }
2360
2361                 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2362                 if (payload_sz < 0) {
2363                         rc = -EINVAL;
2364                         goto bad;
2365                 }
2366                 wqe.wr_id = wr->wr_id;
2367
2368                 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2369
2370                 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2371                 if (!rc)
2372                         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2373 bad:
2374                 if (rc) {
2375                         dev_err(rdev_to_dev(rdev),
2376                                 "Post send failed opcode = %#x rc = %d",
2377                                 wr->opcode, rc);
2378                         break;
2379                 }
2380                 wr = wr->next;
2381         }
2382         bnxt_qplib_post_send_db(&qp->qplib_qp);
2383         bnxt_ud_qp_hw_stall_workaround(qp);
2384         spin_unlock_irqrestore(&qp->sq_lock, flags);
2385         return rc;
2386 }
2387
2388 int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2389                       struct ib_send_wr **bad_wr)
2390 {
2391         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2392         struct bnxt_qplib_swqe wqe;
2393         int rc = 0, payload_sz = 0;
2394         unsigned long flags;
2395
2396         spin_lock_irqsave(&qp->sq_lock, flags);
2397         while (wr) {
2398                 /* House keeping */
2399                 memset(&wqe, 0, sizeof(wqe));
2400
2401                 /* Common */
2402                 wqe.num_sge = wr->num_sge;
2403                 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2404                         dev_err(rdev_to_dev(qp->rdev),
2405                                 "Limit exceeded for Send SGEs");
2406                         rc = -EINVAL;
2407                         goto bad;
2408                 }
2409
2410                 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2411                 if (payload_sz < 0) {
2412                         rc = -EINVAL;
2413                         goto bad;
2414                 }
2415                 wqe.wr_id = wr->wr_id;
2416
2417                 switch (wr->opcode) {
2418                 case IB_WR_SEND:
2419                 case IB_WR_SEND_WITH_IMM:
2420                         if (ib_qp->qp_type == IB_QPT_GSI) {
2421                                 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2422                                                                payload_sz);
2423                                 if (rc)
2424                                         goto bad;
2425                                 wqe.rawqp1.lflags |=
2426                                         SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2427                         }
2428                         if (wr->send_flags & IB_SEND_IP_CSUM)
2429                                 wqe.rawqp1.lflags |=
2430                                         SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2436                         /* Fall thru to build the wqe */
2437                 case IB_WR_SEND_WITH_INV:
2438                         rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2439                         break;
2440                 case IB_WR_RDMA_WRITE:
2441                 case IB_WR_RDMA_WRITE_WITH_IMM:
2442                 case IB_WR_RDMA_READ:
2443                         rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2444                         break;
2445                 case IB_WR_ATOMIC_CMP_AND_SWP:
2446                 case IB_WR_ATOMIC_FETCH_AND_ADD:
2447                         rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2448                         break;
2449                 case IB_WR_RDMA_READ_WITH_INV:
2450                         dev_err(rdev_to_dev(qp->rdev),
2451                                 "RDMA Read with Invalidate is not supported");
2452                         rc = -EINVAL;
2453                         goto bad;
2454                 case IB_WR_LOCAL_INV:
2455                         rc = bnxt_re_build_inv_wqe(wr, &wqe);
2456                         break;
2457                 case IB_WR_REG_MR:
2458                         rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2459                         break;
2460                 default:
2461                         /* Unsupported WRs */
2462                         dev_err(rdev_to_dev(qp->rdev),
2463                                 "WR (%#x) is not supported", wr->opcode);
2464                         rc = -EINVAL;
2465                         goto bad;
2466                 }
2467                 if (!rc)
2468                         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2469 bad:
2470                 if (rc) {
2471                         dev_err(rdev_to_dev(qp->rdev),
2472                                 "post_send failed op:%#x qps = %#x rc = %d\n",
2473                                 wr->opcode, qp->qplib_qp.state, rc);
2474                         *bad_wr = wr;
2475                         break;
2476                 }
2477                 wr = wr->next;
2478         }
2479         bnxt_qplib_post_send_db(&qp->qplib_qp);
2480         bnxt_ud_qp_hw_stall_workaround(qp);
2481         spin_unlock_irqrestore(&qp->sq_lock, flags);
2482
2483         return rc;
2484 }
2485
2486 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2487                                        struct bnxt_re_qp *qp,
2488                                        struct ib_recv_wr *wr)
2489 {
2490         struct bnxt_qplib_swqe wqe;
2491         int rc = 0;
2492
2494         while (wr) {
2495                 /* House keeping */
2496                 memset(&wqe, 0, sizeof(wqe));
2497
2498                 /* Common */
2499                 wqe.num_sge = wr->num_sge;
2500                 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2501                         dev_err(rdev_to_dev(rdev),
2502                                 "Limit exceeded for Receive SGEs");
2503                         rc = -EINVAL;
2504                         break;
2505                 }
2506                 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2507                 wqe.wr_id = wr->wr_id;
2508                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2509
2510                 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2511                 if (rc)
2512                         break;
2513
2514                 wr = wr->next;
2515         }
2516         if (!rc)
2517                 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2518         return rc;
2519 }
2520
2521 int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2522                       struct ib_recv_wr **bad_wr)
2523 {
2524         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2525         struct bnxt_qplib_swqe wqe;
2526         int rc = 0, payload_sz = 0;
2527         unsigned long flags;
2528         u32 count = 0;
2529
2530         spin_lock_irqsave(&qp->rq_lock, flags);
2531         while (wr) {
2532                 /* House keeping */
2533                 memset(&wqe, 0, sizeof(wqe));
2534
2535                 /* Common */
2536                 wqe.num_sge = wr->num_sge;
2537                 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2538                         dev_err(rdev_to_dev(qp->rdev),
2539                                 "Limit exceeded for Receive SGEs");
2540                         rc = -EINVAL;
2541                         *bad_wr = wr;
2542                         break;
2543                 }
2544
2545                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2546                                                wr->num_sge);
2547                 wqe.wr_id = wr->wr_id;
2548                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2549
2550                 if (ib_qp->qp_type == IB_QPT_GSI)
2551                         rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2552                                                               payload_sz);
2553                 if (!rc)
2554                         rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2555                 if (rc) {
2556                         *bad_wr = wr;
2557                         break;
2558                 }
2559
2560                 /* Ring the DB once the number of posted RQEs reaches the threshold */
2561                 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2562                         bnxt_qplib_post_recv_db(&qp->qplib_qp);
2563                         count = 0;
2564                 }
2565
2566                 wr = wr->next;
2567         }
2568
2569         if (count)
2570                 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2571
2572         spin_unlock_irqrestore(&qp->rq_lock, flags);
2573
2574         return rc;
2575 }
2576
2577 /* Completion Queues */
2578 int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2579 {
2580         int rc;
2581         struct bnxt_re_cq *cq;
2582         struct bnxt_qplib_nq *nq;
2583         struct bnxt_re_dev *rdev;
2584
2585         cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2586         rdev = cq->rdev;
2587         nq = cq->qplib_cq.nq;
2588
2589         rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2590         if (rc) {
2591                 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2592                 return rc;
2593         }
2594         if (!IS_ERR_OR_NULL(cq->umem))
2595                 ib_umem_release(cq->umem);
2596
2597         atomic_dec(&rdev->cq_count);
2598         nq->budget--;
2599         kfree(cq->cql);
2600         kfree(cq);
2601
2602         return 0;
2603 }
2604
2605 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2606                                 const struct ib_cq_init_attr *attr,
2607                                 struct ib_ucontext *context,
2608                                 struct ib_udata *udata)
2609 {
2610         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2611         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2612         struct bnxt_re_cq *cq = NULL;
2613         int rc, entries;
2614         int cqe = attr->cqe;
2615         struct bnxt_qplib_nq *nq = NULL;
2616         unsigned int nq_alloc_cnt;
2617
2618         /* Validate CQ fields */
2619         if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2620                 dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2621                 return ERR_PTR(-EINVAL);
2622         }
2623         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2624         if (!cq)
2625                 return ERR_PTR(-ENOMEM);
2626
2627         cq->rdev = rdev;
2628         cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2629
2630         entries = roundup_pow_of_two(cqe + 1);
2631         if (entries > dev_attr->max_cq_wqes + 1)
2632                 entries = dev_attr->max_cq_wqes + 1;
2633
2634         if (context) {
2635                 struct bnxt_re_cq_req req;
2636                 struct bnxt_re_ucontext *uctx = container_of
2637                                                 (context,
2638                                                  struct bnxt_re_ucontext,
2639                                                  ib_uctx);
2640                 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2641                         rc = -EFAULT;
2642                         goto fail;
2643                 }
2644
2645                 cq->umem = ib_umem_get(context, req.cq_va,
2646                                        entries * sizeof(struct cq_base),
2647                                        IB_ACCESS_LOCAL_WRITE, 1);
2648                 if (IS_ERR(cq->umem)) {
2649                         rc = PTR_ERR(cq->umem);
2650                         goto fail;
2651                 }
2652                 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2653                 cq->qplib_cq.nmap = cq->umem->nmap;
2654                 cq->qplib_cq.dpi = &uctx->dpi;
2655         } else {
2656                 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2657                 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2658                                   GFP_KERNEL);
2659                 if (!cq->cql) {
2660                         rc = -ENOMEM;
2661                         goto fail;
2662                 }
2663
2664                 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2665                 cq->qplib_cq.sghead = NULL;
2666                 cq->qplib_cq.nmap = 0;
2667         }
2668         /*
2669          * Allocate the NQ in a round-robin fashion; nq_alloc_cnt is
2670          * used to derive the NQ index.
2671          */
2672         nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2673         nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2674         cq->qplib_cq.max_wqe = entries;
2675         cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2676         cq->qplib_cq.nq = nq;
2677
2678         rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2679         if (rc) {
2680                 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2681                 goto fail;
2682         }
2683
2684         cq->ib_cq.cqe = entries;
2685         cq->cq_period = cq->qplib_cq.period;
2686         nq->budget++;
2687
2688         atomic_inc(&rdev->cq_count);
2689
2690         if (context) {
2691                 struct bnxt_re_cq_resp resp;
2692
2693                 resp.cqid = cq->qplib_cq.id;
2694                 resp.tail = cq->qplib_cq.hwq.cons;
2695                 resp.phase = cq->qplib_cq.period;
2696                 resp.rsvd = 0;
2697                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2698                 if (rc) {
2699                         dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2700                         bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2701                         goto c2fail;
2702                 }
2703         }
2704
2705         return &cq->ib_cq;
2706
2707 c2fail:
2708         if (context)
2709                 ib_umem_release(cq->umem);
2710 fail:
2711         kfree(cq->cql);
2712         kfree(cq);
2713         return ERR_PTR(rc);
2714 }
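
/*
 * Example (kernel ULP view): this verb is reached through
 * ib_create_cq(). A minimal sketch, assuming a ULP with an
 * ib_device *ibdev and a hypothetical handler my_comp_handler():
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 256 };
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(ibdev, my_comp_handler, NULL, my_ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 * Note the requested depth is rounded up via
 * roundup_pow_of_two(cqe + 1), so ib_cq.cqe may exceed what was
 * asked for; kernel CQs stage CQEs in cq->cql, user CQs in the
 * umem described by bnxt_re_cq_req.
 */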
2715
2716 static u8 __req_to_ib_wc_status(u8 qstatus)
2717 {
2718         switch (qstatus) {
2719         case CQ_REQ_STATUS_OK:
2720                 return IB_WC_SUCCESS;
2721         case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2722                 return IB_WC_BAD_RESP_ERR;
2723         case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2724                 return IB_WC_LOC_LEN_ERR;
2725         case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2726                 return IB_WC_LOC_QP_OP_ERR;
2727         case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2728                 return IB_WC_LOC_PROT_ERR;
2729         case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2730                 return IB_WC_GENERAL_ERR;
2731         case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2732                 return IB_WC_REM_INV_REQ_ERR;
2733         case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2734                 return IB_WC_REM_ACCESS_ERR;
2735         case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2736                 return IB_WC_REM_OP_ERR;
2737         case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2738                 return IB_WC_RNR_RETRY_EXC_ERR;
2739         case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2740                 return IB_WC_RETRY_EXC_ERR;
2741         case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2742                 return IB_WC_WR_FLUSH_ERR;
2743         default:
2744                 return IB_WC_GENERAL_ERR;
2745         }
2747 }
2748
2749 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2750 {
2751         switch (qstatus) {
2752         case CQ_RES_RAWETH_QP1_STATUS_OK:
2753                 return IB_WC_SUCCESS;
2754         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2755                 return IB_WC_LOC_ACCESS_ERR;
2756         case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2757                 return IB_WC_LOC_LEN_ERR;
2758         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2759                 return IB_WC_LOC_PROT_ERR;
2760         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2761                 return IB_WC_LOC_QP_OP_ERR;
2762         case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2763                 return IB_WC_GENERAL_ERR;
2764         case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2765                 return IB_WC_WR_FLUSH_ERR;
2766         case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2767                 return IB_WC_WR_FLUSH_ERR;
2768         default:
2769                 return IB_WC_GENERAL_ERR;
2770         }
2771 }
2772
2773 static u8 __rc_to_ib_wc_status(u8 qstatus)
2774 {
2775         switch (qstatus) {
2776         case CQ_RES_RC_STATUS_OK:
2777                 return IB_WC_SUCCESS;
2778         case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2779                 return IB_WC_LOC_ACCESS_ERR;
2780         case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2781                 return IB_WC_LOC_LEN_ERR;
2782         case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2783                 return IB_WC_LOC_PROT_ERR;
2784         case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2785                 return IB_WC_LOC_QP_OP_ERR;
2786         case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2787                 return IB_WC_GENERAL_ERR;
2788         case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2789                 return IB_WC_REM_INV_REQ_ERR;
2790         case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2791                 return IB_WC_WR_FLUSH_ERR;
2792         case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2793                 return IB_WC_WR_FLUSH_ERR;
2794         default:
2795                 return IB_WC_GENERAL_ERR;
2796         }
2797 }
2798
2799 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2800 {
2801         switch (cqe->type) {
2802         case BNXT_QPLIB_SWQE_TYPE_SEND:
2803                 wc->opcode = IB_WC_SEND;
2804                 break;
2805         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2806                 wc->opcode = IB_WC_SEND;
2807                 wc->wc_flags |= IB_WC_WITH_IMM;
2808                 break;
2809         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2810                 wc->opcode = IB_WC_SEND;
2811                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2812                 break;
2813         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2814                 wc->opcode = IB_WC_RDMA_WRITE;
2815                 break;
2816         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2817                 wc->opcode = IB_WC_RDMA_WRITE;
2818                 wc->wc_flags |= IB_WC_WITH_IMM;
2819                 break;
2820         case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2821                 wc->opcode = IB_WC_RDMA_READ;
2822                 break;
2823         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2824                 wc->opcode = IB_WC_COMP_SWAP;
2825                 break;
2826         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2827                 wc->opcode = IB_WC_FETCH_ADD;
2828                 break;
2829         case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2830                 wc->opcode = IB_WC_LOCAL_INV;
2831                 break;
2832         case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2833                 wc->opcode = IB_WC_REG_MR;
2834                 break;
2835         default:
2836                 wc->opcode = IB_WC_SEND;
2837                 break;
2838         }
2839
2840         wc->status = __req_to_ib_wc_status(cqe->status);
2841 }
2842
2843 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2844                                      u16 raweth_qp1_flags2)
2845 {
2846         bool is_ipv6 = false, is_ipv4 = false;
2847
2848         /* raweth_qp1_flags Bit 9-6 indicates itype */
2849         if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2850             != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2851                 return -1;
2852
2853         if (raweth_qp1_flags2 &
2854             CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2855             raweth_qp1_flags2 &
2856             CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2857                 /* raweth_qp1_flags2 Bit 8 indicates ip_type: 0 = v4, 1 = v6 */
2858                 is_ipv6 = !!(raweth_qp1_flags2 &
2859                              CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE);
2860                 is_ipv4 = !is_ipv6;
2861                 return ((is_ipv6) ?
2862                          BNXT_RE_ROCEV2_IPV6_PACKET :
2863                          BNXT_RE_ROCEV2_IPV4_PACKET);
2864         } else {
2865                 return BNXT_RE_ROCE_V1_PACKET;
2866         }
2867 }
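
/*
 * Worked example: a RoCEv2/IPv6 frame carries ITYPE_ROCE in
 * raweth_qp1_flags and both IP_CS_CALC and L4_CS_CALC in
 * raweth_qp1_flags2 with the IP_TYPE bit set (0 = v4, 1 = v6), so
 * the function returns BNXT_RE_ROCEV2_IPV6_PACKET. With neither
 * checksum-calculated bit set, the frame has no IP header and is
 * classified as BNXT_RE_ROCE_V1_PACKET.
 */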
2868
2869 static int bnxt_re_to_ib_nw_type(int nw_type)
2870 {
2871         u8 nw_hdr_type = 0xFF;
2872
2873         switch (nw_type) {
2874         case BNXT_RE_ROCE_V1_PACKET:
2875                 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2876                 break;
2877         case BNXT_RE_ROCEV2_IPV4_PACKET:
2878                 nw_hdr_type = RDMA_NETWORK_IPV4;
2879                 break;
2880         case BNXT_RE_ROCEV2_IPV6_PACKET:
2881                 nw_hdr_type = RDMA_NETWORK_IPV6;
2882                 break;
2883         }
2884         return nw_hdr_type;
2885 }
2886
2887 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2888                                        void *rq_hdr_buf)
2889 {
2890         u8 *tmp_buf = NULL;
2891         struct ethhdr *eth_hdr;
2892         u16 eth_type;
2893         bool rc = false;
2894
2895         tmp_buf = (u8 *)rq_hdr_buf;
2896         /*
2897          * If the destination MAC differs from the interface MAC, this
2898          * could be a loopback or multicast address; check whether it
2899          * is a loopback packet.
2900          */
2901         if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2902                 tmp_buf += 4;
2903                 /* Check the ether type */
2904                 eth_hdr = (struct ethhdr *)tmp_buf;
2905                 eth_type = ntohs(eth_hdr->h_proto);
2906                 switch (eth_type) {
2907                 case ETH_P_IBOE:
2908                         rc = true;
2909                         break;
2910                 case ETH_P_IP:
2911                 case ETH_P_IPV6: {
2912                         u32 len;
2913                         struct udphdr *udp_hdr;
2914
2915                         len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2916                                                       sizeof(struct ipv6hdr));
2917                         tmp_buf += sizeof(struct ethhdr) + len;
2918                         udp_hdr = (struct udphdr *)tmp_buf;
2919                         if (ntohs(udp_hdr->dest) ==
2920                                     ROCE_V2_UDP_DPORT)
2921                                 rc = true;
2922                         break;
2923                         }
2924                 default:
2925                         break;
2926                 }
2927         }
2928
2929         return rc;
2930 }
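
/*
 * Buffer layout assumed by the walk above for a looped-back RoCEv2
 * frame (byte offsets from the start of rq_hdr_buf):
 *
 *	0..3	internal loopback header (hence the tmp_buf += 4)
 *	4..17	struct ethhdr; h_proto selects IBOE vs IP vs IPv6
 *	18..	struct iphdr or struct ipv6hdr
 *	then	struct udphdr; dest == ROCE_V2_UDP_DPORT (4791)
 *		identifies a RoCEv2 packet
 */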
2931
2932 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2933                                          struct bnxt_qplib_cqe *cqe)
2934 {
2935         struct bnxt_re_dev *rdev = qp1_qp->rdev;
2936         struct bnxt_re_sqp_entries *sqp_entry = NULL;
2937         struct bnxt_re_qp *qp = rdev->qp1_sqp;
2938         struct ib_send_wr *swr;
2939         struct ib_ud_wr udwr;
2940         struct ib_recv_wr rwr;
2941         int pkt_type = 0;
2942         u32 tbl_idx;
2943         void *rq_hdr_buf;
2944         dma_addr_t rq_hdr_buf_map;
2945         dma_addr_t shrq_hdr_buf_map;
2946         u32 offset = 0;
2947         u32 skip_bytes = 0;
2948         struct ib_sge s_sge[2];
2949         struct ib_sge r_sge[2];
2950         int rc;
2951
2952         memset(&udwr, 0, sizeof(udwr));
2953         memset(&rwr, 0, sizeof(rwr));
2954         memset(&s_sge, 0, sizeof(s_sge));
2955         memset(&r_sge, 0, sizeof(r_sge));
2956
2957         swr = &udwr.wr;
2958         tbl_idx = cqe->wr_id;
2959
2960         rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2961                         (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2962         rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2963                                                           tbl_idx);
2964
2965         /* Shadow QP header buffer */
2966         shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2967                                                             tbl_idx);
2968         sqp_entry = &rdev->sqp_tbl[tbl_idx];
2969
2970         /* Store this cqe */
2971         memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2972         sqp_entry->qp1_qp = qp1_qp;
2973
2974         /* Find packet type from the cqe */
2975
2976         pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2977                                              cqe->raweth_qp1_flags2);
2978         if (pkt_type < 0) {
2979                 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2980                 return -EINVAL;
2981         }
2982
2983         /* Adjust the offset for the user buffer and post in the rq */
2984
2985         if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2986                 offset = 20;
2987
2988         /*
2989          * QP1 loopback packet has 4 bytes of internal header before
2990          * ether header. Skip these four bytes.
2991          */
2992         if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2993                 skip_bytes = 4;
2994
2995         /* First send SGE; skip the ether header */
2996         s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2997                         + skip_bytes;
2998         s_sge[0].lkey = 0xFFFFFFFF;
2999         s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3000                                 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3001
3002         /* Second Send SGE */
3003         s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3004                         BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3005         if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3006                 s_sge[1].addr += 8;
3007         s_sge[1].lkey = 0xFFFFFFFF;
3008         s_sge[1].length = 256;
3009
3010         /* First recv SGE */
3011
3012         r_sge[0].addr = shrq_hdr_buf_map;
3013         r_sge[0].lkey = 0xFFFFFFFF;
3014         r_sge[0].length = 40;
3015
3016         r_sge[1].addr = sqp_entry->sge.addr + offset;
3017         r_sge[1].lkey = sqp_entry->sge.lkey;
3018         r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3019
3020         /* Create receive work request */
3021         rwr.num_sge = 2;
3022         rwr.sg_list = r_sge;
3023         rwr.wr_id = tbl_idx;
3024         rwr.next = NULL;
3025
3026         rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
3027         if (rc) {
3028                 dev_err(rdev_to_dev(rdev),
3029                         "Failed to post Rx buffers to shadow QP");
3030                 return rc;
3031         }
3032
3033         swr->num_sge = 2;
3034         swr->sg_list = s_sge;
3035         swr->wr_id = tbl_idx;
3036         swr->opcode = IB_WR_SEND;
3037         swr->next = NULL;
3038
3039         udwr.ah = &rdev->sqp_ah->ib_ah;
3040         udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
3041         udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
3042
3043         /* Post the data received in the send queue */
3044         rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
3045
3046         return rc;
3047 }
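
/*
 * GSI relay path in short: a packet received on the raw-Ethertype
 * QP1 is not completed to the consumer directly. Its CQE is stashed
 * in rdev->sqp_tbl[wr_id], a receive buffer is posted on the shadow
 * QP, and the payload (GRH-sized header SGE plus a 256-byte data
 * SGE) is re-sent to the shadow QP as a UD SEND. The matching RX
 * completion on the shadow QP is later translated back using the
 * stored CQE in bnxt_re_process_res_shadow_qp_wc().
 */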
3048
3049 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3050                                           struct bnxt_qplib_cqe *cqe)
3051 {
3052         wc->opcode = IB_WC_RECV;
3053         wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3054         wc->wc_flags |= IB_WC_GRH;
3055 }
3056
3057 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3058                                 u16 *vid, u8 *sl)
3059 {
3060         bool ret = false;
3061         u32 metadata;
3062         u16 tpid;
3063
3064         metadata = orig_cqe->raweth_qp1_metadata;
3065         if (orig_cqe->raweth_qp1_flags2 &
3066                 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3067                 tpid = ((metadata &
3068                          CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3069                          CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3070                 if (tpid == ETH_P_8021Q) {
3071                         *vid = metadata &
3072                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3073                         *sl = (metadata &
3074                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3075                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3076                         ret = true;
3077                 }
3078         }
3079
3080         return ret;
3081 }
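
/*
 * Worked example, assuming the metadata word packs the 802.1Q TCI
 * in its low 16 bits with the TPID above it (per the VID/PRI/TPID
 * masks): metadata = 0x8100a064 decodes to tpid 0x8100
 * (ETH_P_8021Q), sl (PCP) = 5 and vid = 0x064, so the caller flags
 * the WC with IB_WC_WITH_VLAN and vlan_id 100.
 */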
3082
3083 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3084                                       struct bnxt_qplib_cqe *cqe)
3085 {
3086         wc->opcode = IB_WC_RECV;
3087         wc->status = __rc_to_ib_wc_status(cqe->status);
3088
3089         if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3090                 wc->wc_flags |= IB_WC_WITH_IMM;
3091         if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3092                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3093         if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3094             (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3095                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3096 }
3097
3098 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3099                                              struct ib_wc *wc,
3100                                              struct bnxt_qplib_cqe *cqe)
3101 {
3102         struct bnxt_re_dev *rdev = qp->rdev;
3103         struct bnxt_re_qp *qp1_qp = NULL;
3104         struct bnxt_qplib_cqe *orig_cqe = NULL;
3105         struct bnxt_re_sqp_entries *sqp_entry = NULL;
3106         int nw_type;
3107         u32 tbl_idx;
3108         u16 vlan_id;
3109         u8 sl;
3110
3111         tbl_idx = cqe->wr_id;
3112
3113         sqp_entry = &rdev->sqp_tbl[tbl_idx];
3114         qp1_qp = sqp_entry->qp1_qp;
3115         orig_cqe = &sqp_entry->cqe;
3116
3117         wc->wr_id = sqp_entry->wrid;
3118         wc->byte_len = orig_cqe->length;
3119         wc->qp = &qp1_qp->ib_qp;
3120
3121         wc->ex.imm_data = orig_cqe->immdata;
3122         wc->src_qp = orig_cqe->src_qp;
3123         memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3124         if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3125                 wc->vlan_id = vlan_id;
3126                 wc->sl = sl;
3127                 wc->wc_flags |= IB_WC_WITH_VLAN;
3128         }
3129         wc->port_num = 1;
3130         wc->vendor_err = orig_cqe->status;
3131
3132         wc->opcode = IB_WC_RECV;
3133         wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3134         wc->wc_flags |= IB_WC_GRH;
3135
3136         nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3137                                             orig_cqe->raweth_qp1_flags2);
3138         if (nw_type >= 0) {
3139                 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3140                 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3141         }
3142 }
3143
3144 static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
3145                                       struct bnxt_qplib_cqe *cqe)
3146 {
3147         wc->opcode = IB_WC_RECV;
3148         wc->status = __rc_to_ib_wc_status(cqe->status);
3149
3150         if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3151                 wc->wc_flags |= IB_WC_WITH_IMM;
3152         if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3153                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3154         if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3155             (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3156                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3157 }
3158
3159 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3160 {
3161         struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3162         unsigned long flags;
3163         int rc = 0;
3164
3165         spin_lock_irqsave(&qp->sq_lock, flags);
3166
3167         rc = bnxt_re_bind_fence_mw(lib_qp);
3168         if (!rc) {
3169                 lib_qp->sq.phantom_wqe_cnt++;
3170                 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3171                         "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3172                         lib_qp->id, lib_qp->sq.hwq.prod,
3173                         HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3174                         lib_qp->sq.phantom_wqe_cnt);
3175         }
3176
3177         spin_unlock_irqrestore(&qp->sq_lock, flags);
3178         return rc;
3179 }
3180
3181 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3182 {
3183         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3184         struct bnxt_re_qp *qp;
3185         struct bnxt_qplib_cqe *cqe;
3186         int i, ncqe, budget;
3187         struct bnxt_qplib_q *sq;
3188         struct bnxt_qplib_qp *lib_qp;
3189         u32 tbl_idx;
3190         struct bnxt_re_sqp_entries *sqp_entry = NULL;
3191         unsigned long flags;
3192
3193         spin_lock_irqsave(&cq->cq_lock, flags);
3194         budget = min_t(u32, num_entries, cq->max_cql);
3195         num_entries = budget;
3196         if (!cq->cql) {
3197                 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3198                 goto exit;
3199         }
3200         cqe = &cq->cql[0];
3201         while (budget) {
3202                 lib_qp = NULL;
3203                 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3204                 if (lib_qp) {
3205                         sq = &lib_qp->sq;
3206                         if (sq->send_phantom) {
3207                                 qp = container_of(lib_qp,
3208                                                   struct bnxt_re_qp, qplib_qp);
3209                                 if (send_phantom_wqe(qp) == -ENOMEM)
3210                                         dev_err(rdev_to_dev(cq->rdev),
3211                                                 "Phantom failed! Scheduled to send again\n");
3212                                 else
3213                                         sq->send_phantom = false;
3214                         }
3215                 }
3216                 if (ncqe < budget)
3217                         ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3218                                                               cqe + ncqe,
3219                                                               budget - ncqe);
3220
3221                 if (!ncqe)
3222                         break;
3223
3224                 for (i = 0; i < ncqe; i++, cqe++) {
3225                         /* Transcribe each qplib_wqe back to ib_wc */
3226                         memset(wc, 0, sizeof(*wc));
3227
3228                         wc->wr_id = cqe->wr_id;
3229                         wc->byte_len = cqe->length;
3230                         qp = container_of((struct bnxt_qplib_qp *)
3231                                           (unsigned long)cqe->qp_handle,
3232                                           struct bnxt_re_qp, qplib_qp);
3234                         if (!qp) {
3235                                 dev_err(rdev_to_dev(cq->rdev),
3236                                         "POLL CQ : bad QP handle");
3237                                 continue;
3238                         }
3239                         wc->qp = &qp->ib_qp;
3240                         wc->ex.imm_data = cqe->immdata;
3241                         wc->src_qp = cqe->src_qp;
3242                         memcpy(wc->smac, cqe->smac, ETH_ALEN);
3243                         wc->port_num = 1;
3244                         wc->vendor_err = cqe->status;
3245
3246                         switch (cqe->opcode) {
3247                         case CQ_BASE_CQE_TYPE_REQ:
3248                                 if (qp->qplib_qp.id ==
3249                                     qp->rdev->qp1_sqp->qplib_qp.id) {
3250                                         /* Handle this completion with
3251                                          * the stored completion
3252                                          */
3253                                         memset(wc, 0, sizeof(*wc));
3254                                         continue;
3255                                 }
3256                                 bnxt_re_process_req_wc(wc, cqe);
3257                                 break;
3258                         case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3259                                 if (!cqe->status) {
3260                                         int rc = 0;
3261
3262                                         rc = bnxt_re_process_raw_qp_pkt_rx
3263                                                                 (qp, cqe);
3264                                         if (!rc) {
3265                                                 memset(wc, 0, sizeof(*wc));
3266                                                 continue;
3267                                         }
3268                                         cqe->status = -1;
3269                                 }
3270                                 /* Errors need not be looped back.
3271                                  * But change the wr_id to the one
3272                                  * stored in the table
3273                                  */
3274                                 tbl_idx = cqe->wr_id;
3275                                 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3276                                 wc->wr_id = sqp_entry->wrid;
3277                                 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3278                                 break;
3279                         case CQ_BASE_CQE_TYPE_RES_RC:
3280                                 bnxt_re_process_res_rc_wc(wc, cqe);
3281                                 break;
3282                         case CQ_BASE_CQE_TYPE_RES_UD:
3283                                 if (qp->qplib_qp.id ==
3284                                     qp->rdev->qp1_sqp->qplib_qp.id) {
3285                                         /* Handle this completion with
3286                                          * the stored completion
3287                                          */
3288                                         if (cqe->status)
3289                                                 continue;
3290                                         bnxt_re_process_res_shadow_qp_wc
3291                                                         (qp, wc, cqe);
3292                                         break;
3295                                 }
3296                                 bnxt_re_process_res_ud_wc(wc, cqe);
3297                                 break;
3298                         default:
3299                                 dev_err(rdev_to_dev(cq->rdev),
3300                                         "POLL CQ : type 0x%x not handled",
3301                                         cqe->opcode);
3302                                 continue;
3303                         }
3304                         wc++;
3305                         budget--;
3306                 }
3307         }
3308 exit:
3309         spin_unlock_irqrestore(&cq->cq_lock, flags);
3310         return num_entries - budget;
3311 }
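
/*
 * Example (ULP view): a minimal polling loop over this verb,
 * assuming a hypothetical handle_wc() consumer. Since the budget is
 * also clamped to cq->max_cql above, one call may return fewer
 * entries than requested even while completions remain:
 *
 *	struct ib_wc wc[16];
 *	int n, i;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 */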
3312
3313 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3314                           enum ib_cq_notify_flags ib_cqn_flags)
3315 {
3316         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3317         int type = 0, rc = 0;
3318         unsigned long flags;
3319
3320         spin_lock_irqsave(&cq->cq_lock, flags);
3321         /* Trigger on the very next completion */
3322         if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3323                 type = DBR_DBR_TYPE_CQ_ARMALL;
3324         /* Trigger on the next solicited completion */
3325         else if (ib_cqn_flags & IB_CQ_SOLICITED)
3326                 type = DBR_DBR_TYPE_CQ_ARMSE;
3327
3328         /* Poll to see if there are missed events */
3329         if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3330             !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3331                 rc = 1;
3332                 goto exit;
3333         }
3334         bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3335
3336 exit:
3337         spin_unlock_irqrestore(&cq->cq_lock, flags);
3338         return rc;
3339 }
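
/*
 * Example (ULP view): the race-free arm sequence this verb
 * supports. With IB_CQ_REPORT_MISSED_EVENTS the call returns > 0
 * when completions arrived before the CQ was armed, in which case
 * the consumer must poll again rather than sleep:
 *
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto repoll;	// hypothetical label in the ULP
 */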
3340
3341 /* Memory Regions */
3342 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3343 {
3344         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3345         struct bnxt_re_dev *rdev = pd->rdev;
3346         struct bnxt_re_mr *mr;
3347         u64 pbl = 0;
3348         int rc;
3349
3350         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3351         if (!mr)
3352                 return ERR_PTR(-ENOMEM);
3353
3354         mr->rdev = rdev;
3355         mr->qplib_mr.pd = &pd->qplib_pd;
3356         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3357         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3358
3359         /* Allocate and register 0 as the address */
3360         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3361         if (rc)
3362                 goto fail;
3363
3364         mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3365         mr->qplib_mr.total_size = -1; /* Infinite length */
3366         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3367                                PAGE_SIZE);
3368         if (rc)
3369                 goto fail_mr;
3370
3371         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3372         if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3373                                IB_ACCESS_REMOTE_ATOMIC))
3374                 mr->ib_mr.rkey = mr->ib_mr.lkey;
3375         atomic_inc(&rdev->mr_count);
3376
3377         return &mr->ib_mr;
3378
3379 fail_mr:
3380         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3381 fail:
3382         kfree(mr);
3383         return ERR_PTR(rc);
3384 }
3385
3386 int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3387 {
3388         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3389         struct bnxt_re_dev *rdev = mr->rdev;
3390         int rc;
3391
3392         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3393         if (rc)
3394                 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3395
3396         if (mr->pages) {
3397                 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3398                                                         &mr->qplib_frpl);
3399                 kfree(mr->pages);
3400                 mr->npages = 0;
3401                 mr->pages = NULL;
3402         }
3403         if (!IS_ERR_OR_NULL(mr->ib_umem))
3404                 ib_umem_release(mr->ib_umem);
3405
3406         kfree(mr);
3407         atomic_dec(&rdev->mr_count);
3408         return rc;
3409 }
3410
3411 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3412 {
3413         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3414
3415         if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3416                 return -ENOMEM;
3417
3418         mr->pages[mr->npages++] = addr;
3419         return 0;
3420 }
3421
3422 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3423                       unsigned int *sg_offset)
3424 {
3425         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3426
3427         mr->npages = 0;
3428         return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3429 }
3430
3431 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3432                                u32 max_num_sg)
3433 {
3434         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3435         struct bnxt_re_dev *rdev = pd->rdev;
3436         struct bnxt_re_mr *mr = NULL;
3437         int rc;
3438
3439         if (type != IB_MR_TYPE_MEM_REG) {
3440                 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3441                 return ERR_PTR(-EINVAL);
3442         }
3443         if (max_num_sg > MAX_PBL_LVL_1_PGS)
3444                 return ERR_PTR(-EINVAL);
3445
3446         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3447         if (!mr)
3448                 return ERR_PTR(-ENOMEM);
3449
3450         mr->rdev = rdev;
3451         mr->qplib_mr.pd = &pd->qplib_pd;
3452         mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3453         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3454
3455         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3456         if (rc)
3457                 goto bail;
3458
3459         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3460         mr->ib_mr.rkey = mr->ib_mr.lkey;
3461
3462         mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3463         if (!mr->pages) {
3464                 rc = -ENOMEM;
3465                 goto fail;
3466         }
3467         rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3468                                                  &mr->qplib_frpl, max_num_sg);
3469         if (rc) {
3470                 dev_err(rdev_to_dev(rdev),
3471                         "Failed to allocate HW FR page list");
3472                 goto fail_mr;
3473         }
3474
3475         atomic_inc(&rdev->mr_count);
3476         return &mr->ib_mr;
3477
3478 fail_mr:
3479         kfree(mr->pages);
3480 fail:
3481         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3482 bail:
3483         kfree(mr);
3484         return ERR_PTR(rc);
3485 }
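
/*
 * Example (ULP view): the fast-registration flow that exercises
 * bnxt_re_alloc_mr() and bnxt_re_map_mr_sg() above; a minimal
 * sketch assuming a mapped sg table sgt with nents entries (error
 * handling elided):
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	int n = ib_map_mr_sg(mr, sgt->sgl, nents, NULL, PAGE_SIZE);
 *
 * ib_sg_to_pages() feeds each page address into bnxt_re_set_page(),
 * and the MR becomes usable once an IB_WR_REG_MR work request
 * referencing it completes.
 */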
3486
3487 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3488                                struct ib_udata *udata)
3489 {
3490         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3491         struct bnxt_re_dev *rdev = pd->rdev;
3492         struct bnxt_re_mw *mw;
3493         int rc;
3494
3495         mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3496         if (!mw)
3497                 return ERR_PTR(-ENOMEM);
3498         mw->rdev = rdev;
3499         mw->qplib_mw.pd = &pd->qplib_pd;
3500
3501         mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3502                                CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3503                                CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3504         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3505         if (rc) {
3506                 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3507                 goto fail;
3508         }
3509         mw->ib_mw.rkey = mw->qplib_mw.rkey;
3510
3511         atomic_inc(&rdev->mw_count);
3512         return &mw->ib_mw;
3513
3514 fail:
3515         kfree(mw);
3516         return ERR_PTR(rc);
3517 }
3518
3519 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3520 {
3521         struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3522         struct bnxt_re_dev *rdev = mw->rdev;
3523         int rc;
3524
3525         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3526         if (rc) {
3527                 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3528                 return rc;
3529         }
3530
3531         kfree(mw);
3532         atomic_dec(&rdev->mw_count);
3533         return rc;
3534 }
3535
3536 static int bnxt_re_page_size_ok(int page_shift)
3537 {
3538         switch (page_shift) {
3539         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3540         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3541         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3542         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3543         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3544         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3545         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3546         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3547                 return 1;
3548         default:
3549                 return 0;
3550         }
3551 }
3552
3553 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3554                              int page_shift)
3555 {
3556         u64 *pbl_tbl = pbl_tbl_orig;
3557         u64 paddr;
3558         u64 page_mask = (1ULL << page_shift) - 1;
3559         int i, pages;
3560         struct scatterlist *sg;
3561         int entry;
3562
3563         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3564                 pages = sg_dma_len(sg) >> PAGE_SHIFT;
3565                 for (i = 0; i < pages; i++) {
3566                         paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
3567                         if (pbl_tbl == pbl_tbl_orig)
3568                                 *pbl_tbl++ = paddr & ~page_mask;
3569                         else if ((paddr & page_mask) == 0)
3570                                 *pbl_tbl++ = paddr;
3571                 }
3572         }
3573         return pbl_tbl - pbl_tbl_orig;
3574 }
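
/*
 * Worked example: for a 2M hugetlb region registered with
 * page_shift = 21, the sg list is still walked in PAGE_SHIFT (4K)
 * steps. The first chunk stores its address masked down to 2M
 * alignment; of the remaining chunks only the 2M-aligned ones
 * qualify, so roughly one PBL entry is emitted per 2M page spanned
 * rather than 512.
 */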
3575
3576 /* uverbs */
3577 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3578                                   u64 virt_addr, int mr_access_flags,
3579                                   struct ib_udata *udata)
3580 {
3581         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3582         struct bnxt_re_dev *rdev = pd->rdev;
3583         struct bnxt_re_mr *mr;
3584         struct ib_umem *umem;
3585         u64 *pbl_tbl = NULL;
3586         int umem_pgs, page_shift, rc;
3587
3588         if (length > BNXT_RE_MAX_MR_SIZE) {
3589                 dev_err(rdev_to_dev(rdev), "MR size %llu exceeds max supported %llu\n",
3590                         length, (u64)BNXT_RE_MAX_MR_SIZE);
3591                 return ERR_PTR(-ENOMEM);
3592         }
3593
3594         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3595         if (!mr)
3596                 return ERR_PTR(-ENOMEM);
3597
3598         mr->rdev = rdev;
3599         mr->qplib_mr.pd = &pd->qplib_pd;
3600         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3601         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3602
3603         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3604         if (rc) {
3605                 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3606                 goto free_mr;
3607         }
3608         /* The fixed portion of the rkey is the same as the lkey */
3609         mr->ib_mr.rkey = mr->qplib_mr.rkey;
3610
3611         umem = ib_umem_get(ib_pd->uobject->context, start, length,
3612                            mr_access_flags, 0);
3613         if (IS_ERR(umem)) {
3614                 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3615                 rc = -EFAULT;
3616                 goto free_mrw;
3617         }
3618         mr->ib_umem = umem;
3619
3620         mr->qplib_mr.va = virt_addr;
3621         umem_pgs = ib_umem_page_count(umem);
3622         if (!umem_pgs) {
3623                 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3624                 rc = -EINVAL;
3625                 goto free_umem;
3626         }
3627         mr->qplib_mr.total_size = length;
3628
3629         pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3630         if (!pbl_tbl) {
3631                 rc = -ENOMEM;
3632                 goto free_umem;
3633         }
3634
3635         page_shift = umem->page_shift;
3636
3637         if (!bnxt_re_page_size_ok(page_shift)) {
3638                 dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3639                 rc = -EFAULT;
3640                 goto fail;
3641         }
3642
3643         if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
3644                 dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
3645                         length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3646                 rc = -EINVAL;
3647                 goto fail;
3648         }
3649         if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
3650                 page_shift = BNXT_RE_PAGE_SHIFT_2M;
3651                 dev_warn(rdev_to_dev(rdev), "hugetlb umem: using page size %#x",
3652                          1 << page_shift);
3653         }
3654
3655         /* Map umem buf ptrs to the PBL */
3656         umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3657         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3658                                umem_pgs, false, 1 << page_shift);
3659         if (rc) {
3660                 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3661                 goto fail;
3662         }
3663
3664         kfree(pbl_tbl);
3665
3666         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3667         mr->ib_mr.rkey = mr->qplib_mr.lkey;
3668         atomic_inc(&rdev->mr_count);
3669
3670         return &mr->ib_mr;
3671 fail:
3672         kfree(pbl_tbl);
3673 free_umem:
3674         ib_umem_release(umem);
3675 free_mrw:
3676         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3677 free_mr:
3678         kfree(mr);
3679         return ERR_PTR(rc);
3680 }
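
/*
 * Example (userspace view): this verb is reached through
 * ibv_reg_mr(); a minimal libibverbs sketch (error handling
 * elided):
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *
 * The pinned pages arrive here as the umem and are flattened into
 * pbl_tbl before bnxt_qplib_reg_mr() programs the HW.
 */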
3681
3682 struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3683                                            struct ib_udata *udata)
3684 {
3685         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3686         struct bnxt_re_uctx_resp resp;
3687         struct bnxt_re_ucontext *uctx;
3688         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3689         int rc;
3690
3691         dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3692                 ibdev->uverbs_abi_ver);
3693
3694         if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3695                 dev_dbg(rdev_to_dev(rdev), "ABI version mismatch; device supports %d",
3696                         BNXT_RE_ABI_VERSION);
3697                 return ERR_PTR(-EPERM);
3698         }
3699
3700         uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3701         if (!uctx)
3702                 return ERR_PTR(-ENOMEM);
3703
3704         uctx->rdev = rdev;
3705
3706         uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3707         if (!uctx->shpg) {
3708                 rc = -ENOMEM;
3709                 goto fail;
3710         }
3711         spin_lock_init(&uctx->sh_lock);
3712
3713         resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp; use idr_alloc instead */
3714         resp.max_qp = rdev->qplib_ctx.qpc_count;
3715         resp.pg_size = PAGE_SIZE;
3716         resp.cqe_sz = sizeof(struct cq_base);
3717         resp.max_cqd = dev_attr->max_cq_wqes;
3718         resp.rsvd    = 0;
3719
3720         rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3721         if (rc) {
3722                 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3723                 rc = -EFAULT;
3724                 goto cfail;
3725         }
3726
3727         return &uctx->ib_uctx;
3728 cfail:
3729         free_page((unsigned long)uctx->shpg);
3730         uctx->shpg = NULL;
3731 fail:
3732         kfree(uctx);
3733         return ERR_PTR(rc);
3734 }
3735
3736 int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3737 {
3738         struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3739                                                    struct bnxt_re_ucontext,
3740                                                    ib_uctx);
3741
3742         struct bnxt_re_dev *rdev = uctx->rdev;
3743         int rc = 0;
3744
3745         if (uctx->shpg)
3746                 free_page((unsigned long)uctx->shpg);
3747
3748         if (uctx->dpi.dbr) {
3749                 /* Free the DPI if this context allocated one (i.e. the
3750                  * application created at least one PD) and mark it NULL.
3751                  */
3752                 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3753                                             &rdev->qplib_res.dpi_tbl,
3754                                             &uctx->dpi);
3755                 if (rc)
3756                         dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3757                 /* Don't fail; continue */
3758                 uctx->dpi.dbr = NULL;
3759         }
3760
3761         kfree(uctx);
3762         return 0;
3763 }
3764
3765 /* Helper function to mmap the virtual memory from user app */
3766 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3767 {
3768         struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3769                                                    struct bnxt_re_ucontext,
3770                                                    ib_uctx);
3771         struct bnxt_re_dev *rdev = uctx->rdev;
3772         u64 pfn;
3773
3774         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3775                 return -EINVAL;
3776
3777         if (vma->vm_pgoff) {
3778                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3779                 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3780                                        PAGE_SIZE, vma->vm_page_prot)) {
3781                         dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3782                         return -EAGAIN;
3783                 }
3784         } else {
3785                 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3786                 if (remap_pfn_range(vma, vma->vm_start,
3787                                     pfn, PAGE_SIZE, vma->vm_page_prot)) {
3788                         dev_err(rdev_to_dev(rdev),
3789                                 "Failed to map shared page");
3790                         return -EAGAIN;
3791                 }
3792         }
3793
3794         return 0;
3795 }
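
/*
 * Example (userspace view): the mmap offset selects what is mapped.
 * A zero pgoff yields the shared page; a non-zero pgoff is taken as
 * the PFN of the DPI doorbell. A minimal sketch, assuming the
 * provider library learned dbr_pfn from the driver ABI
 * (hypothetical name):
 *
 *	shpg = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, ctx->cmd_fd, 0);
 *	dbr  = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
 *		    ctx->cmd_fd, (off_t)dbr_pfn * page_size);
 */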