/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>

static int __from_ib_access_flags(int iflags)
{
        int qflags = 0;

        if (iflags & IB_ACCESS_LOCAL_WRITE)
                qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
        if (iflags & IB_ACCESS_REMOTE_READ)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
        if (iflags & IB_ACCESS_REMOTE_WRITE)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
        if (iflags & IB_ACCESS_REMOTE_ATOMIC)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
        if (iflags & IB_ACCESS_MW_BIND)
                qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
        if (iflags & IB_ZERO_BASED)
                qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
        if (iflags & IB_ACCESS_ON_DEMAND)
                qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
        return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
        enum ib_access_flags iflags = 0;

        if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
                iflags |= IB_ACCESS_LOCAL_WRITE;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
                iflags |= IB_ACCESS_REMOTE_WRITE;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
                iflags |= IB_ACCESS_REMOTE_READ;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
                iflags |= IB_ACCESS_REMOTE_ATOMIC;
        if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
                iflags |= IB_ACCESS_MW_BIND;
        if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
                iflags |= IB_ZERO_BASED;
        if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
                iflags |= IB_ACCESS_ON_DEMAND;
        return iflags;
}
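
/*
 * Illustrative sketch (not part of the driver): the two helpers above are
 * inverses for the flag bits they both handle, e.g.
 *
 *      int qflags = __from_ib_access_flags(IB_ACCESS_LOCAL_WRITE |
 *                                          IB_ACCESS_REMOTE_READ);
 *      qflags now has BNXT_QPLIB_ACCESS_LOCAL_WRITE and
 *      BNXT_QPLIB_ACCESS_REMOTE_READ set, and __to_ib_access_flags(qflags)
 *      recovers the original IB mask.
 */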

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
                             struct bnxt_qplib_sge *sg_list, int num)
{
        int i, total = 0;

        for (i = 0; i < num; i++) {
                sg_list[i].addr = ib_sg_list[i].addr;
                sg_list[i].lkey = ib_sg_list[i].lkey;
                sg_list[i].size = ib_sg_list[i].length;
                total += sg_list[i].size;
        }
        return total;
}
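
/*
 * Usage sketch (illustrative, assuming a work-request post path): callers
 * translate the SGEs of an ib_send_wr and may use the returned byte count
 * as the payload length, e.g.
 *
 *      payload = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
 *
 * "payload", "wr" and "wqe" here are hypothetical locals.
 */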

/* Device */
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct net_device *netdev = NULL;

        rcu_read_lock();
        if (rdev)
                netdev = rdev->netdev;
        if (netdev)
                dev_hold(netdev);

        rcu_read_unlock();
        return netdev;
}
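
/*
 * Note: the netdev returned above, if any, has had its reference count
 * bumped via dev_hold(); the caller owns that reference and must drop it
 * with dev_put() when done. A minimal (hypothetical) caller:
 *
 *      struct net_device *ndev = bnxt_re_get_netdev(ibdev, 1);
 *      if (ndev) {
 *              ... use ndev ...
 *              dev_put(ndev);
 *      }
 */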

int bnxt_re_query_device(struct ib_device *ibdev,
                         struct ib_device_attr *ib_attr,
                         struct ib_udata *udata)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        memset(ib_attr, 0, sizeof(*ib_attr));
        memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
               min(sizeof(dev_attr->fw_ver),
                   sizeof(ib_attr->fw_ver)));
        bnxt_qplib_get_guid(rdev->netdev->dev_addr,
                            (u8 *)&ib_attr->sys_image_guid);
        ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
        ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;

        ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
        ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
        ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
        ib_attr->max_qp = dev_attr->max_qp;
        ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
        ib_attr->device_cap_flags =
                                    IB_DEVICE_CURR_QP_STATE_MOD
                                    | IB_DEVICE_RC_RNR_NAK_GEN
                                    | IB_DEVICE_SHUTDOWN_PORT
                                    | IB_DEVICE_SYS_IMAGE_GUID
                                    | IB_DEVICE_LOCAL_DMA_LKEY
                                    | IB_DEVICE_RESIZE_MAX_WR
                                    | IB_DEVICE_PORT_ACTIVE_EVENT
                                    | IB_DEVICE_N_NOTIFY_CQ
                                    | IB_DEVICE_MEM_WINDOW
                                    | IB_DEVICE_MEM_WINDOW_TYPE_2B
                                    | IB_DEVICE_MEM_MGT_EXTENSIONS;
        ib_attr->max_sge = dev_attr->max_qp_sges;
        ib_attr->max_sge_rd = dev_attr->max_qp_sges;
        ib_attr->max_cq = dev_attr->max_cq;
        ib_attr->max_cqe = dev_attr->max_cq_wqes;
        ib_attr->max_mr = dev_attr->max_mr;
        ib_attr->max_pd = dev_attr->max_pd;
        ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
        ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
        ib_attr->atomic_cap = IB_ATOMIC_NONE;
        ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;

        ib_attr->max_ee_rd_atom = 0;
        ib_attr->max_res_rd_atom = 0;
        ib_attr->max_ee_init_rd_atom = 0;
        ib_attr->max_ee = 0;
        ib_attr->max_rdd = 0;
        ib_attr->max_mw = dev_attr->max_mw;
        ib_attr->max_raw_ipv6_qp = 0;
        ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
        ib_attr->max_mcast_grp = 0;
        ib_attr->max_mcast_qp_attach = 0;
        ib_attr->max_total_mcast_qp_attach = 0;
        ib_attr->max_ah = dev_attr->max_ah;

        ib_attr->max_fmr = 0;
        ib_attr->max_map_per_fmr = 0;

        ib_attr->max_srq = dev_attr->max_srq;
        ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
        ib_attr->max_srq_sge = dev_attr->max_srq_sges;

        ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

        ib_attr->max_pkeys = 1;
        ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
        return 0;
}
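
/*
 * The attributes reported above come from rdev->dev_attr, which the driver
 * fills from firmware at probe time. An in-kernel verbs consumer would
 * typically read them via the cached ib_device attrs, e.g. (illustrative,
 * "wanted_depth" is a hypothetical local):
 *
 *      if (ibdev->attrs.max_qp_wr < wanted_depth)
 *              ... fall back to a shallower queue ...
 */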

int bnxt_re_modify_device(struct ib_device *ibdev,
                          int device_modify_mask,
                          struct ib_device_modify *device_modify)
{
        switch (device_modify_mask) {
        case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
                /* Modifying the GUID requires modifying the GID table */
                /* GUID should be treated as read-only */
                break;
        case IB_DEVICE_MODIFY_NODE_DESC:
                /* Node Desc should be treated as read-only */
                break;
        default:
                break;
        }
        return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
                       struct ib_port_attr *port_attr)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        memset(port_attr, 0, sizeof(*port_attr));

        if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
                port_attr->state = IB_PORT_ACTIVE;
                port_attr->phys_state = 5;
        } else {
                port_attr->state = IB_PORT_DOWN;
                port_attr->phys_state = 3;
        }
        port_attr->max_mtu = IB_MTU_4096;
        port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
        port_attr->gid_tbl_len = dev_attr->max_sgid;
        port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                                    IB_PORT_DEVICE_MGMT_SUP |
                                    IB_PORT_VENDOR_CLASS_SUP |
                                    IB_PORT_IP_BASED_GIDS;

        port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
        port_attr->bad_pkey_cntr = 0;
        port_attr->qkey_viol_cntr = 0;
        port_attr->pkey_tbl_len = dev_attr->max_pkey;
        port_attr->lid = 0;
        port_attr->sm_lid = 0;
        port_attr->lmc = 0;
        port_attr->max_vl_num = 4;
        port_attr->sm_sl = 0;
        port_attr->subnet_timeout = 0;
        port_attr->init_type_reply = 0;
        port_attr->active_speed = rdev->active_speed;
        port_attr->active_width = rdev->active_width;

        return 0;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr port_attr;

        if (bnxt_re_query_port(ibdev, port_num, &port_attr))
                return -EINVAL;

        immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
        immutable->gid_tbl_len = port_attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
        immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        return 0;
}
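
/*
 * Illustrative note: the core_cap_flags above advertise both RoCE v1 and
 * RoCE v2 (ROCE_UDP_ENCAP) support, so core consumers that test, e.g.,
 * rdma_protocol_roce(ibdev, port_num) will see this port as a RoCE port.
 */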

void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

        snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
                 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
                 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
                       u16 index, u16 *pkey)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

        /* Ignore port_num */

        memset(pkey, 0, sizeof(*pkey));
        return bnxt_qplib_get_pkey(&rdev->qplib_res,
                                   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
                      int index, union ib_gid *gid)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        int rc = 0;

        /* Ignore port_num */
        memset(gid, 0, sizeof(*gid));
        rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
                                 &rdev->qplib_res.sgid_tbl, index,
                                 (struct bnxt_qplib_gid *)gid);
        return rc;
}

int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
{
        int rc = 0;
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
        struct bnxt_qplib_gid *gid_to_del;

        /* Delete the entry from the hardware */
        ctx = *context;
        if (!ctx)
                return -EINVAL;

        if (sgid_tbl && sgid_tbl->active) {
                if (ctx->idx >= sgid_tbl->max)
                        return -EINVAL;
                gid_to_del = &sgid_tbl->tbl[ctx->idx];
                /* DEL_GID is called in WQ context (netdevice_event_work_handler)
                 * or via the ib_unregister_device path. In the former case QP1
                 * may not be destroyed yet, in which case just return, as FW
                 * needs that entry to be present and will fail its deletion.
                 * We could get invoked again after QP1 is destroyed OR get an
                 * ADD_GID call with a different GID value for the same index,
                 * where we issue a MODIFY_GID cmd to update the GID entry -- TBD
                 */
                if (ctx->idx == 0 &&
                    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
                    ctx->refcnt == 1 && rdev->qp1_sqp) {
                        dev_dbg(rdev_to_dev(rdev),
                                "Trying to delete GID0 while QP1 is alive\n");
                        return -EFAULT;
                }
                ctx->refcnt--;
                if (!ctx->refcnt) {
                        rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
                        if (rc) {
                                dev_err(rdev_to_dev(rdev),
                                        "Failed to remove GID: %#x", rc);
                        } else {
                                ctx_tbl = sgid_tbl->ctx;
                                ctx_tbl[ctx->idx] = NULL;
                                kfree(ctx);
                        }
                }
        } else {
                return -EINVAL;
        }
        return rc;
}

int bnxt_re_add_gid(const union ib_gid *gid,
                    const struct ib_gid_attr *attr, void **context)
{
        int rc;
        u32 tbl_idx = 0;
        u16 vlan_id = 0xFFFF;
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

        if ((attr->ndev) && is_vlan_dev(attr->ndev))
                vlan_id = vlan_dev_vlan_id(attr->ndev);

        rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
                                 rdev->qplib_res.netdev->dev_addr,
                                 vlan_id, true, &tbl_idx);
        if (rc == -EALREADY) {
                ctx_tbl = sgid_tbl->ctx;
                ctx_tbl[tbl_idx]->refcnt++;
                *context = ctx_tbl[tbl_idx];
                return 0;
        }

        if (rc < 0) {
                dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
                return rc;
        }

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx_tbl = sgid_tbl->ctx;
        ctx->idx = tbl_idx;
        ctx->refcnt = 1;
        ctx_tbl[tbl_idx] = ctx;
        *context = ctx;

        return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
                                            u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

#define BNXT_RE_FENCE_PBL_SIZE  DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct ib_mr *ib_mr = &fence->mr->ib_mr;
        struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

        memset(wqe, 0, sizeof(*wqe));
        wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
        wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
        wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
        wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
        wqe->bind.zero_based = false;
        wqe->bind.parent_l_key = ib_mr->lkey;
        wqe->bind.va = (u64)(unsigned long)fence->va;
        wqe->bind.length = fence->size;
        wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
        wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

        /* Save the initial rkey in fence structure for now;
         * wqe->bind.r_key will be set at (re)bind time.
         */
        fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}
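
/*
 * The fence WQE built above is a type-1 memory-window bind carrying the
 * SIGNAL_COMP and UC_FENCE flags; it is replayed with a fresh rkey each
 * time it is posted (see bnxt_re_bind_fence_mw() below). ib_inc_rkey()
 * only bumps the low 8-bit key portion of the rkey, so the MW index stays
 * the same while the key value rotates.
 */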

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
        struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
                                             qplib_qp);
        struct ib_pd *ib_pd = qp->ib_qp.pd;
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
        struct bnxt_qplib_swqe wqe;
        int rc;

        memcpy(&wqe, fence_wqe, sizeof(wqe));
        wqe.bind.r_key = fence->bind_rkey;
        fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

        dev_dbg(rdev_to_dev(qp->rdev),
                "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
                wqe.bind.r_key, qp->qplib_qp.id, pd);
        rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
        if (rc) {
                dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
                return rc;
        }
        bnxt_qplib_post_send_db(&qp->qplib_qp);

        return rc;
}
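
/*
 * Sketch of the rkey rotation above (illustrative): each bind consumes
 * fence->bind_rkey and immediately pre-computes the next one, so two
 * successive binds on the same PD post bind WQEs with different rkeys:
 *
 *      bind #1: wqe.bind.r_key = R;     bind_rkey = ib_inc_rkey(R)
 *      bind #2: wqe.bind.r_key = R + 1  (in the low key byte), and so on.
 */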

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct bnxt_re_dev *rdev = pd->rdev;
        struct device *dev = &rdev->en_dev->pdev->dev;
        struct bnxt_re_mr *mr = fence->mr;

        if (fence->mw) {
                bnxt_re_dealloc_mw(fence->mw);
                fence->mw = NULL;
        }
        if (mr) {
                if (mr->ib_mr.rkey)
                        bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
                                             true);
                if (mr->ib_mr.lkey)
                        bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
                kfree(mr);
                fence->mr = NULL;
        }
        if (fence->dma_addr) {
                dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
                                 DMA_BIDIRECTIONAL);
                fence->dma_addr = 0;
        }
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
        int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
        struct bnxt_re_fence_data *fence = &pd->fence;
        struct bnxt_re_dev *rdev = pd->rdev;
        struct device *dev = &rdev->en_dev->pdev->dev;
        struct bnxt_re_mr *mr = NULL;
        dma_addr_t dma_addr = 0;
        struct ib_mw *mw;
        u64 pbl_tbl;
        int rc;

        dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
                                  DMA_BIDIRECTIONAL);
        rc = dma_mapping_error(dev, dma_addr);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
                rc = -EIO;
                fence->dma_addr = 0;
                goto fail;
        }
        fence->dma_addr = dma_addr;

        /* Allocate a MR */
        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr) {
                rc = -ENOMEM;
                goto fail;
        }
        fence->mr = mr;
        mr->rdev = rdev;
        mr->qplib_mr.pd = &pd->qplib_pd;
        mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
        mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
        rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
                goto fail;
        }

        /* Register MR */
        mr->ib_mr.lkey = mr->qplib_mr.lkey;
        mr->qplib_mr.va = (u64)(unsigned long)fence->va;
        mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
        pbl_tbl = dma_addr;
        rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
                               BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
                goto fail;
        }
        mr->ib_mr.rkey = mr->qplib_mr.rkey;

        /* Create a fence MW only for kernel consumers */
        mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
        if (IS_ERR(mw)) {
                dev_err(rdev_to_dev(rdev),
                        "Failed to create fence-MW for PD: %p\n", pd);
                rc = PTR_ERR(mw);
                goto fail;
        }
        fence->mw = mw;

        bnxt_re_create_fence_wqe(pd);
        return 0;

fail:
        bnxt_re_destroy_fence_mr(pd);
        return rc;
}
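
/*
 * Fence resource lifecycle (summary of the two functions above): map the
 * PD's fence buffer for DMA, allocate and register a physical MR over it,
 * allocate a type-1 MW, and pre-build the bind WQE. The destroy path tears
 * these down in the reverse order. This is done for kernel PDs only; user
 * PDs skip it (see bnxt_re_alloc_pd() below).
 */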

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        int rc;

        bnxt_re_destroy_fence_mr(pd);

        if (pd->qplib_pd.id) {
                rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
                                           &rdev->qplib_res.pd_tbl,
                                           &pd->qplib_pd);
                if (rc)
                        dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
        }

        kfree(pd);
        return 0;
}

struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
                               struct ib_ucontext *ucontext,
                               struct ib_udata *udata)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_re_ucontext *ucntx = container_of(ucontext,
                                                      struct bnxt_re_ucontext,
                                                      ib_uctx);
        struct bnxt_re_pd *pd;
        int rc;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        pd->rdev = rdev;
        if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
                dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
                rc = -ENOMEM;
                goto fail;
        }

        if (udata) {
                struct bnxt_re_pd_resp resp;

                if (!ucntx->dpi.dbr) {
                        /* Allocate the DPI in alloc_pd to avoid failures of
                         * ibv_devinfo and related applications when DPIs
                         * are depleted.
                         */
                        if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
                                                 &ucntx->dpi, ucntx)) {
                                rc = -ENOMEM;
                                goto dbfail;
                        }
                }

                resp.pdid = pd->qplib_pd.id;
                /* Still allow mapping this DBR to the new user PD. */
                resp.dpi = ucntx->dpi.dpi;
                resp.dbr = (u64)ucntx->dpi.umdbr;

                rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to copy user response\n");
                        goto dbfail;
                }
        }

        if (!udata)
                if (bnxt_re_create_fence_mr(pd))
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to create Fence-MR\n");
        return &pd->ib_pd;
dbfail:
        (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
                                    &pd->qplib_pd);
fail:
        kfree(pd);
        return ERR_PTR(rc);
}
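
/*
 * Rough control flow of bnxt_re_alloc_pd() (summary): allocate the HW PD;
 * for a user PD, lazily allocate the per-context DPI (doorbell page) and
 * return {pdid, dpi, dbr} through udata so the userspace library can map
 * the doorbell; for a kernel PD, create the fence MR/MW instead. A fence
 * creation failure is only a warning, not a hard error.
 */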

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
{
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
        struct bnxt_re_dev *rdev = ah->rdev;
        int rc;

        rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
                return rc;
        }
        kfree(ah);
        return 0;
}

struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
                                struct rdma_ah_attr *ah_attr,
                                struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_ah *ah;
        const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
        int rc;
        u8 nw_type;
        struct ib_gid_attr sgid_attr;

        if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
                dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
                return ERR_PTR(-EINVAL);
        }
        ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        ah->rdev = rdev;
        ah->qplib_ah.pd = &pd->qplib_pd;

        /* Supply the configuration for the HW */
        memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
               sizeof(union ib_gid));
        /*
         * If RoCE V2 is enabled, the stack will have two entries for
         * each GID entry. Avoid this duplicate entry in HW by dividing
         * the GID index by 2 for RoCE V2.
         */
        ah->qplib_ah.sgid_index = grh->sgid_index / 2;
        ah->qplib_ah.host_sgid_index = grh->sgid_index;
        ah->qplib_ah.traffic_class = grh->traffic_class;
        ah->qplib_ah.flow_label = grh->flow_label;
        ah->qplib_ah.hop_limit = grh->hop_limit;
        ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
        if (ib_pd->uobject &&
            !rdma_is_multicast_addr((struct in6_addr *)
                                    grh->dgid.raw) &&
            !rdma_link_local_addr((struct in6_addr *)
                                  grh->dgid.raw)) {
                union ib_gid sgid;

                rc = ib_get_cached_gid(&rdev->ibdev, 1,
                                       grh->sgid_index, &sgid,
                                       &sgid_attr);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to query gid at index %d",
                                grh->sgid_index);
                        goto fail;
                }
                dev_put(sgid_attr.ndev);
                /* Get network header type for this GID */
                nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
                switch (nw_type) {
                case RDMA_NETWORK_IPV4:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
                        break;
                case RDMA_NETWORK_IPV6:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
                        break;
                default:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
                        break;
                }
        }

        memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
        rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
                goto fail;
        }

        /* Write AVID to shared page. */
        if (ib_pd->uobject) {
                struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
                struct bnxt_re_ucontext *uctx;
                unsigned long flag;
                u32 *wrptr;

                uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
                spin_lock_irqsave(&uctx->sh_lock, flag);
                wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
                *wrptr = ah->qplib_ah.id;
                wmb(); /* make sure cache is updated. */
                spin_unlock_irqrestore(&uctx->sh_lock, flag);
        }

        return &ah->ib_ah;

fail:
        kfree(ah);
        return ERR_PTR(rc);
}
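
/*
 * GID index halving example (from the comment above): with RoCE v1+v2 the
 * stack stores each GID twice (once per gid_type), so host sgid_index 2
 * and 3 both map to HW SGID entry 1. For user AHs the AH id is also
 * published to the context's shared page at BNXT_RE_AVID_OFFT so the
 * userspace library can pick it up.
 */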

int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
        return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

        ah_attr->type = ib_ah->type;
        rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
        memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
        rdma_ah_set_grh(ah_attr, NULL, 0,
                        ah->qplib_ah.host_sgid_index,
                        0, ah->qplib_ah.traffic_class);
        rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
        rdma_ah_set_port_num(ah_attr, 1);
        rdma_ah_set_static_rate(ah_attr, 0);
        return 0;
}

unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
        __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->scq->cq_lock, flags);
        if (qp->rcq != qp->scq)
                spin_lock(&qp->rcq->cq_lock);
        else
                __acquire(&qp->rcq->cq_lock);

        return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
                        unsigned long flags)
        __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
        if (qp->rcq != qp->scq)
                spin_unlock(&qp->rcq->cq_lock);
        else
                __release(&qp->rcq->cq_lock);
        spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
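
/*
 * Typical pairing (as used in bnxt_re_destroy_qp() below): take both CQ
 * locks, clean the QP, then release in reverse order:
 *
 *      unsigned long flags = bnxt_re_lock_cqs(qp);
 *      bnxt_qplib_clean_qp(&qp->qplib_qp);
 *      bnxt_re_unlock_cqs(qp, flags);
 *
 * The scq == rcq case is handled with __acquire/__release annotations so
 * sparse sees balanced locking.
 */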

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_re_dev *rdev = qp->rdev;
        int rc;
        unsigned long flags;

        bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
        rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
                return rc;
        }

        flags = bnxt_re_lock_cqs(qp);
        bnxt_qplib_clean_qp(&qp->qplib_qp);
        bnxt_re_unlock_cqs(qp, flags);
        bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

        if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
                rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
                                           &rdev->sqp_ah->qplib_ah);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to destroy HW AH for shadow QP");
                        return rc;
                }

                bnxt_qplib_clean_qp(&qp->qplib_qp);
                rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
                                           &rdev->qp1_sqp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to destroy Shadow QP");
                        return rc;
                }
                mutex_lock(&rdev->qp_lock);
                list_del(&rdev->qp1_sqp->list);
                atomic_dec(&rdev->qp_count);
                mutex_unlock(&rdev->qp_lock);

                kfree(rdev->sqp_ah);
                kfree(rdev->qp1_sqp);
                rdev->qp1_sqp = NULL;
                rdev->sqp_ah = NULL;
        }

        if (!IS_ERR_OR_NULL(qp->rumem))
                ib_umem_release(qp->rumem);
        if (!IS_ERR_OR_NULL(qp->sumem))
                ib_umem_release(qp->sumem);

        mutex_lock(&rdev->qp_lock);
        list_del(&qp->list);
        atomic_dec(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);
        kfree(qp);
        return 0;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_GSI:
                return CMDQ_CREATE_QP1_TYPE_GSI;
        case IB_QPT_RC:
                return CMDQ_CREATE_QP_TYPE_RC;
        case IB_QPT_UD:
                return CMDQ_CREATE_QP_TYPE_UD;
        default:
                return IB_QPT_MAX;
        }
}
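
/*
 * Note: unsupported IB QP types map to the IB_QPT_MAX sentinel, which
 * callers must check, as bnxt_re_create_qp() does below:
 *
 *      qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
 *      if (qp->qplib_qp.type == IB_QPT_MAX)
 *              ... reject with -EINVAL ...
 */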

static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
                                struct bnxt_re_qp *qp, struct ib_udata *udata)
{
        struct bnxt_re_qp_req ureq;
        struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
        struct ib_umem *umem;
        int bytes = 0;
        struct ib_ucontext *context = pd->ib_pd.uobject->context;
        struct bnxt_re_ucontext *cntx = container_of(context,
                                                     struct bnxt_re_ucontext,
                                                     ib_uctx);

        if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                return -EFAULT;

        bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
        /* Consider mapping PSN search memory only for RC QPs. */
        if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
                bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
        bytes = PAGE_ALIGN(bytes);
        umem = ib_umem_get(context, ureq.qpsva, bytes,
                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(umem))
                return PTR_ERR(umem);

        qp->sumem = umem;
        qplib_qp->sq.sglist = umem->sg_head.sgl;
        qplib_qp->sq.nmap = umem->nmap;
        qplib_qp->qp_handle = ureq.qp_handle;

        if (!qp->qplib_qp.srq) {
                bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
                bytes = PAGE_ALIGN(bytes);
                umem = ib_umem_get(context, ureq.qprva, bytes,
                                   IB_ACCESS_LOCAL_WRITE, 1);
                if (IS_ERR(umem))
                        goto rqfail;
                qp->rumem = umem;
                qplib_qp->rq.sglist = umem->sg_head.sgl;
                qplib_qp->rq.nmap = umem->nmap;
        }

        qplib_qp->dpi = &cntx->dpi;
        return 0;
rqfail:
        ib_umem_release(qp->sumem);
        qp->sumem = NULL;
        qplib_qp->sq.sglist = NULL;
        qplib_qp->sq.nmap = 0;

        return PTR_ERR(umem);
}

static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
                                (struct bnxt_re_pd *pd,
                                 struct bnxt_qplib_res *qp1_res,
                                 struct bnxt_qplib_qp *qp1_qp)
{
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_ah *ah;
        union ib_gid sgid;
        int rc;

        ah = kzalloc(sizeof(*ah), GFP_KERNEL);
        if (!ah)
                return NULL;

        ah->rdev = rdev;
        ah->qplib_ah.pd = &pd->qplib_pd;

        rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
        if (rc)
                goto fail;

        /* Supply the dgid data the same as the sgid */
        memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
               sizeof(union ib_gid));
        ah->qplib_ah.sgid_index = 0;

        ah->qplib_ah.traffic_class = 0;
        ah->qplib_ah.flow_label = 0;
        ah->qplib_ah.hop_limit = 1;
        ah->qplib_ah.sl = 0;
        /* Have DMAC same as SMAC */
        ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

        rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev),
                        "Failed to allocate HW AH for Shadow QP");
                goto fail;
        }

        return ah;

fail:
        kfree(ah);
        return NULL;
}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
                                (struct bnxt_re_pd *pd,
                                 struct bnxt_qplib_res *qp1_res,
                                 struct bnxt_qplib_qp *qp1_qp)
{
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_qp *qp;
        int rc;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return NULL;

        qp->rdev = rdev;

        /* Initialize the shadow QP structure from the QP1 values */
        ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

        qp->qplib_qp.pd = &pd->qplib_pd;
        qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
        qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_UD;

        qp->qplib_qp.max_inline_data = 0;
        qp->qplib_qp.sig_type = true;

        /* Shadow QP SQ depth should be same as QP1 RQ depth */
        qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.sq.max_sge = 2;
        /* Q full delta can be 1 since it is internal QP */
        qp->qplib_qp.sq.q_full_delta = 1;

        qp->qplib_qp.scq = qp1_qp->scq;
        qp->qplib_qp.rcq = qp1_qp->rcq;

        qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
        /* Q full delta can be 1 since it is internal QP */
        qp->qplib_qp.rq.q_full_delta = 1;

        qp->qplib_qp.mtu = qp1_qp->mtu;

        qp->qplib_qp.sq_hdr_buf_size = 0;
        qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
        qp->qplib_qp.dpi = &rdev->dpi_privileged;

        rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
        if (rc)
                goto fail;

        rdev->sqp_id = qp->qplib_qp.id;

        spin_lock_init(&qp->sq_lock);
        INIT_LIST_HEAD(&qp->list);
        mutex_lock(&rdev->qp_lock);
        list_add_tail(&qp->list, &rdev->qp_list);
        atomic_inc(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);
        return qp;
fail:
        kfree(qp);
        return NULL;
}
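
/*
 * Background (summary): the shadow QP is an internal UD QP the driver uses
 * to relay QP1 (GSI) traffic. Its SQ depth mirrors the QP1 RQ depth so
 * every QP1 receive can be re-posted, and q_full_delta can be 1 because
 * only the driver posts to it. It shares QP1's CQs and uses the privileged
 * DPI.
 */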

struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
                                struct ib_qp_init_attr *qp_init_attr,
                                struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
        struct bnxt_re_qp *qp;
        struct bnxt_re_cq *cq;
        struct bnxt_re_srq *srq;
        int rc, entries;

        if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
            (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
            (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
            (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
            (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
                return ERR_PTR(-EINVAL);

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        qp->rdev = rdev;
        ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
        qp->qplib_qp.pd = &pd->qplib_pd;
        qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
        qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
        if (qp->qplib_qp.type == IB_QPT_MAX) {
                dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
                        qp->qplib_qp.type);
                rc = -EINVAL;
                goto fail;
        }
        qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
        qp->qplib_qp.sig_type = (qp_init_attr->sq_sig_type ==
                                 IB_SIGNAL_ALL_WR);

        qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
        if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
                qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

        if (qp_init_attr->send_cq) {
                cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
                                  ib_cq);
                if (!cq) {
                        dev_err(rdev_to_dev(rdev), "Send CQ not found");
                        rc = -EINVAL;
                        goto fail;
                }
                qp->qplib_qp.scq = &cq->qplib_cq;
                qp->scq = cq;
        }

        if (qp_init_attr->recv_cq) {
                cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
                                  ib_cq);
                if (!cq) {
                        dev_err(rdev_to_dev(rdev), "Receive CQ not found");
                        rc = -EINVAL;
                        goto fail;
                }
                qp->qplib_qp.rcq = &cq->qplib_cq;
                qp->rcq = cq;
        }

        if (qp_init_attr->srq) {
                srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
                                   ib_srq);
                if (!srq) {
                        dev_err(rdev_to_dev(rdev), "SRQ not found");
                        rc = -EINVAL;
                        goto fail;
                }
                qp->qplib_qp.srq = &srq->qplib_srq;
                qp->qplib_qp.rq.max_wqe = 0;
        } else {
                /* Allocate 1 more than what's provided so posting max doesn't
                 * mean empty
                 */
                entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
                qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes + 1);

                qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
                                                qp_init_attr->cap.max_recv_wr;

                qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
                if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
        }

        qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

        if (qp_init_attr->qp_type == IB_QPT_GSI) {
                /* Allocate 1 more than what's provided */
                entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
                qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes + 1);
                qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
                                                qp_init_attr->cap.max_send_wr;
                qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
                if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
                qp->qplib_qp.sq.max_sge++;
                if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

                qp->qplib_qp.rq_hdr_buf_size =
                                        BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;

                qp->qplib_qp.sq_hdr_buf_size =
                                        BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
                qp->qplib_qp.dpi = &rdev->dpi_privileged;
                rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
                        goto fail;
                }
                /* Create a shadow QP to handle the QP1 traffic */
                rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
                                                         &qp->qplib_qp);
                if (!rdev->qp1_sqp) {
                        rc = -EINVAL;
                        dev_err(rdev_to_dev(rdev),
                                "Failed to create Shadow QP for QP1");
                        goto qp_destroy;
                }
                rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
                                                           &qp->qplib_qp);
                if (!rdev->sqp_ah) {
                        bnxt_qplib_destroy_qp(&rdev->qplib_res,
                                              &rdev->qp1_sqp->qplib_qp);
                        rc = -EINVAL;
                        dev_err(rdev_to_dev(rdev),
                                "Failed to create AH entry for ShadowQP");
                        goto qp_destroy;
                }

        } else {
                /* Allocate BNXT_QPLIB_RESERVED_QP_WRS (128) + 1 more than
                 * what's provided
                 */
                entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
                                             BNXT_QPLIB_RESERVED_QP_WRS + 1);
                qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes +
                                                BNXT_QPLIB_RESERVED_QP_WRS + 1);
                qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;

                /*
                 * Reserve one slot for the phantom WQE. The application can
                 * post one extra entry in this case, but we allow it to avoid
                 * an unexpected queue-full condition.
                 */

                qp->qplib_qp.sq.q_full_delta -= 1;

                qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
                qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
                if (udata) {
                        rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
                        if (rc)
                                goto fail;
                } else {
                        qp->qplib_qp.dpi = &rdev->dpi_privileged;
                }

                rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
                        goto free_umem;
                }
        }

        qp->ib_qp.qp_num = qp->qplib_qp.id;
        spin_lock_init(&qp->sq_lock);
        spin_lock_init(&qp->rq_lock);

        if (udata) {
                struct bnxt_re_qp_resp resp;

                resp.qpid = qp->ib_qp.qp_num;
                resp.rsvd = 0;
                rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
                        goto qp_destroy;
                }
        }
        INIT_LIST_HEAD(&qp->list);
        mutex_lock(&rdev->qp_lock);
        list_add_tail(&qp->list, &rdev->qp_list);
        atomic_inc(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);

        return &qp->ib_qp;
qp_destroy:
        bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
        if (udata) {
                if (qp->rumem)
                        ib_umem_release(qp->rumem);
                if (qp->sumem)
                        ib_umem_release(qp->sumem);
        }
fail:
        kfree(qp);
        return ERR_PTR(rc);
}
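
/*
 * Consumer-side sketch (illustrative, not from this file): a kernel ULP
 * reaches this verb through ib_create_qp() with an init_attr such as:
 *
 *      struct ib_qp_init_attr attr = {
 *              .qp_type     = IB_QPT_RC,
 *              .send_cq     = scq,
 *              .recv_cq     = rcq,
 *              .sq_sig_type = IB_SIGNAL_REQ_WR,
 *              .cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *                       .max_send_sge = 2, .max_recv_sge = 2 },
 *      };
 *      struct ib_qp *qp = ib_create_qp(pd, &attr);
 *
 * "scq", "rcq" and the queue sizes are hypothetical.
 */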

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return CMDQ_MODIFY_QP_NEW_STATE_RESET;
        case IB_QPS_INIT:
                return CMDQ_MODIFY_QP_NEW_STATE_INIT;
        case IB_QPS_RTR:
                return CMDQ_MODIFY_QP_NEW_STATE_RTR;
        case IB_QPS_RTS:
                return CMDQ_MODIFY_QP_NEW_STATE_RTS;
        case IB_QPS_SQD:
                return CMDQ_MODIFY_QP_NEW_STATE_SQD;
        case IB_QPS_SQE:
                return CMDQ_MODIFY_QP_NEW_STATE_SQE;
        case IB_QPS_ERR:
        default:
                return CMDQ_MODIFY_QP_NEW_STATE_ERR;
        }
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
        switch (state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RESET:
                return IB_QPS_RESET;
        case CMDQ_MODIFY_QP_NEW_STATE_INIT:
                return IB_QPS_INIT;
        case CMDQ_MODIFY_QP_NEW_STATE_RTR:
                return IB_QPS_RTR;
        case CMDQ_MODIFY_QP_NEW_STATE_RTS:
                return IB_QPS_RTS;
        case CMDQ_MODIFY_QP_NEW_STATE_SQD:
                return IB_QPS_SQD;
        case CMDQ_MODIFY_QP_NEW_STATE_SQE:
                return IB_QPS_SQE;
        case CMDQ_MODIFY_QP_NEW_STATE_ERR:
        default:
                return IB_QPS_ERR;
        }
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
        case IB_MTU_512:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
        case IB_MTU_1024:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
        case IB_MTU_2048:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
        case IB_MTU_4096:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
        default:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
        }
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
        switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
                return IB_MTU_256;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
                return IB_MTU_512;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
                return IB_MTU_1024;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
                return IB_MTU_2048;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
                return IB_MTU_4096;
        default:
                return IB_MTU_2048;
        }
}
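
/*
 * Example (illustrative): a netdev MTU of 1500 leaves room for a 1024-byte
 * IB path MTU once RoCE headers are accounted for, so iboe_get_mtu(1500)
 * yields IB_MTU_1024, which __from_ib_mtu() then encodes as
 * CMDQ_MODIFY_QP_PATH_MTU_MTU_1024.
 */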

/* Shared Receive Queues */
int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
{
        struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
                                               ib_srq);
        struct bnxt_re_dev *rdev = srq->rdev;
        struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
        struct bnxt_qplib_nq *nq = NULL;
        int rc;

        if (qplib_srq->cq)
                nq = qplib_srq->cq->nq;
        rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
                return rc;
        }

        if (srq->umem)
                ib_umem_release(srq->umem);
        kfree(srq);
        atomic_dec(&rdev->srq_count);
        if (nq)
                nq->budget--;
        return 0;
}

static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
                                 struct bnxt_re_pd *pd,
                                 struct bnxt_re_srq *srq,
                                 struct ib_udata *udata)
{
        struct bnxt_re_srq_req ureq;
        struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
        struct ib_umem *umem;
        int bytes = 0;
        struct ib_ucontext *context = pd->ib_pd.uobject->context;
        struct bnxt_re_ucontext *cntx = container_of(context,
                                                     struct bnxt_re_ucontext,
                                                     ib_uctx);

        if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                return -EFAULT;

        bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
        bytes = PAGE_ALIGN(bytes);
        umem = ib_umem_get(context, ureq.srqva, bytes,
                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(umem))
                return PTR_ERR(umem);

        srq->umem = umem;
        qplib_srq->nmap = umem->nmap;
        qplib_srq->sglist = umem->sg_head.sgl;
        qplib_srq->srq_handle = ureq.srq_handle;
        qplib_srq->dpi = &cntx->dpi;

        return 0;
}
1392
1393 struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
1394                                   struct ib_srq_init_attr *srq_init_attr,
1395                                   struct ib_udata *udata)
1396 {
1397         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1398         struct bnxt_re_dev *rdev = pd->rdev;
1399         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1400         struct bnxt_re_srq *srq;
1401         struct bnxt_qplib_nq *nq = NULL;
1402         int rc, entries;
1403
1404         if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1405                 dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
1406                 rc = -EINVAL;
1407                 goto exit;
1408         }
1409
1410         if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1411                 rc = -ENOTSUPP;
1412                 goto exit;
1413         }
1414
1415         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1416         if (!srq) {
1417                 rc = -ENOMEM;
1418                 goto exit;
1419         }
1420         srq->rdev = rdev;
1421         srq->qplib_srq.pd = &pd->qplib_pd;
1422         srq->qplib_srq.dpi = &rdev->dpi_privileged;
1423         /* Allocate 1 more WQE than requested so that posting the
1424          * maximum number of WRs never makes the queue appear empty
1425          */
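        /*
         * For example (illustrative): max_wr = 256 gives
         * roundup_pow_of_two(257) = 512 entries, which the check below
         * then clamps to dev_attr->max_srq_wqes + 1.
         */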
1426         entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1427         if (entries > dev_attr->max_srq_wqes + 1)
1428                 entries = dev_attr->max_srq_wqes + 1;
1429
1430         srq->qplib_srq.max_wqe = entries;
1431         srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1432         srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1433         srq->srq_limit = srq_init_attr->attr.srq_limit;
1434         srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1435         nq = &rdev->nq[0];
1436
1437         if (udata) {
1438                 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1439                 if (rc)
1440                         goto fail;
1441         }
1442
1443         rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1444         if (rc) {
1445                 dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1446                 goto fail;
1447         }
1448
1449         if (udata) {
1450                 struct bnxt_re_srq_resp resp;
1451
1452                 resp.srqid = srq->qplib_srq.id;
1453                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1454                 if (rc) {
1455                         dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1456                         bnxt_qplib_destroy_srq(&rdev->qplib_res,
1457                                                &srq->qplib_srq);
1458                         goto exit;
1459                 }
1460         }
1461         if (nq)
1462                 nq->budget++;
1463         atomic_inc(&rdev->srq_count);
1464
1465         return &srq->ib_srq;
1466
1467 fail:
1468         if (srq->umem)
1469                 ib_umem_release(srq->umem);
1470         kfree(srq);
1471 exit:
1472         return ERR_PTR(rc);
1473 }
1474
1475 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1476                        enum ib_srq_attr_mask srq_attr_mask,
1477                        struct ib_udata *udata)
1478 {
1479         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1480                                                ib_srq);
1481         struct bnxt_re_dev *rdev = srq->rdev;
1482         int rc;
1483
1484         switch (srq_attr_mask) {
1485         case IB_SRQ_MAX_WR:
1486                 /* SRQ resize is not supported */
1487                 break;
1488         case IB_SRQ_LIMIT:
1489                 /* Change the SRQ threshold */
1490                 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1491                         return -EINVAL;
1492
1493                 srq->qplib_srq.threshold = srq_attr->srq_limit;
1494                 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1495                 if (rc) {
1496                         dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1497                         return rc;
1498                 }
1499                 /* On success, update the shadow */
1500                 srq->srq_limit = srq_attr->srq_limit;
1501                 /* No need to build and send a response back to udata */
1502                 break;
1503         default:
1504                 dev_err(rdev_to_dev(rdev),
1505                         "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1506                 return -EINVAL;
1507         }
1508         return 0;
1509 }
1510
1511 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1512 {
1513         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1514                                                ib_srq);
1515         struct bnxt_re_srq tsrq;
1516         struct bnxt_re_dev *rdev = srq->rdev;
1517         int rc;
1518
1519         /* Get live SRQ attr */
1520         tsrq.qplib_srq.id = srq->qplib_srq.id;
1521         rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1522         if (rc) {
1523                 dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1524                 return rc;
1525         }
1526         srq_attr->max_wr = srq->qplib_srq.max_wqe;
1527         srq_attr->max_sge = srq->qplib_srq.max_sge;
1528         srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1529
1530         return 0;
1531 }
1532
1533 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
1534                           struct ib_recv_wr **bad_wr)
1535 {
1536         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1537                                                ib_srq);
1538         struct bnxt_qplib_swqe wqe;
1539         unsigned long flags;
1540         int rc = 0;
1541
1542         spin_lock_irqsave(&srq->lock, flags);
1543         while (wr) {
1544                 /* Transcribe each ib_recv_wr to qplib_swqe */
1545                 wqe.num_sge = wr->num_sge;
1546                 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1547                 wqe.wr_id = wr->wr_id;
1548                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1549
1550                 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1551                 if (rc) {
1552                         *bad_wr = wr;
1553                         break;
1554                 }
1555                 wr = wr->next;
1556         }
1557         spin_unlock_irqrestore(&srq->lock, flags);
1558
1559         return rc;
1560 }

1561 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1562                                     struct bnxt_re_qp *qp1_qp,
1563                                     int qp_attr_mask)
1564 {
1565         struct bnxt_re_qp *qp = rdev->qp1_sqp;
1566         int rc = 0;
1567
1568         if (qp_attr_mask & IB_QP_STATE) {
1569                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1570                 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1571         }
1572         if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1573                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1574                 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1575         }
1576
1577         if (qp_attr_mask & IB_QP_QKEY) {
1578                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1579                 /* Use an arbitrary fixed QKEY */
1580                 qp->qplib_qp.qkey = 0x81818181;
1581         }
1582         if (qp_attr_mask & IB_QP_SQ_PSN) {
1583                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1584                 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1585         }
1586
1587         rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1588         if (rc)
1589                 dev_err(rdev_to_dev(rdev),
1590                         "Failed to modify Shadow QP for QP1");
1591         return rc;
1592 }
1593
1594 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1595                       int qp_attr_mask, struct ib_udata *udata)
1596 {
1597         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1598         struct bnxt_re_dev *rdev = qp->rdev;
1599         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1600         enum ib_qp_state curr_qp_state, new_qp_state;
1601         int rc, entries;
1602         int status;
1603         union ib_gid sgid;
1604         struct ib_gid_attr sgid_attr;
1605         unsigned int flags;
1606         u8 nw_type;
1607
1608         qp->qplib_qp.modify_flags = 0;
1609         if (qp_attr_mask & IB_QP_STATE) {
1610                 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1611                 new_qp_state = qp_attr->qp_state;
1612                 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1613                                         ib_qp->qp_type, qp_attr_mask,
1614                                         IB_LINK_LAYER_ETHERNET)) {
1615                         dev_err(rdev_to_dev(rdev),
1616                                 "Invalid attribute mask: %#x specified ",
1617                                 qp_attr_mask);
1618                         dev_err(rdev_to_dev(rdev),
1619                                 "for qpn: %#x type: %#x",
1620                                 ib_qp->qp_num, ib_qp->qp_type);
1621                         dev_err(rdev_to_dev(rdev),
1622                                 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1623                                 curr_qp_state, new_qp_state);
1624                         return -EINVAL;
1625                 }
1626                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1627                 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1628
1629                 if (!qp->sumem &&
1630                     qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1631                         dev_dbg(rdev_to_dev(rdev),
1632                                 "Move QP = %p to flush list\n",
1633                                 qp);
1634                         flags = bnxt_re_lock_cqs(qp);
1635                         bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1636                         bnxt_re_unlock_cqs(qp, flags);
1637                 }
1638                 if (!qp->sumem &&
1639                     qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1640                         dev_dbg(rdev_to_dev(rdev),
1641                                 "Move QP = %p out of flush list\n",
1642                                 qp);
1643                         flags = bnxt_re_lock_cqs(qp);
1644                         bnxt_qplib_clean_qp(&qp->qplib_qp);
1645                         bnxt_re_unlock_cqs(qp, flags);
1646                 }
1647         }
1648         if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1649                 qp->qplib_qp.modify_flags |=
1650                                 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1651                 qp->qplib_qp.en_sqd_async_notify = true;
1652         }
1653         if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1654                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1655                 qp->qplib_qp.access =
1656                         __from_ib_access_flags(qp_attr->qp_access_flags);
1657                 /* LOCAL_WRITE access must be set to allow RC receive */
1658                 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1659         }
1660         if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1661                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1662                 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1663         }
1664         if (qp_attr_mask & IB_QP_QKEY) {
1665                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1666                 qp->qplib_qp.qkey = qp_attr->qkey;
1667         }
1668         if (qp_attr_mask & IB_QP_AV) {
1669                 const struct ib_global_route *grh =
1670                         rdma_ah_read_grh(&qp_attr->ah_attr);
1671
1672                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1673                                      CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1674                                      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1675                                      CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1676                                      CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1677                                      CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1678                                      CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1679                 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1680                        sizeof(qp->qplib_qp.ah.dgid.data));
1681                 qp->qplib_qp.ah.flow_label = grh->flow_label;
1682                 /* If RoCE V2 is enabled, the stack will have two entries
1683                  * for each GID entry. Avoid this duplicate entry in HW by
1684                  * dividing the GID index by 2 for RoCE V2.
1685                  */
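                /*
                 * e.g. (illustrative): a RoCE v1/v2 pair at stack GID
                 * indices 2 and 3 both map to HW SGID index 1.
                 */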
1686                 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1687                 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1688                 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1689                 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1690                 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1691                 ether_addr_copy(qp->qplib_qp.ah.dmac,
1692                                 qp_attr->ah_attr.roce.dmac);
1693
1694                 status = ib_get_cached_gid(&rdev->ibdev, 1,
1695                                            grh->sgid_index,
1696                                            &sgid, &sgid_attr);
1697                 if (!status) {
1698                         memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1699                                ETH_ALEN);
1700                         dev_put(sgid_attr.ndev);
1701                         nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1702                                                          &sgid);
1703                         switch (nw_type) {
1704                         case RDMA_NETWORK_IPV4:
1705                                 qp->qplib_qp.nw_type =
1706                                         CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1707                                 break;
1708                         case RDMA_NETWORK_IPV6:
1709                                 qp->qplib_qp.nw_type =
1710                                         CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1711                                 break;
1712                         default:
1713                                 qp->qplib_qp.nw_type =
1714                                         CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1715                                 break;
1716                         }
1717                 }
1718         }
1719
1720         if (qp_attr_mask & IB_QP_PATH_MTU) {
1721                 qp->qplib_qp.modify_flags |=
1722                                 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1723                 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1724                 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1725         } else if (qp_attr->qp_state == IB_QPS_RTR) {
1726                 qp->qplib_qp.modify_flags |=
1727                         CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1728                 qp->qplib_qp.path_mtu =
1729                         __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1730                 qp->qplib_qp.mtu =
1731                         ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1732         }
1733
1734         if (qp_attr_mask & IB_QP_TIMEOUT) {
1735                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1736                 qp->qplib_qp.timeout = qp_attr->timeout;
1737         }
1738         if (qp_attr_mask & IB_QP_RETRY_CNT) {
1739                 qp->qplib_qp.modify_flags |=
1740                                 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1741                 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1742         }
1743         if (qp_attr_mask & IB_QP_RNR_RETRY) {
1744                 qp->qplib_qp.modify_flags |=
1745                                 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1746                 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1747         }
1748         if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1749                 qp->qplib_qp.modify_flags |=
1750                                 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1751                 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1752         }
1753         if (qp_attr_mask & IB_QP_RQ_PSN) {
1754                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1755                 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1756         }
1757         if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1758                 qp->qplib_qp.modify_flags |=
1759                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1760                 /* Cap the max_rd_atomic to device max */
1761                 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1762                                                    dev_attr->max_qp_rd_atom);
1763         }
1764         if (qp_attr_mask & IB_QP_SQ_PSN) {
1765                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1766                 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1767         }
1768         if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1769                 if (qp_attr->max_dest_rd_atomic >
1770                     dev_attr->max_qp_init_rd_atom) {
1771                         dev_err(rdev_to_dev(rdev),
1772                 "max_dest_rd_atomic requested %d is > dev_max %d",
1773                                 qp_attr->max_dest_rd_atomic,
1774                                 dev_attr->max_qp_init_rd_atom);
1775                         return -EINVAL;
1776                 }
1777
1778                 qp->qplib_qp.modify_flags |=
1779                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1780                 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1781         }
1782         if (qp_attr_mask & IB_QP_CAP) {
1783                 qp->qplib_qp.modify_flags |=
1784                                 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1785                                 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1786                                 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1787                                 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1788                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1789                 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1790                     (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1791                     (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1792                     (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1793                     (qp_attr->cap.max_inline_data >=
1794                                                 dev_attr->max_inline_data)) {
1795                         dev_err(rdev_to_dev(rdev),
1796                                 "Create QP failed - max exceeded");
1797                         return -EINVAL;
1798                 }
1799                 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1800                 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1801                                                 dev_attr->max_qp_wqes + 1);
1802                 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1803                                                 qp_attr->cap.max_send_wr;
1804                 /*
1805                  * Reserve one slot for the phantom WQE. Some applications
1806                  * can post one extra entry in this case, so allow for it
1807                  * to avoid an unexpected queue-full condition.
1808                  */
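                /*
                 * e.g. (illustrative): max_send_wr = 100 rounds up to 128
                 * WQEs (assuming the device max is not the limit), giving
                 * q_full_delta = 28, reduced to 27 by the phantom-WQE
                 * reservation below.
                 */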
1809                 qp->qplib_qp.sq.q_full_delta -= 1;
1810                 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1811                 if (qp->qplib_qp.rq.max_wqe) {
1812                         entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1813                         qp->qplib_qp.rq.max_wqe =
1814                                 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1815                         qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1816                                                        qp_attr->cap.max_recv_wr;
1817                         qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1818                 } else {
1819                         /* SRQ was used prior, just ignore the RQ caps */
1820                 }
1821         }
1822         if (qp_attr_mask & IB_QP_DEST_QPN) {
1823                 qp->qplib_qp.modify_flags |=
1824                                 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1825                 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1826         }
1827         rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1828         if (rc) {
1829                 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1830                 return rc;
1831         }
1832         if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1833                 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1834         return rc;
1835 }
1836
1837 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1838                      int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1839 {
1840         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1841         struct bnxt_re_dev *rdev = qp->rdev;
1842         struct bnxt_qplib_qp *qplib_qp;
1843         int rc;
1844
1845         qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1846         if (!qplib_qp)
1847                 return -ENOMEM;
1848
1849         qplib_qp->id = qp->qplib_qp.id;
1850         qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1851
1852         rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1853         if (rc) {
1854                 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1855                 goto out;
1856         }
1857         qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1858         qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1859         qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1860         qp_attr->pkey_index = qplib_qp->pkey_index;
1861         qp_attr->qkey = qplib_qp->qkey;
1862         qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1863         rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1864                         qplib_qp->ah.host_sgid_index,
1865                         qplib_qp->ah.hop_limit,
1866                         qplib_qp->ah.traffic_class);
1867         rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1868         rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1869         ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1870         qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1871         qp_attr->timeout = qplib_qp->timeout;
1872         qp_attr->retry_cnt = qplib_qp->retry_cnt;
1873         qp_attr->rnr_retry = qplib_qp->rnr_retry;
1874         qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1875         qp_attr->rq_psn = qplib_qp->rq.psn;
1876         qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1877         qp_attr->sq_psn = qplib_qp->sq.psn;
1878         qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1879         qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1880                                                          IB_SIGNAL_REQ_WR;
1881         qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1882
1883         qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1884         qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1885         qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1886         qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1887         qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1888         qp_init_attr->cap = qp_attr->cap;
1889
1890 out:
1891         kfree(qplib_qp);
1892         return rc;
1893 }
1894
1895 /* Routine for sending QP1 packets for RoCE V1 and V2
1896  */
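/*
 * The header stack assembled below, per destination GID type, is:
 *   RoCE v1:      ETH [+ VLAN] + GRH + BTH + DETH
 *   RoCE v2/IPv4: ETH [+ VLAN] + IPv4 + UDP + BTH + DETH
 *   RoCE v2/IPv6: ETH [+ VLAN] + IPv6 (GRH layout) + UDP + BTH + DETH
 */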
1897 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1898                                      struct ib_send_wr *wr,
1899                                      struct bnxt_qplib_swqe *wqe,
1900                                      int payload_size)
1901 {
1902         struct ib_device *ibdev = &qp->rdev->ibdev;
1903         struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1904                                              ib_ah);
1905         struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1906         struct bnxt_qplib_sge sge;
1907         union ib_gid sgid;
1908         u8 nw_type;
1909         u16 ether_type;
1910         struct ib_gid_attr sgid_attr;
1911         union ib_gid dgid;
1912         bool is_eth = false;
1913         bool is_vlan = false;
1914         bool is_grh = false;
1915         bool is_udp = false;
1916         u8 ip_version = 0;
1917         u16 vlan_id = 0xFFFF;
1918         void *buf;
1919         int i, rc = 0;
1920
1921         memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1922
1923         rc = ib_get_cached_gid(ibdev, 1,
1924                                qplib_ah->host_sgid_index, &sgid,
1925                                &sgid_attr);
1926         if (rc) {
1927                 dev_err(rdev_to_dev(qp->rdev),
1928                         "Failed to query gid at index %d",
1929                         qplib_ah->host_sgid_index);
1930                 return rc;
1931         }
1932         if (sgid_attr.ndev) {
1933                 if (is_vlan_dev(sgid_attr.ndev))
1934                         vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1935                 dev_put(sgid_attr.ndev);
1936         }
1937         /* Get network header type for this GID */
1938         nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1939         switch (nw_type) {
1940         case RDMA_NETWORK_IPV4:
1941                 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1942                 break;
1943         case RDMA_NETWORK_IPV6:
1944                 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1945                 break;
1946         default:
1947                 nw_type = BNXT_RE_ROCE_V1_PACKET;
1948                 break;
1949         }
1950         memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1951         is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1952         if (is_udp) {
1953                 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1954                         ip_version = 4;
1955                         ether_type = ETH_P_IP;
1956                 } else {
1957                         ip_version = 6;
1958                         ether_type = ETH_P_IPV6;
1959                 }
1960                 is_grh = false;
1961         } else {
1962                 ether_type = ETH_P_IBOE;
1963                 is_grh = true;
1964         }
1965
1966         is_eth = true;
1967         is_vlan = vlan_id && vlan_id < 0x1000;
1968
1969         ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1970                           ip_version, is_udp, 0, &qp->qp1_hdr);
1971
1972         /* ETH */
1973         ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1974         ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1975
1976         /* For vlan, check the sgid for vlan existence */
1977
1978         if (!is_vlan) {
1979                 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1980         } else {
1981                 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1982                 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1983         }
1984
1985         if (is_grh || (ip_version == 6)) {
1986                 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1987                 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1988                        sizeof(sgid));
1989                 qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
1990         }
1991
1992         if (ip_version == 4) {
1993                 qp->qp1_hdr.ip4.tos = 0;
1994                 qp->qp1_hdr.ip4.id = 0;
1995                 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1996                 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1997
1998                 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1999                 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2000                 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2001         }
2002
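        /*
         * RoCE v2 UDP header below: the destination port is the
         * IANA-assigned RoCE v2 port (ROCE_V2_UDP_DPORT, 4791); the
         * source port is a fixed driver-chosen value rather than one
         * derived from the flow.
         */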
2003         if (is_udp) {
2004                 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2005                 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2006                 qp->qp1_hdr.udp.csum = 0;
2007         }
2008
2009         /* BTH */
2010         if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2011                 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2012                 qp->qp1_hdr.immediate_present = 1;
2013         } else {
2014                 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2015         }
2016         if (wr->send_flags & IB_SEND_SOLICITED)
2017                 qp->qp1_hdr.bth.solicited_event = 1;
2018         /* pad_count */
2019         qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
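        /*
         * e.g. (illustrative): payload_size = 13 gives pad_count = 3,
         * padding the payload to the 4-byte multiple BTH requires.
         */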
2020
2021         /* P_key for QP1 is for all members */
2022         qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2023         qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2024         qp->qp1_hdr.bth.ack_req = 0;
2025         qp->send_psn++;
2026         qp->send_psn &= BTH_PSN_MASK;
2027         qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2028         /* DETH */
2029         /* Use the privileged Q_Key for QP1 */
2030         qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2031         qp->qp1_hdr.deth.source_qpn = IB_QP1;
2032
2033         /* Pack the QP1 to the transmit buffer */
2034         buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2035         if (buf) {
2036                 ib_ud_header_pack(&qp->qp1_hdr, buf);
2037                 for (i = wqe->num_sge; i; i--) {
2038                         wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2039                         wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2040                         wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2041                 }
2042
2043                 /*
2044                  * Max Header buf size for IPV6 RoCE V2 is 86,
2045                  * which is same as the QP1 SQ header buffer.
2046                  * Header buf size for IPV4 RoCE V2 can be 66.
2047                  * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
2048                  * Subtract 20 bytes from QP1 SQ header buf size
2049                  */
2050                 if (is_udp && ip_version == 4)
2051                         sge.size -= 20;
2052                 /*
2053                  * Max Header buf size for RoCE V1 is 78.
2054                  * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2055                  * Subtract 8 bytes from QP1 SQ header buf size
2056                  */
2057                 if (!is_udp)
2058                         sge.size -= 8;
2059
2060                 /* Subtract 4 bytes for non vlan packets */
2061                 if (!is_vlan)
2062                         sge.size -= 4;
2063
2064                 wqe->sg_list[0].addr = sge.addr;
2065                 wqe->sg_list[0].lkey = sge.lkey;
2066                 wqe->sg_list[0].size = sge.size;
2067                 wqe->num_sge++;
2068
2069         } else {
2070                 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
2071                 rc = -ENOMEM;
2072         }
2073         return rc;
2074 }
2075
2076 /* For the MAD layer, it only provides the recv SGE the size of
2077  * ib_grh + MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH,
2078  * nor RoCE iCRC.  The Cu+ solution must provide buffer for the entire
2079  * receive packet (334 bytes) with no VLAN and then copy the GRH
2080  * and the MAD datagram out to the provided SGE.
2081  */
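/*
 * Buffer handling below: sg_list[0] is repointed at a driver-owned QP1 RQ
 * buffer of BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 bytes, while the ULP's
 * original SGE is parked in sqp_tbl[rq_prod_index] so the completion path
 * can copy the GRH and MAD datagram out to it later.
 */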
2082 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2083                                             struct ib_recv_wr *wr,
2084                                             struct bnxt_qplib_swqe *wqe,
2085                                             int payload_size)
2086 {
2087         struct bnxt_qplib_sge ref, sge;
2088         u32 rq_prod_index;
2089         struct bnxt_re_sqp_entries *sqp_entry;
2090
2091         rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2092
2093         if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2094                 return -ENOMEM;
2095
2096         /* Create 1 SGE to receive the entire
2097          * ethernet packet
2098          */
2099         /* Save the reference from ULP */
2100         ref.addr = wqe->sg_list[0].addr;
2101         ref.lkey = wqe->sg_list[0].lkey;
2102         ref.size = wqe->sg_list[0].size;
2103
2104         sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2105
2106         /* SGE 1 */
2107         wqe->sg_list[0].addr = sge.addr;
2108         wqe->sg_list[0].lkey = sge.lkey;
2109         wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2110         sge.size -= wqe->sg_list[0].size;
2111
2112         sqp_entry->sge.addr = ref.addr;
2113         sqp_entry->sge.lkey = ref.lkey;
2114         sqp_entry->sge.size = ref.size;
2115         /* Store the wrid for reporting completion */
2116         sqp_entry->wrid = wqe->wr_id;
2117         /* Change wqe->wr_id to the table index */
2118         wqe->wr_id = rq_prod_index;
2119         return 0;
2120 }
2121
2122 static int is_ud_qp(struct bnxt_re_qp *qp)
2123 {
2124         return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
2125 }
2126
2127 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2128                                   struct ib_send_wr *wr,
2129                                   struct bnxt_qplib_swqe *wqe)
2130 {
2131         struct bnxt_re_ah *ah = NULL;
2132
2133         if (is_ud_qp(qp)) {
2134                 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2135                 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2136                 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2137                 wqe->send.avid = ah->qplib_ah.id;
2138         }
2139         switch (wr->opcode) {
2140         case IB_WR_SEND:
2141                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2142                 break;
2143         case IB_WR_SEND_WITH_IMM:
2144                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2145                 wqe->send.imm_data = wr->ex.imm_data;
2146                 break;
2147         case IB_WR_SEND_WITH_INV:
2148                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2149                 wqe->send.inv_key = wr->ex.invalidate_rkey;
2150                 break;
2151         default:
2152                 return -EINVAL;
2153         }
2154         if (wr->send_flags & IB_SEND_SIGNALED)
2155                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2156         if (wr->send_flags & IB_SEND_FENCE)
2157                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2158         if (wr->send_flags & IB_SEND_SOLICITED)
2159                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2160         if (wr->send_flags & IB_SEND_INLINE)
2161                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2162
2163         return 0;
2164 }
2165
2166 static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
2167                                   struct bnxt_qplib_swqe *wqe)
2168 {
2169         switch (wr->opcode) {
2170         case IB_WR_RDMA_WRITE:
2171                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2172                 break;
2173         case IB_WR_RDMA_WRITE_WITH_IMM:
2174                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2175                 wqe->rdma.imm_data = wr->ex.imm_data;
2176                 break;
2177         case IB_WR_RDMA_READ:
2178                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2179                 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2180                 break;
2181         default:
2182                 return -EINVAL;
2183         }
2184         wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2185         wqe->rdma.r_key = rdma_wr(wr)->rkey;
2186         if (wr->send_flags & IB_SEND_SIGNALED)
2187                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2188         if (wr->send_flags & IB_SEND_FENCE)
2189                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2190         if (wr->send_flags & IB_SEND_SOLICITED)
2191                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2192         if (wr->send_flags & IB_SEND_INLINE)
2193                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2194
2195         return 0;
2196 }
2197
2198 static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
2199                                     struct bnxt_qplib_swqe *wqe)
2200 {
2201         switch (wr->opcode) {
2202         case IB_WR_ATOMIC_CMP_AND_SWP:
2203                 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2204                 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2205                 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2206                 break;
2207         case IB_WR_ATOMIC_FETCH_AND_ADD:
2208                 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2209                 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2210                 break;
2211         default:
2212                 return -EINVAL;
2213         }
2214         wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2215         wqe->atomic.r_key = atomic_wr(wr)->rkey;
2216         if (wr->send_flags & IB_SEND_SIGNALED)
2217                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2218         if (wr->send_flags & IB_SEND_FENCE)
2219                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2220         if (wr->send_flags & IB_SEND_SOLICITED)
2221                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2222         return 0;
2223 }
2224
2225 static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
2226                                  struct bnxt_qplib_swqe *wqe)
2227 {
2228         wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2229         wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2230
2231         /* Need unconditional fence for local invalidate
2232          * opcode to work as expected.
2233          */
2234         wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2235
2236         if (wr->send_flags & IB_SEND_SIGNALED)
2237                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2238         if (wr->send_flags & IB_SEND_SOLICITED)
2239                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2240
2241         return 0;
2242 }
2243
2244 static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
2245                                  struct bnxt_qplib_swqe *wqe)
2246 {
2247         struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2248         struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2249         int access = wr->access;
2250
2251         wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2252         wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2253         wqe->frmr.page_list = mr->pages;
2254         wqe->frmr.page_list_len = mr->npages;
2255         wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2256         wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2257
2258         /* Need unconditional fence for reg_mr
2259          * opcode to function as expected.
2260          */
2261
2262         wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2263
2264         if (wr->wr.send_flags & IB_SEND_SIGNALED)
2265                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2266
2267         if (access & IB_ACCESS_LOCAL_WRITE)
2268                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2269         if (access & IB_ACCESS_REMOTE_READ)
2270                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2271         if (access & IB_ACCESS_REMOTE_WRITE)
2272                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2273         if (access & IB_ACCESS_REMOTE_ATOMIC)
2274                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2275         if (access & IB_ACCESS_MW_BIND)
2276                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2277
2278         wqe->frmr.l_key = wr->key;
2279         wqe->frmr.length = wr->mr->length;
2280         wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2281         wqe->frmr.va = wr->mr->iova;
2282         return 0;
2283 }
2284
2285 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2286                                     struct ib_send_wr *wr,
2287                                     struct bnxt_qplib_swqe *wqe)
2288 {
2289         /* Copy the inline data to the data field */
2290         u8 *in_data;
2291         u32 i, sge_len;
2292         void *sge_addr;
2293
2294         in_data = wqe->inline_data;
2295         for (i = 0; i < wr->num_sge; i++) {
2296                 sge_addr = (void *)(unsigned long)
2297                                 wr->sg_list[i].addr;
2298                 sge_len = wr->sg_list[i].length;
2299
2300                 if ((sge_len + wqe->inline_len) >
2301                     BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2302                         dev_err(rdev_to_dev(rdev),
2303                                 "Inline data size requested > supported value");
2304                         return -EINVAL;
2305                 }
2307
2308                 memcpy(in_data, sge_addr, sge_len);
2309                 in_data += wr->sg_list[i].length;
2310                 wqe->inline_len += wr->sg_list[i].length;
2311         }
2312         return wqe->inline_len;
2313 }
2314
2315 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2316                                    struct ib_send_wr *wr,
2317                                    struct bnxt_qplib_swqe *wqe)
2318 {
2319         int payload_sz = 0;
2320
2321         if (wr->send_flags & IB_SEND_INLINE)
2322                 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2323         else
2324                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2325                                                wqe->num_sge);
2326
2327         return payload_sz;
2328 }
2329
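/*
 * UD/GSI/raw-Ethertype HW stall workaround: once wqe_cnt reaches
 * BNXT_RE_UD_QP_HW_STALL, re-issue an RTS->RTS state modify on the QP and
 * reset the counter to keep the HW from stalling.
 */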
2330 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2331 {
2332         if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2333              qp->ib_qp.qp_type == IB_QPT_GSI ||
2334              qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2335              qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2336                 int qp_attr_mask;
2337                 struct ib_qp_attr qp_attr;
2338
2339                 qp_attr_mask = IB_QP_STATE;
2340                 qp_attr.qp_state = IB_QPS_RTS;
2341                 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2342                 qp->qplib_qp.wqe_cnt = 0;
2343         }
2344 }
2345
2346 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2347                                        struct bnxt_re_qp *qp,
2348                                 struct ib_send_wr *wr)
2349 {
2350         struct bnxt_qplib_swqe wqe;
2351         int rc = 0, payload_sz = 0;
2352         unsigned long flags;
2353
2354         spin_lock_irqsave(&qp->sq_lock, flags);
2356         while (wr) {
2357                 /* House keeping */
2358                 memset(&wqe, 0, sizeof(wqe));
2359
2360                 /* Common */
2361                 wqe.num_sge = wr->num_sge;
2362                 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2363                         dev_err(rdev_to_dev(rdev),
2364                                 "Limit exceeded for Send SGEs");
2365                         rc = -EINVAL;
2366                         goto bad;
2367                 }
2368
2369                 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2370                 if (payload_sz < 0) {
2371                         rc = -EINVAL;
2372                         goto bad;
2373                 }
2374                 wqe.wr_id = wr->wr_id;
2375
2376                 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2377
2378                 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2379                 if (!rc)
2380                         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2381 bad:
2382                 if (rc) {
2383                         dev_err(rdev_to_dev(rdev),
2384                                 "Post send failed opcode = %#x rc = %d",
2385                                 wr->opcode, rc);
2386                         break;
2387                 }
2388                 wr = wr->next;
2389         }
2390         bnxt_qplib_post_send_db(&qp->qplib_qp);
2391         bnxt_ud_qp_hw_stall_workaround(qp);
2392         spin_unlock_irqrestore(&qp->sq_lock, flags);
2393         return rc;
2394 }
2395
2396 int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2397                       struct ib_send_wr **bad_wr)
2398 {
2399         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2400         struct bnxt_qplib_swqe wqe;
2401         int rc = 0, payload_sz = 0;
2402         unsigned long flags;
2403
2404         spin_lock_irqsave(&qp->sq_lock, flags);
2405         while (wr) {
2406                 /* House keeping */
2407                 memset(&wqe, 0, sizeof(wqe));
2408
2409                 /* Common */
2410                 wqe.num_sge = wr->num_sge;
2411                 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2412                         dev_err(rdev_to_dev(qp->rdev),
2413                                 "Limit exceeded for Send SGEs");
2414                         rc = -EINVAL;
2415                         goto bad;
2416                 }
2417
2418                 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2419                 if (payload_sz < 0) {
2420                         rc = -EINVAL;
2421                         goto bad;
2422                 }
2423                 wqe.wr_id = wr->wr_id;
2424
2425                 switch (wr->opcode) {
2426                 case IB_WR_SEND:
2427                 case IB_WR_SEND_WITH_IMM:
2428                         if (ib_qp->qp_type == IB_QPT_GSI) {
2429                                 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2430                                                                payload_sz);
2431                                 if (rc)
2432                                         goto bad;
2433                                 wqe.rawqp1.lflags |=
2434                                         SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2435                         }
2436                         if (wr->send_flags & IB_SEND_IP_CSUM)
2437                                 wqe.rawqp1.lflags |=
2438                                         SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2444                         /* Fall through to build the wqe */
2445                 case IB_WR_SEND_WITH_INV:
2446                         rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2447                         break;
2448                 case IB_WR_RDMA_WRITE:
2449                 case IB_WR_RDMA_WRITE_WITH_IMM:
2450                 case IB_WR_RDMA_READ:
2451                         rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2452                         break;
2453                 case IB_WR_ATOMIC_CMP_AND_SWP:
2454                 case IB_WR_ATOMIC_FETCH_AND_ADD:
2455                         rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2456                         break;
2457                 case IB_WR_RDMA_READ_WITH_INV:
2458                         dev_err(rdev_to_dev(qp->rdev),
2459                                 "RDMA Read with Invalidate is not supported");
2460                         rc = -EINVAL;
2461                         goto bad;
2462                 case IB_WR_LOCAL_INV:
2463                         rc = bnxt_re_build_inv_wqe(wr, &wqe);
2464                         break;
2465                 case IB_WR_REG_MR:
2466                         rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2467                         break;
2468                 default:
2469                         /* Unsupported WRs */
2470                         dev_err(rdev_to_dev(qp->rdev),
2471                                 "WR (%#x) is not supported", wr->opcode);
2472                         rc = -EINVAL;
2473                         goto bad;
2474                 }
2475                 if (!rc)
2476                         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2477 bad:
2478                 if (rc) {
2479                         dev_err(rdev_to_dev(qp->rdev),
2480                                 "post_send failed op:%#x qps = %#x rc = %d\n",
2481                                 wr->opcode, qp->qplib_qp.state, rc);
2482                         *bad_wr = wr;
2483                         break;
2484                 }
2485                 wr = wr->next;
2486         }
2487         bnxt_qplib_post_send_db(&qp->qplib_qp);
2488         bnxt_ud_qp_hw_stall_workaround(qp);
2489         spin_unlock_irqrestore(&qp->sq_lock, flags);
2490
2491         return rc;
2492 }
2493
2494 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2495                                        struct bnxt_re_qp *qp,
2496                                        struct ib_recv_wr *wr)
2497 {
2498         struct bnxt_qplib_swqe wqe;
2499         int rc = 0;
2500
2501         memset(&wqe, 0, sizeof(wqe));
2502         while (wr) {
2503                 /* House keeping */
2504                 memset(&wqe, 0, sizeof(wqe));
2505
2506                 /* Common */
2507                 wqe.num_sge = wr->num_sge;
2508                 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2509                         dev_err(rdev_to_dev(rdev),
2510                                 "Limit exceeded for Receive SGEs");
2511                         rc = -EINVAL;
2512                         break;
2513                 }
2514                 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2515                 wqe.wr_id = wr->wr_id;
2516                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2517
2518                 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2519                 if (rc)
2520                         break;
2521
2522                 wr = wr->next;
2523         }
2524         if (!rc)
2525                 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2526         return rc;
2527 }
2528
2529 int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2530                       struct ib_recv_wr **bad_wr)
2531 {
2532         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2533         struct bnxt_qplib_swqe wqe;
2534         int rc = 0, payload_sz = 0;
2535         unsigned long flags;
2536         u32 count = 0;
2537
2538         spin_lock_irqsave(&qp->rq_lock, flags);
2539         while (wr) {
2540                 /* House keeping */
2541                 memset(&wqe, 0, sizeof(wqe));
2542
2543                 /* Common */
2544                 wqe.num_sge = wr->num_sge;
2545                 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2546                         dev_err(rdev_to_dev(qp->rdev),
2547                                 "Limit exceeded for Receive SGEs");
2548                         rc = -EINVAL;
2549                         *bad_wr = wr;
2550                         break;
2551                 }
2552
2553                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2554                                                wr->num_sge);
2555                 wqe.wr_id = wr->wr_id;
2556                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2557
2558                 if (ib_qp->qp_type == IB_QPT_GSI)
2559                         rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2560                                                               payload_sz);
2561                 if (!rc)
2562                         rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2563                 if (rc) {
2564                         *bad_wr = wr;
2565                         break;
2566                 }
2567
2568                 /* Ring DB if the RQEs posted reaches a threshold value */
2569                 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2570                         bnxt_qplib_post_recv_db(&qp->qplib_qp);
2571                         count = 0;
2572                 }
2573
2574                 wr = wr->next;
2575         }
2576
2577         if (count)
2578                 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2579
2580         spin_unlock_irqrestore(&qp->rq_lock, flags);
2581
2582         return rc;
2583 }
2584
2585 /* Completion Queues */
2586 int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2587 {
2588         int rc;
2589         struct bnxt_re_cq *cq;
2590         struct bnxt_qplib_nq *nq;
2591         struct bnxt_re_dev *rdev;
2592
2593         cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2594         rdev = cq->rdev;
2595         nq = cq->qplib_cq.nq;
2596
2597         rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2598         if (rc) {
2599                 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2600                 return rc;
2601         }
2602         if (!IS_ERR_OR_NULL(cq->umem))
2603                 ib_umem_release(cq->umem);
2604
2605         atomic_dec(&rdev->cq_count);
2606         nq->budget--;
2607         kfree(cq->cql);
2608         kfree(cq);
2609
2610         return 0;
2611 }
2612
2613 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2614                                 const struct ib_cq_init_attr *attr,
2615                                 struct ib_ucontext *context,
2616                                 struct ib_udata *udata)
2617 {
2618         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2619         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2620         struct bnxt_re_cq *cq = NULL;
2621         int rc, entries;
2622         int cqe = attr->cqe;
2623         struct bnxt_qplib_nq *nq = NULL;
2624         unsigned int nq_alloc_cnt;
2625
2626         /* Validate CQ fields */
2627         if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2628                 dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2629                 return ERR_PTR(-EINVAL);
2630         }
2631         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2632         if (!cq)
2633                 return ERR_PTR(-ENOMEM);
2634
2635         cq->rdev = rdev;
2636         cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2637
2638         entries = roundup_pow_of_two(cqe + 1);
2639         if (entries > dev_attr->max_cq_wqes + 1)
2640                 entries = dev_attr->max_cq_wqes + 1;
2641
2642         if (context) {
2643                 struct bnxt_re_cq_req req;
2644                 struct bnxt_re_ucontext *uctx =
2645                         container_of(context, struct bnxt_re_ucontext,
2646                                      ib_uctx);
2648                 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2649                         rc = -EFAULT;
2650                         goto fail;
2651                 }
2652
2653                 cq->umem = ib_umem_get(context, req.cq_va,
2654                                        entries * sizeof(struct cq_base),
2655                                        IB_ACCESS_LOCAL_WRITE, 1);
2656                 if (IS_ERR(cq->umem)) {
2657                         rc = PTR_ERR(cq->umem);
2658                         goto fail;
2659                 }
2660                 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2661                 cq->qplib_cq.nmap = cq->umem->nmap;
2662                 cq->qplib_cq.dpi = &uctx->dpi;
2663         } else {
2664                 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2665                 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2666                                   GFP_KERNEL);
2667                 if (!cq->cql) {
2668                         rc = -ENOMEM;
2669                         goto fail;
2670                 }
2671
2672                 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2673                 cq->qplib_cq.sghead = NULL;
2674                 cq->qplib_cq.nmap = 0;
2675         }
2676         /*
2677          * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
2678          * used to derive the NQ index.
2679          */
2680         nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2681         nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2682         cq->qplib_cq.max_wqe = entries;
2683         cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2684         cq->qplib_cq.nq = nq;
2685
2686         rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2687         if (rc) {
2688                 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2689                 goto fail;
2690         }
2691
2692         cq->ib_cq.cqe = entries;
2693         cq->cq_period = cq->qplib_cq.period;
2694         nq->budget++;
2695
2696         atomic_inc(&rdev->cq_count);
2697
2698         if (context) {
2699                 struct bnxt_re_cq_resp resp;
2700
2701                 resp.cqid = cq->qplib_cq.id;
2702                 resp.tail = cq->qplib_cq.hwq.cons;
2703                 resp.phase = cq->qplib_cq.period;
2704                 resp.rsvd = 0;
2705                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2706                 if (rc) {
2707                         dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2708                         bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2709                         goto c2fail;
2710                 }
2711         }
2712
2713         return &cq->ib_cq;
2714
2715 c2fail:
2716         if (context)
2717                 ib_umem_release(cq->umem);
2718 fail:
2719         kfree(cq->cql);
2720         kfree(cq);
2721         return ERR_PTR(rc);
2722 }
2723
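/*
 * The helpers below translate the per-opcode HW completion status
 * codes (requester, raw QP1 receive and RC receive) into the generic
 * ib_wc_status values reported to the stack.
 */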
2724 static u8 __req_to_ib_wc_status(u8 qstatus)
2725 {
2726         switch (qstatus) {
2727         case CQ_REQ_STATUS_OK:
2728                 return IB_WC_SUCCESS;
2729         case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2730                 return IB_WC_BAD_RESP_ERR;
2731         case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2732                 return IB_WC_LOC_LEN_ERR;
2733         case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2734                 return IB_WC_LOC_QP_OP_ERR;
2735         case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2736                 return IB_WC_LOC_PROT_ERR;
2737         case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2738                 return IB_WC_GENERAL_ERR;
2739         case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2740                 return IB_WC_REM_INV_REQ_ERR;
2741         case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2742                 return IB_WC_REM_ACCESS_ERR;
2743         case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2744                 return IB_WC_REM_OP_ERR;
2745         case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2746                 return IB_WC_RNR_RETRY_EXC_ERR;
2747         case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2748                 return IB_WC_RETRY_EXC_ERR;
2749         case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2750                 return IB_WC_WR_FLUSH_ERR;
2751         default:
2752                 return IB_WC_GENERAL_ERR;
2753         }
2755 }
2756
2757 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2758 {
2759         switch (qstatus) {
2760         case CQ_RES_RAWETH_QP1_STATUS_OK:
2761                 return IB_WC_SUCCESS;
2762         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2763                 return IB_WC_LOC_ACCESS_ERR;
2764         case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2765                 return IB_WC_LOC_LEN_ERR;
2766         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2767                 return IB_WC_LOC_PROT_ERR;
2768         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2769                 return IB_WC_LOC_QP_OP_ERR;
2770         case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2771                 return IB_WC_GENERAL_ERR;
2772         case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2773                 return IB_WC_WR_FLUSH_ERR;
2774         case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2775                 return IB_WC_WR_FLUSH_ERR;
2776         default:
2777                 return IB_WC_GENERAL_ERR;
2778         }
2779 }
2780
2781 static u8 __rc_to_ib_wc_status(u8 qstatus)
2782 {
2783         switch (qstatus) {
2784         case CQ_RES_RC_STATUS_OK:
2785                 return IB_WC_SUCCESS;
2786         case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2787                 return IB_WC_LOC_ACCESS_ERR;
2788         case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2789                 return IB_WC_LOC_LEN_ERR;
2790         case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2791                 return IB_WC_LOC_PROT_ERR;
2792         case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2793                 return IB_WC_LOC_QP_OP_ERR;
2794         case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2795                 return IB_WC_GENERAL_ERR;
2796         case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2797                 return IB_WC_REM_INV_REQ_ERR;
2798         case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2799                 return IB_WC_WR_FLUSH_ERR;
2800         case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2801                 return IB_WC_WR_FLUSH_ERR;
2802         default:
2803                 return IB_WC_GENERAL_ERR;
2804         }
2805 }
2806
2807 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2808 {
2809         switch (cqe->type) {
2810         case BNXT_QPLIB_SWQE_TYPE_SEND:
2811                 wc->opcode = IB_WC_SEND;
2812                 break;
2813         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2814                 wc->opcode = IB_WC_SEND;
2815                 wc->wc_flags |= IB_WC_WITH_IMM;
2816                 break;
2817         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2818                 wc->opcode = IB_WC_SEND;
2819                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2820                 break;
2821         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2822                 wc->opcode = IB_WC_RDMA_WRITE;
2823                 break;
2824         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2825                 wc->opcode = IB_WC_RDMA_WRITE;
2826                 wc->wc_flags |= IB_WC_WITH_IMM;
2827                 break;
2828         case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2829                 wc->opcode = IB_WC_RDMA_READ;
2830                 break;
2831         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2832                 wc->opcode = IB_WC_COMP_SWAP;
2833                 break;
2834         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2835                 wc->opcode = IB_WC_FETCH_ADD;
2836                 break;
2837         case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2838                 wc->opcode = IB_WC_LOCAL_INV;
2839                 break;
2840         case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2841                 wc->opcode = IB_WC_REG_MR;
2842                 break;
2843         default:
2844                 wc->opcode = IB_WC_SEND;
2845                 break;
2846         }
2847
2848         wc->status = __req_to_ib_wc_status(cqe->status);
2849 }
2850
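/*
 * bnxt_re_check_packet_type - Classify a raw QP1 completion.
 *
 * Returns BNXT_RE_ROCE_V1_PACKET or one of the RoCE v2 packet types
 * based on the itype bits and, for v2, the IP/L4 checksum-calc flags
 * of the raw-ethernet CQE; returns -1 for non-RoCE frames.
 */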
2851 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2852                                      u16 raweth_qp1_flags2)
2853 {
2854         bool is_ipv6 = false;
2855
2856         /* raweth_qp1_flags bits 9:6 indicate the itype */
2857         if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2858             != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2859                 return -1;
2860
2861         if (raweth_qp1_flags2 &
2862             CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2863             raweth_qp1_flags2 &
2864             CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2865                 /* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
2866                 if (raweth_qp1_flags2 &
2867                     CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE)
2868                         is_ipv6 = true;
2869                 return is_ipv6 ?
2870                         BNXT_RE_ROCEV2_IPV6_PACKET :
2871                         BNXT_RE_ROCEV2_IPV4_PACKET;
2872         } else {
2873                 return BNXT_RE_ROCE_V1_PACKET;
2874         }
2875 }
2876
2877 static int bnxt_re_to_ib_nw_type(int nw_type)
2878 {
2879         u8 nw_hdr_type = 0xFF;
2880
2881         switch (nw_type) {
2882         case BNXT_RE_ROCE_V1_PACKET:
2883                 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2884                 break;
2885         case BNXT_RE_ROCEV2_IPV4_PACKET:
2886                 nw_hdr_type = RDMA_NETWORK_IPV4;
2887                 break;
2888         case BNXT_RE_ROCEV2_IPV6_PACKET:
2889                 nw_hdr_type = RDMA_NETWORK_IPV6;
2890                 break;
2891         }
2892         return nw_hdr_type;
2893 }
2894
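/*
 * bnxt_re_is_loopback_packet - Detect a QP1 loopback frame.
 *
 * A frame whose destination MAC differs from the netdev MAC may have
 * been looped back; it is treated as loopback when the ether type is
 * ETH_P_IBOE (RoCE v1) or when an IPv4/IPv6 frame carries the RoCE v2
 * UDP destination port.
 */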
2895 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2896                                        void *rq_hdr_buf)
2897 {
2898         u8 *tmp_buf = NULL;
2899         struct ethhdr *eth_hdr;
2900         u16 eth_type;
2901         bool rc = false;
2902
2903         tmp_buf = (u8 *)rq_hdr_buf;
2904         /*
2905          * If the destination MAC is not the same as the interface MAC,
2906          * this could be a loopback or a multicast address; check
2907          * whether it is a loopback packet.
2908          */
2909         if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2910                 tmp_buf += 4;
2911                 /* Check the ether type */
2912                 eth_hdr = (struct ethhdr *)tmp_buf;
2913                 eth_type = ntohs(eth_hdr->h_proto);
2914                 switch (eth_type) {
2915                 case ETH_P_IBOE:
2916                         rc = true;
2917                         break;
2918                 case ETH_P_IP:
2919                 case ETH_P_IPV6: {
2920                         u32 len;
2921                         struct udphdr *udp_hdr;
2922
2923                         len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2924                                                       sizeof(struct ipv6hdr));
2925                         tmp_buf += sizeof(struct ethhdr) + len;
2926                         udp_hdr = (struct udphdr *)tmp_buf;
2927                         if (ntohs(udp_hdr->dest) ==
2928                                     ROCE_V2_UDP_DPORT)
2929                                 rc = true;
2930                         break;
2931                         }
2932                 default:
2933                         break;
2934                 }
2935         }
2936
2937         return rc;
2938 }
2939
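/*
 * bnxt_re_process_raw_qp_pkt_rx - Relay a raw QP1 packet through the
 * shadow QP.
 *
 * The original completion is stashed in the sqp_tbl slot indexed by
 * the CQE wr_id, a receive buffer is posted on the shadow QP, and the
 * payload (minus the raw ethernet/loopback headers) is sent to the
 * shadow QP itself so that it surfaces later as a normal UD receive.
 */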
2940 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2941                                          struct bnxt_qplib_cqe *cqe)
2942 {
2943         struct bnxt_re_dev *rdev = qp1_qp->rdev;
2944         struct bnxt_re_sqp_entries *sqp_entry = NULL;
2945         struct bnxt_re_qp *qp = rdev->qp1_sqp;
2946         struct ib_send_wr *swr;
2947         struct ib_ud_wr udwr;
2948         struct ib_recv_wr rwr;
2949         int pkt_type = 0;
2950         u32 tbl_idx;
2951         void *rq_hdr_buf;
2952         dma_addr_t rq_hdr_buf_map;
2953         dma_addr_t shrq_hdr_buf_map;
2954         u32 offset = 0;
2955         u32 skip_bytes = 0;
2956         struct ib_sge s_sge[2];
2957         struct ib_sge r_sge[2];
2958         int rc;
2959
2960         memset(&udwr, 0, sizeof(udwr));
2961         memset(&rwr, 0, sizeof(rwr));
2962         memset(&s_sge, 0, sizeof(s_sge));
2963         memset(&r_sge, 0, sizeof(r_sge));
2964
2965         swr = &udwr.wr;
2966         tbl_idx = cqe->wr_id;
2967
2968         rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2969                         (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2970         rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2971                                                           tbl_idx);
2972
2973         /* Shadow QP header buffer */
2974         shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2975                                                             tbl_idx);
2976         sqp_entry = &rdev->sqp_tbl[tbl_idx];
2977
2978         /* Store this cqe */
2979         memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2980         sqp_entry->qp1_qp = qp1_qp;
2981
2982         /* Find packet type from the cqe */
2983
2984         pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2985                                              cqe->raweth_qp1_flags2);
2986         if (pkt_type < 0) {
2987                 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2988                 return -EINVAL;
2989         }
2990
2991         /* Adjust the offset for the user buffer and post in the rq */
2992
2993         if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2994                 offset = 20;
2995
2996         /*
2997          * QP1 loopback packet has 4 bytes of internal header before
2998          * ether header. Skip these four bytes.
2999          */
3000         if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3001                 skip_bytes = 4;
3002
3003         /* First send SGE. Skip the ether header */
3004         s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3005                         + skip_bytes;
3006         s_sge[0].lkey = 0xFFFFFFFF;
3007         s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3008                                 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3009
3010         /* Second Send SGE */
3011         s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3012                         BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3013         if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3014                 s_sge[1].addr += 8;
3015         s_sge[1].lkey = 0xFFFFFFFF;
3016         s_sge[1].length = 256;
3017
3018         /* First recv SGE */
3019
3020         r_sge[0].addr = shrq_hdr_buf_map;
3021         r_sge[0].lkey = 0xFFFFFFFF;
3022         r_sge[0].length = 40;
3023
3024         r_sge[1].addr = sqp_entry->sge.addr + offset;
3025         r_sge[1].lkey = sqp_entry->sge.lkey;
3026         r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3027
3028         /* Create receive work request */
3029         rwr.num_sge = 2;
3030         rwr.sg_list = r_sge;
3031         rwr.wr_id = tbl_idx;
3032         rwr.next = NULL;
3033
3034         rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
3035         if (rc) {
3036                 dev_err(rdev_to_dev(rdev),
3037                         "Failed to post Rx buffers to shadow QP");
3038                 return -ENOMEM;
3039         }
3040
3041         swr->num_sge = 2;
3042         swr->sg_list = s_sge;
3043         swr->wr_id = tbl_idx;
3044         swr->opcode = IB_WR_SEND;
3045         swr->next = NULL;
3046
3047         udwr.ah = &rdev->sqp_ah->ib_ah;
3048         udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
3049         udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
3050
3051         /* Post the received data to the shadow QP's send queue */
3052         rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
3053
3054         return rc;
3055 }
3056
3057 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3058                                           struct bnxt_qplib_cqe *cqe)
3059 {
3060         wc->opcode = IB_WC_RECV;
3061         wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3062         wc->wc_flags |= IB_WC_GRH;
3063 }
3064
3065 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3066                                 u16 *vid, u8 *sl)
3067 {
3068         bool ret = false;
3069         u32 metadata;
3070         u16 tpid;
3071
3072         metadata = orig_cqe->raweth_qp1_metadata;
3073         if (orig_cqe->raweth_qp1_flags2 &
3074                 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3075                 tpid = ((metadata &
3076                          CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3077                          CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3078                 if (tpid == ETH_P_8021Q) {
3079                         *vid = metadata &
3080                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3081                         *sl = (metadata &
3082                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3083                                CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3084                         ret = true;
3085                 }
3086         }
3087
3088         return ret;
3089 }
3090
3091 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3092                                       struct bnxt_qplib_cqe *cqe)
3093 {
3094         wc->opcode = IB_WC_RECV;
3095         wc->status = __rc_to_ib_wc_status(cqe->status);
3096
3097         if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3098                 wc->wc_flags |= IB_WC_WITH_IMM;
3099         if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3100                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3101         if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3102             (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3103                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3104 }
3105
3106 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3107                                              struct ib_wc *wc,
3108                                              struct bnxt_qplib_cqe *cqe)
3109 {
3110         struct bnxt_re_dev *rdev = qp->rdev;
3111         struct bnxt_re_qp *qp1_qp = NULL;
3112         struct bnxt_qplib_cqe *orig_cqe = NULL;
3113         struct bnxt_re_sqp_entries *sqp_entry = NULL;
3114         int nw_type;
3115         u32 tbl_idx;
3116         u16 vlan_id;
3117         u8 sl;
3118
3119         tbl_idx = cqe->wr_id;
3120
3121         sqp_entry = &rdev->sqp_tbl[tbl_idx];
3122         qp1_qp = sqp_entry->qp1_qp;
3123         orig_cqe = &sqp_entry->cqe;
3124
3125         wc->wr_id = sqp_entry->wrid;
3126         wc->byte_len = orig_cqe->length;
3127         wc->qp = &qp1_qp->ib_qp;
3128
3129         wc->ex.imm_data = orig_cqe->immdata;
3130         wc->src_qp = orig_cqe->src_qp;
3131         memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3132         if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3133                 wc->vlan_id = vlan_id;
3134                 wc->sl = sl;
3135                 wc->wc_flags |= IB_WC_WITH_VLAN;
3136         }
3137         wc->port_num = 1;
3138         wc->vendor_err = orig_cqe->status;
3139
3140         wc->opcode = IB_WC_RECV;
3141         wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3142         wc->wc_flags |= IB_WC_GRH;
3143
3144         nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3145                                             orig_cqe->raweth_qp1_flags2);
3146         if (nw_type >= 0) {
3147                 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3148                 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3149         }
3150 }
3151
3152 static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
3153                                       struct bnxt_qplib_cqe *cqe)
3154 {
3155         wc->opcode = IB_WC_RECV;
3156         wc->status = __rc_to_ib_wc_status(cqe->status);
3157
3158         if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3159                 wc->wc_flags |= IB_WC_WITH_IMM;
3160         if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3161                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3162         if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3163             (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3164                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3165 }
3166
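/*
 * send_phantom_wqe - Post a phantom (fence MW bind) WQE on the SQ.
 *
 * Invoked from CQ polling when the qplib layer sets sq->send_phantom;
 * the bind is issued under the SQ lock and accounted in
 * phantom_wqe_cnt.
 */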
3167 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3168 {
3169         struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3170         unsigned long flags;
3171         int rc = 0;
3172
3173         spin_lock_irqsave(&qp->sq_lock, flags);
3174
3175         rc = bnxt_re_bind_fence_mw(lib_qp);
3176         if (!rc) {
3177                 lib_qp->sq.phantom_wqe_cnt++;
3178                 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3179                         "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3180                         lib_qp->id, lib_qp->sq.hwq.prod,
3181                         HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3182                         lib_qp->sq.phantom_wqe_cnt);
3183         }
3184
3185         spin_unlock_irqrestore(&qp->sq_lock, flags);
3186         return rc;
3187 }
3188
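/*
 * bnxt_re_poll_cq - Poll up to num_entries completions.
 *
 * CQEs are pulled from the qplib layer (plus any software flush-list
 * entries) into the per-CQ cql buffer and transcribed into ib_wc.
 * Completions belonging to the shadow QP1 are either consumed
 * silently (requester side) or reported using the stored original
 * completion (responder side).
 */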
3189 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3190 {
3191         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3192         struct bnxt_re_qp *qp;
3193         struct bnxt_qplib_cqe *cqe;
3194         int i, ncqe, budget;
3195         struct bnxt_qplib_q *sq;
3196         struct bnxt_qplib_qp *lib_qp;
3197         u32 tbl_idx;
3198         struct bnxt_re_sqp_entries *sqp_entry = NULL;
3199         unsigned long flags;
3200
3201         spin_lock_irqsave(&cq->cq_lock, flags);
3202         budget = min_t(u32, num_entries, cq->max_cql);
3203         num_entries = budget;
3204         if (!cq->cql) {
3205                 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3206                 goto exit;
3207         }
3208         cqe = &cq->cql[0];
3209         while (budget) {
3210                 lib_qp = NULL;
3211                 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3212                 if (lib_qp) {
3213                         sq = &lib_qp->sq;
3214                         if (sq->send_phantom) {
3215                                 qp = container_of(lib_qp,
3216                                                   struct bnxt_re_qp, qplib_qp);
3217                                 if (send_phantom_wqe(qp) == -ENOMEM)
3218                                         dev_err(rdev_to_dev(cq->rdev),
3219                                                 "Phantom failed! Scheduled to send again\n");
3220                                 else
3221                                         sq->send_phantom = false;
3222                         }
3223                 }
3224                 if (ncqe < budget)
3225                         ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3226                                                               cqe + ncqe,
3227                                                               budget - ncqe);
3228
3229                 if (!ncqe)
3230                         break;
3231
3232                 for (i = 0; i < ncqe; i++, cqe++) {
3233                         /* Transcribe each qplib_wqe back to ib_wc */
3234                         memset(wc, 0, sizeof(*wc));
3235
3236                         wc->wr_id = cqe->wr_id;
3237                         wc->byte_len = cqe->length;
3238                         qp = container_of
3239                                 ((struct bnxt_qplib_qp *)
3240                                  (unsigned long)(cqe->qp_handle),
3241                                  struct bnxt_re_qp, qplib_qp);
3242                         if (!qp) {
3243                                 dev_err(rdev_to_dev(cq->rdev),
3244                                         "POLL CQ : bad QP handle");
3245                                 continue;
3246                         }
3247                         wc->qp = &qp->ib_qp;
3248                         wc->ex.imm_data = cqe->immdata;
3249                         wc->src_qp = cqe->src_qp;
3250                         memcpy(wc->smac, cqe->smac, ETH_ALEN);
3251                         wc->port_num = 1;
3252                         wc->vendor_err = cqe->status;
3253
3254                         switch (cqe->opcode) {
3255                         case CQ_BASE_CQE_TYPE_REQ:
3256                                 if (qp->qplib_qp.id ==
3257                                     qp->rdev->qp1_sqp->qplib_qp.id) {
3258                                         /* Handle this completion with
3259                                          * the stored completion
3260                                          */
3261                                         memset(wc, 0, sizeof(*wc));
3262                                         continue;
3263                                 }
3264                                 bnxt_re_process_req_wc(wc, cqe);
3265                                 break;
3266                         case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3267                                 if (!cqe->status) {
3268                                         int rc = 0;
3269
3270                                         rc = bnxt_re_process_raw_qp_pkt_rx
3271                                                                 (qp, cqe);
3272                                         if (!rc) {
3273                                                 memset(wc, 0, sizeof(*wc));
3274                                                 continue;
3275                                         }
3276                                         cqe->status = -1;
3277                                 }
3278                                 /* Errors need not be looped back.
3279                                  * But change the wr_id to the one
3280                                  * stored in the table
3281                                  */
3282                                 tbl_idx = cqe->wr_id;
3283                                 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3284                                 wc->wr_id = sqp_entry->wrid;
3285                                 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3286                                 break;
3287                         case CQ_BASE_CQE_TYPE_RES_RC:
3288                                 bnxt_re_process_res_rc_wc(wc, cqe);
3289                                 break;
3290                         case CQ_BASE_CQE_TYPE_RES_UD:
3291                                 if (qp->qplib_qp.id ==
3292                                     qp->rdev->qp1_sqp->qplib_qp.id) {
3293                                         /* Handle this completion with
3294                                          * the stored completion
3295                                          */
3296                                         if (cqe->status) {
3297                                                 continue;
3298                                         } else {
3299                                                 bnxt_re_process_res_shadow_qp_wc
3300                                                                 (qp, wc, cqe);
3301                                                 break;
3302                                         }
3303                                 }
3304                                 bnxt_re_process_res_ud_wc(wc, cqe);
3305                                 break;
3306                         default:
3307                                 dev_err(rdev_to_dev(cq->rdev),
3308                                         "POLL CQ : type 0x%x not handled",
3309                                         cqe->opcode);
3310                                 continue;
3311                         }
3312                         wc++;
3313                         budget--;
3314                 }
3315         }
3316 exit:
3317         spin_unlock_irqrestore(&cq->cq_lock, flags);
3318         return num_entries - budget;
3319 }
3320
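/*
 * bnxt_re_req_notify_cq - Arm the CQ for event notification.
 *
 * When IB_CQ_REPORT_MISSED_EVENTS is requested and the CQ is not
 * empty, returns 1 without re-arming so the caller polls again;
 * otherwise arms the CQ for all or solicited-only completions.
 */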
3321 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3322                           enum ib_cq_notify_flags ib_cqn_flags)
3323 {
3324         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3325         int type = 0, rc = 0;
3326         unsigned long flags;
3327
3328         spin_lock_irqsave(&cq->cq_lock, flags);
3329         /* Trigger on the very next completion */
3330         if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3331                 type = DBR_DBR_TYPE_CQ_ARMALL;
3332         /* Trigger on the next solicited completion */
3333         else if (ib_cqn_flags & IB_CQ_SOLICITED)
3334                 type = DBR_DBR_TYPE_CQ_ARMSE;
3335
3336         /* Poll to see if there are missed events */
3337         if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3338             !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3339                 rc = 1;
3340                 goto exit;
3341         }
3342         bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3343
3344 exit:
3345         spin_unlock_irqrestore(&cq->cq_lock, flags);
3346         return rc;
3347 }
3348
3349 /* Memory Regions */
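/*
 * bnxt_re_get_dma_mr - Register a DMA MR spanning the whole address
 * space. The MR is registered with an empty PBL and an "infinite"
 * length; an rkey is exposed only when remote access was requested.
 */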
3350 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3351 {
3352         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3353         struct bnxt_re_dev *rdev = pd->rdev;
3354         struct bnxt_re_mr *mr;
3355         u64 pbl = 0;
3356         int rc;
3357
3358         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3359         if (!mr)
3360                 return ERR_PTR(-ENOMEM);
3361
3362         mr->rdev = rdev;
3363         mr->qplib_mr.pd = &pd->qplib_pd;
3364         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3365         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3366
3367         /* Allocate and register 0 as the address */
3368         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3369         if (rc)
3370                 goto fail;
3371
3372         mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3373         mr->qplib_mr.total_size = -1; /* Infinite length */
3374         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3375                                PAGE_SIZE);
3376         if (rc)
3377                 goto fail_mr;
3378
3379         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3380         if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3381                                IB_ACCESS_REMOTE_ATOMIC))
3382                 mr->ib_mr.rkey = mr->ib_mr.lkey;
3383         atomic_inc(&rdev->mr_count);
3384
3385         return &mr->ib_mr;
3386
3387 fail_mr:
3388         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3389 fail:
3390         kfree(mr);
3391         return ERR_PTR(rc);
3392 }
3393
3394 int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3395 {
3396         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3397         struct bnxt_re_dev *rdev = mr->rdev;
3398         int rc;
3399
3400         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3401         if (rc)
3402                 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3403
3404         if (mr->pages) {
3405                 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3406                                                         &mr->qplib_frpl);
3407                 kfree(mr->pages);
3408                 mr->npages = 0;
3409                 mr->pages = NULL;
3410         }
3411         if (!IS_ERR_OR_NULL(mr->ib_umem))
3412                 ib_umem_release(mr->ib_umem);
3413
3414         kfree(mr);
3415         atomic_dec(&rdev->mr_count);
3416         return rc;
3417 }
3418
3419 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3420 {
3421         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3422
3423         if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3424                 return -ENOMEM;
3425
3426         mr->pages[mr->npages++] = addr;
3427         return 0;
3428 }
3429
3430 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3431                       unsigned int *sg_offset)
3432 {
3433         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3434
3435         mr->npages = 0;
3436         return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3437 }
3438
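/*
 * bnxt_re_alloc_mr - Allocate a fast-registration MR.
 *
 * Only IB_MR_TYPE_MEM_REG is supported, limited to MAX_PBL_LVL_1_PGS
 * pages; a page array and a HW fast-register page list are set up for
 * later use by bnxt_re_map_mr_sg().
 */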
3439 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3440                                u32 max_num_sg)
3441 {
3442         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3443         struct bnxt_re_dev *rdev = pd->rdev;
3444         struct bnxt_re_mr *mr = NULL;
3445         int rc;
3446
3447         if (type != IB_MR_TYPE_MEM_REG) {
3448                 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3449                 return ERR_PTR(-EINVAL);
3450         }
3451         if (max_num_sg > MAX_PBL_LVL_1_PGS)
3452                 return ERR_PTR(-EINVAL);
3453
3454         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3455         if (!mr)
3456                 return ERR_PTR(-ENOMEM);
3457
3458         mr->rdev = rdev;
3459         mr->qplib_mr.pd = &pd->qplib_pd;
3460         mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3461         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3462
3463         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3464         if (rc)
3465                 goto bail;
3466
3467         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3468         mr->ib_mr.rkey = mr->ib_mr.lkey;
3469
3470         mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3471         if (!mr->pages) {
3472                 rc = -ENOMEM;
3473                 goto fail;
3474         }
3475         rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3476                                                  &mr->qplib_frpl, max_num_sg);
3477         if (rc) {
3478                 dev_err(rdev_to_dev(rdev),
3479                         "Failed to allocate HW FR page list");
3480                 goto fail_mr;
3481         }
3482
3483         atomic_inc(&rdev->mr_count);
3484         return &mr->ib_mr;
3485
3486 fail_mr:
3487         kfree(mr->pages);
3488 fail:
3489         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3490 bail:
3491         kfree(mr);
3492         return ERR_PTR(rc);
3493 }
3494
3495 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3496                                struct ib_udata *udata)
3497 {
3498         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3499         struct bnxt_re_dev *rdev = pd->rdev;
3500         struct bnxt_re_mw *mw;
3501         int rc;
3502
3503         mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3504         if (!mw)
3505                 return ERR_PTR(-ENOMEM);
3506         mw->rdev = rdev;
3507         mw->qplib_mw.pd = &pd->qplib_pd;
3508
3509         mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3510                                CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3511                                CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3512         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3513         if (rc) {
3514                 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3515                 goto fail;
3516         }
3517         mw->ib_mw.rkey = mw->qplib_mw.rkey;
3518
3519         atomic_inc(&rdev->mw_count);
3520         return &mw->ib_mw;
3521
3522 fail:
3523         kfree(mw);
3524         return ERR_PTR(rc);
3525 }
3526
3527 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3528 {
3529         struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3530         struct bnxt_re_dev *rdev = mw->rdev;
3531         int rc;
3532
3533         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3534         if (rc) {
3535                 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3536                 return rc;
3537         }
3538
3539         kfree(mw);
3540         atomic_dec(&rdev->mw_count);
3541         return rc;
3542 }
3543
3544 static int bnxt_re_page_size_ok(int page_shift)
3545 {
3546         switch (page_shift) {
3547         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3548         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3549         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3550         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3551         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3552         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3553         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3554         case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3555                 return 1;
3556         default:
3557                 return 0;
3558         }
3559 }
3560
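/*
 * fill_umem_pbl_tbl - Flatten the umem scatterlist into a PBL.
 *
 * Walks each PAGE_SIZE chunk of the DMA-mapped scatterlist; the first
 * entry is masked down to the page_shift boundary and further entries
 * are emitted only for addresses aligned to 1 << page_shift. Returns
 * the number of PBL entries written (e.g. a 64K mapping with a 4K
 * page_shift yields 16 entries).
 */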
3561 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3562                              int page_shift)
3563 {
3564         u64 *pbl_tbl = pbl_tbl_orig;
3565         u64 paddr;
3566         u64 page_mask = (1ULL << page_shift) - 1;
3567         int i, pages;
3568         struct scatterlist *sg;
3569         int entry;
3570
3571         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3572                 pages = sg_dma_len(sg) >> PAGE_SHIFT;
3573                 for (i = 0; i < pages; i++) {
3574                         paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
3575                         if (pbl_tbl == pbl_tbl_orig)
3576                                 *pbl_tbl++ = paddr & ~page_mask;
3577                         else if ((paddr & page_mask) == 0)
3578                                 *pbl_tbl++ = paddr;
3579                 }
3580         }
3581         return pbl_tbl - pbl_tbl_orig;
3582 }
3583
3584 /* uverbs */
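/*
 * bnxt_re_reg_user_mr - Register a user memory region.
 *
 * Pins the user buffer with ib_umem_get(), builds the PBL from the
 * umem pages (promoting to the 2M page size for hugetlb mappings
 * larger than 2M) and registers the region with the HW.
 */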
3585 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3586                                   u64 virt_addr, int mr_access_flags,
3587                                   struct ib_udata *udata)
3588 {
3589         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3590         struct bnxt_re_dev *rdev = pd->rdev;
3591         struct bnxt_re_mr *mr;
3592         struct ib_umem *umem;
3593         u64 *pbl_tbl = NULL;
3594         int umem_pgs, page_shift, rc;
3595
3596         if (length > BNXT_RE_MAX_MR_SIZE) {
3597                 dev_err(rdev_to_dev(rdev), "MR size: %llu > max supported: %llu\n",
3598                         length, BNXT_RE_MAX_MR_SIZE);
3599                 return ERR_PTR(-ENOMEM);
3600         }
3601
3602         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3603         if (!mr)
3604                 return ERR_PTR(-ENOMEM);
3605
3606         mr->rdev = rdev;
3607         mr->qplib_mr.pd = &pd->qplib_pd;
3608         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3609         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3610
3611         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3612         if (rc) {
3613                 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3614                 goto free_mr;
3615         }
3616         /* The fixed portion of the rkey is the same as the lkey */
3617         mr->ib_mr.rkey = mr->qplib_mr.rkey;
3618
3619         umem = ib_umem_get(ib_pd->uobject->context, start, length,
3620                            mr_access_flags, 0);
3621         if (IS_ERR(umem)) {
3622                 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3623                 rc = -EFAULT;
3624                 goto free_mrw;
3625         }
3626         mr->ib_umem = umem;
3627
3628         mr->qplib_mr.va = virt_addr;
3629         umem_pgs = ib_umem_page_count(umem);
3630         if (!umem_pgs) {
3631                 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3632                 rc = -EINVAL;
3633                 goto free_umem;
3634         }
3635         mr->qplib_mr.total_size = length;
3636
3637         pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3638         if (!pbl_tbl) {
3639                 rc = -ENOMEM;
3640                 goto free_umem;
3641         }
3642
3643         page_shift = umem->page_shift;
3644
3645         if (!bnxt_re_page_size_ok(page_shift)) {
3646                 dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3647                 rc = -EFAULT;
3648                 goto fail;
3649         }
3650
3651         if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
3652                 dev_err(rdev_to_dev(rdev), "Requested MR size: %llu, max supported: %llu",
3653                         length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3654                 rc = -EINVAL;
3655                 goto fail;
3656         }
3657         if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
3658                 page_shift = BNXT_RE_PAGE_SHIFT_2M;
3659                 dev_warn(rdev_to_dev(rdev), "hugetlb umem: using page size 0x%x",
3660                          1 << page_shift);
3661         }
3662
3663         /* Map umem buf ptrs to the PBL */
3664         umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3665         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3666                                umem_pgs, false, 1 << page_shift);
3667         if (rc) {
3668                 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3669                 goto fail;
3670         }
3671
3672         kfree(pbl_tbl);
3673
3674         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3675         mr->ib_mr.rkey = mr->qplib_mr.lkey;
3676         atomic_inc(&rdev->mr_count);
3677
3678         return &mr->ib_mr;
3679 fail:
3680         kfree(pbl_tbl);
3681 free_umem:
3682         ib_umem_release(umem);
3683 free_mrw:
3684         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3685 free_mr:
3686         kfree(mr);
3687         return ERR_PTR(rc);
3688 }
3689
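/*
 * bnxt_re_alloc_ucontext - Allocate a user context.
 *
 * Rejects mismatched ABI versions, allocates the shared page that
 * bnxt_re_mmap() exposes at pgoff 0 and returns the device limits in
 * the ABI response.
 */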
3690 struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3691                                            struct ib_udata *udata)
3692 {
3693         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3694         struct bnxt_re_uctx_resp resp;
3695         struct bnxt_re_ucontext *uctx;
3696         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3697         int rc;
3698
3699         dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3700                 ibdev->uverbs_abi_ver);
3701
3702         if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3703                 dev_dbg(rdev_to_dev(rdev), "ABI version %d is different from the device's %d",
3704                         ibdev->uverbs_abi_ver, BNXT_RE_ABI_VERSION);
3705                 return ERR_PTR(-EPERM);
3706         }
3707
3708         uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3709         if (!uctx)
3710                 return ERR_PTR(-ENOMEM);
3711
3712         uctx->rdev = rdev;
3713
3714         uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3715         if (!uctx->shpg) {
3716                 rc = -ENOMEM;
3717                 goto fail;
3718         }
3719         spin_lock_init(&uctx->sh_lock);
3720
3721         resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3722         resp.max_qp = rdev->qplib_ctx.qpc_count;
3723         resp.pg_size = PAGE_SIZE;
3724         resp.cqe_sz = sizeof(struct cq_base);
3725         resp.max_cqd = dev_attr->max_cq_wqes;
3726         resp.rsvd    = 0;
3727
3728         rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3729         if (rc) {
3730                 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3731                 rc = -EFAULT;
3732                 goto cfail;
3733         }
3734
3735         return &uctx->ib_uctx;
3736 cfail:
3737         free_page((unsigned long)uctx->shpg);
3738         uctx->shpg = NULL;
3739 fail:
3740         kfree(uctx);
3741         return ERR_PTR(rc);
3742 }
3743
3744 int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3745 {
3746         struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3747                                                    struct bnxt_re_ucontext,
3748                                                    ib_uctx);
3749
3750         struct bnxt_re_dev *rdev = uctx->rdev;
3751         int rc = 0;
3752
3753         if (uctx->shpg)
3754                 free_page((unsigned long)uctx->shpg);
3755
3756         if (uctx->dpi.dbr) {
3757                 /* The DPI is allocated when the application creates its
3758                  * first PD; free it here and mark the context DPI as NULL.
3759                  */
3760                 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3761                                             &rdev->qplib_res.dpi_tbl,
3762                                             &uctx->dpi);
3763                 if (rc)
3764                         dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3765                 /* Don't fail, continue */
3766                 uctx->dpi.dbr = NULL;
3767         }
3768
3769         kfree(uctx);
3770         return 0;
3771 }
3772
3773 /* Map the DPI doorbell page (pgoff != 0) or the shared page into user space */
3774 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3775 {
3776         struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3777                                                    struct bnxt_re_ucontext,
3778                                                    ib_uctx);
3779         struct bnxt_re_dev *rdev = uctx->rdev;
3780         u64 pfn;
3781
3782         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3783                 return -EINVAL;
3784
3785         if (vma->vm_pgoff) {
3786                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3787                 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3788                                        PAGE_SIZE, vma->vm_page_prot)) {
3789                         dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3790                         return -EAGAIN;
3791                 }
3792         } else {
3793                 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3794                 if (remap_pfn_range(vma, vma->vm_start,
3795                                     pfn, PAGE_SIZE, vma->vm_page_prot)) {
3796                         dev_err(rdev_to_dev(rdev),
3797                                 "Failed to map shared page");
3798                         return -EAGAIN;
3799                 }
3800         }
3801
3802         return 0;
3803 }