/* drivers/infiniband/hw/ocrdma/ocrdma_verbs.c */
/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>

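/* RoCE uses a single default partition key; only pkey index 0 is
 * valid and it always reads back as 0xffff.
 */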
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        if (index > 0)
                return -EINVAL;

        *pkey = 0xffff;
        return 0;
}

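/* Report device capabilities to the IB core. All limits come from the
 * firmware-queried attributes cached in dev->attr; user data (uhw) is
 * not used by this verb.
 */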
int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
                        struct ib_udata *uhw)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        memset(attr, 0, sizeof *attr);
        memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
               min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
        ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
        attr->max_mr_size = dev->attr.max_mr_size;
        attr->page_size_cap = 0xffff000;
        attr->vendor_id = dev->nic_info.pdev->vendor;
        attr->vendor_part_id = dev->nic_info.pdev->device;
        attr->hw_ver = dev->asic_id;
        attr->max_qp = dev->attr.max_qp;
        attr->max_ah = OCRDMA_MAX_AH;
        attr->max_qp_wr = dev->attr.max_wqe;

        attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
                                        IB_DEVICE_RC_RNR_NAK_GEN |
                                        IB_DEVICE_SHUTDOWN_PORT |
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY |
                                        IB_DEVICE_MEM_MGT_EXTENSIONS;
        attr->max_send_sge = dev->attr.max_send_sge;
        attr->max_recv_sge = dev->attr.max_recv_sge;
        attr->max_sge_rd = dev->attr.max_rdma_sge;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
        attr->max_mw = dev->attr.max_mw;
        attr->max_pd = dev->attr.max_pd;
        attr->atomic_cap = 0;
        attr->max_fmr = 0;
        attr->max_map_per_fmr = 0;
        attr->max_qp_rd_atom =
            min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
        attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
        attr->max_srq = dev->attr.max_srq;
        attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
        attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
        attr->max_pkeys = 1;
        return 0;
}

struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
{
        struct ocrdma_dev *dev;
        struct net_device *ndev = NULL;

        rcu_read_lock();

        dev = get_ocrdma_dev(ibdev);
        if (dev)
                ndev = dev->nic_info.netdev;
        if (ndev)
                dev_hold(ndev);

        rcu_read_unlock();

        return ndev;
}

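/* Map the PHY speed reported by firmware to the nearest equivalent
 * (IB speed, IB width) pair; unknown speeds fall back to SDR/1X.
 */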
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
                                            u8 *ib_speed, u8 *ib_width)
{
        int status;
        u8 speed;

        status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
        if (status)
                speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

        switch (speed) {
        case OCRDMA_PHYS_LINK_SPEED_1GBPS:
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_10GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_1X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_20GBPS:
                *ib_speed = IB_SPEED_DDR;
                *ib_width = IB_WIDTH_4X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_40GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_4X;
                break;

        default:
                /* Unsupported */
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
        }
}

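/* Port state mirrors the underlying netdev: the port is reported
 * ACTIVE only while the netdev is running and operationally up.
 */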
int ocrdma_query_port(struct ib_device *ibdev,
                      u8 port, struct ib_port_attr *props)
{
        enum ib_port_state port_state;
        struct ocrdma_dev *dev;
        struct net_device *netdev;

        /* props is zeroed by the caller; avoid zeroing it here */
        dev = get_ocrdma_dev(ibdev);
        netdev = dev->nic_info.netdev;
        if (netif_running(netdev) && netif_oper_up(netdev)) {
                port_state = IB_PORT_ACTIVE;
                props->phys_state = 5;
        } else {
                port_state = IB_PORT_DOWN;
                props->phys_state = 3;
        }
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(netdev->mtu);
        props->lid = 0;
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
        props->state = port_state;
        props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                                IB_PORT_DEVICE_MGMT_SUP |
                                IB_PORT_VENDOR_CLASS_SUP;
        props->ip_gids = true;
        props->gid_tbl_len = OCRDMA_MAX_SGID;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = 0;
        props->qkey_viol_cntr = 0;
        get_link_speed_and_width(dev, &props->active_speed,
                                 &props->active_width);
        props->max_msg_sz = 0x80000000;
        props->max_vl_num = 4;
        return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
                       struct ib_port_modify *props)
{
        return 0;
}

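/* Each ucontext tracks a list of (phys_addr, len) keys describing the
 * regions userspace may mmap(); ocrdma_mmap() validates incoming
 * requests against this list.
 */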
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                           unsigned long len)
{
        struct ocrdma_mm *mm;

        mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (mm == NULL)
                return -ENOMEM;
        mm->key.phy_addr = phy_addr;
        mm->key.len = len;
        INIT_LIST_HEAD(&mm->entry);

        mutex_lock(&uctx->mm_list_lock);
        list_add_tail(&mm->entry, &uctx->mm_head);
        mutex_unlock(&uctx->mm_list_lock);
        return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                            unsigned long len)
{
        struct ocrdma_mm *mm, *tmp;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                if (len != mm->key.len && phy_addr != mm->key.phy_addr)
                        continue;

                list_del(&mm->entry);
                kfree(mm);
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                              unsigned long len)
{
        bool found = false;
        struct ocrdma_mm *mm;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry(mm, &uctx->mm_head, entry) {
                if (len != mm->key.len && phy_addr != mm->key.phy_addr)
                        continue;

                found = true;
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
        return found;
}

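/* When the PD pool is preallocated (pd_prealloc_valid), the pd_mgr
 * bitmaps track which ids of the DPP and normal pools are in use.
 * The bitmap helpers below run under dev->dev_lock.
 */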
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
        u16 pd_bitmap_idx = 0;
        const unsigned long *pd_bitmap;

        if (dpp_pool) {
                pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_dpp_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
                dev->pd_mgr->pd_dpp_count++;
                if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
                        dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
        } else {
                pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_normal_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
                dev->pd_mgr->pd_norm_count++;
                if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
                        dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
        }
        return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
                                        bool dpp_pool)
{
        u16 pd_count;
        u16 pd_bit_index;

        pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
                              dev->pd_mgr->pd_norm_count;
        if (pd_count == 0)
                return -EINVAL;

        if (dpp_pool) {
                pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
                if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
                        return -EINVAL;
                } else {
                        __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
                        dev->pd_mgr->pd_dpp_count--;
                }
        } else {
                pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
                if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
                        return -EINVAL;
                } else {
                        __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
                        dev->pd_mgr->pd_norm_count--;
                }
        }

        return 0;
}

static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
                                   bool dpp_pool)
{
        int status;

        mutex_lock(&dev->dev_lock);
        status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
        mutex_unlock(&dev->dev_lock);
        return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
        u16 pd_idx = 0;
        int status = 0;

        mutex_lock(&dev->dev_lock);
        if (pd->dpp_enabled) {
                /* try allocating DPP PD, if not available then normal PD */
                if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
                        pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
                        pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
                } else if (dev->pd_mgr->pd_norm_count <
                           dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                        pd->dpp_enabled = false;
                } else {
                        status = -EINVAL;
                }
        } else {
                if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                } else {
                        status = -EINVAL;
                }
        }
        mutex_unlock(&dev->dev_lock);
        return status;
}

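/* Allocate a PD, preferring a DPP-capable one for user contexts on
 * SKH-R; if the DPP allocation fails, retry as a normal PD.
 */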
static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
                            struct ocrdma_ucontext *uctx,
                            struct ib_udata *udata)
{
        int status;

        if (udata && uctx && dev->attr.max_dpp_pds) {
                pd->dpp_enabled =
                        ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
                pd->num_dpp_qp =
                        pd->dpp_enabled ? (dev->nic_info.db_page_size /
                                           dev->attr.wqe_size) : 0;
        }

        if (dev->pd_mgr->pd_prealloc_valid)
                return ocrdma_get_pd_num(dev, pd);

retry:
        status = ocrdma_mbx_alloc_pd(dev, pd);
        if (status) {
                if (pd->dpp_enabled) {
                        pd->dpp_enabled = false;
                        pd->num_dpp_qp = 0;
                        goto retry;
                }
                return status;
        }

        return 0;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
                                 struct ocrdma_pd *pd)
{
        return (uctx->cntxt_pd == pd);
}

static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
                              struct ocrdma_pd *pd)
{
        if (dev->pd_mgr->pd_prealloc_valid)
                ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
        else
                ocrdma_mbx_dealloc_pd(dev, pd);
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
                                    struct ocrdma_ucontext *uctx,
                                    struct ib_udata *udata)
{
        struct ib_device *ibdev = &dev->ibdev;
        struct ib_pd *pd;
        int status;

        pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
        if (!pd)
                return -ENOMEM;

        pd->device  = ibdev;
        uctx->cntxt_pd = get_ocrdma_pd(pd);

        status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
        if (status) {
                kfree(uctx->cntxt_pd);
                goto err;
        }

        uctx->cntxt_pd->uctx = uctx;
        uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
        return status;
}

static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = uctx->cntxt_pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        if (uctx->pd_in_use) {
                pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
                       __func__, dev->id, pd->id);
        }
        /* dealloc before freeing: _ocrdma_dealloc_pd() still reads
         * pd->id and pd->dpp_enabled.
         */
        uctx->cntxt_pd = NULL;
        _ocrdma_dealloc_pd(dev, pd);
        kfree(pd);
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = NULL;

        mutex_lock(&uctx->mm_list_lock);
        if (!uctx->pd_in_use) {
                uctx->pd_in_use = true;
                pd = uctx->cntxt_pd;
        }
        mutex_unlock(&uctx->mm_list_lock);

        return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        mutex_lock(&uctx->mm_list_lock);
        uctx->pd_in_use = false;
        mutex_unlock(&uctx->mm_list_lock);
}

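/* Set up the per-process context: a DMA page for the AH table, the
 * mmap key list and a dedicated context PD, then return the queue
 * sizes and table layout to userspace through udata.
 */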
int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
        struct ib_device *ibdev = uctx->device;
        int status;
        struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
        struct ocrdma_alloc_ucontext_resp resp = {};
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

        if (!udata)
                return -EFAULT;
        INIT_LIST_HEAD(&ctx->mm_head);
        mutex_init(&ctx->mm_list_lock);

        ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
                                            &ctx->ah_tbl.pa, GFP_KERNEL);
        if (!ctx->ah_tbl.va)
                return -ENOMEM;

        ctx->ah_tbl.len = map_len;

        resp.ah_tbl_len = ctx->ah_tbl.len;
        resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

        status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
        if (status)
                goto map_err;

        status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
        if (status)
                goto pd_err;

        resp.dev_id = dev->id;
        resp.max_inline_data = dev->attr.max_inline_data;
        resp.wqe_size = dev->attr.wqe_size;
        resp.rqe_size = dev->attr.rqe_size;
        resp.dpp_wqe_size = dev->attr.wqe_size;

        memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
        status = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (status)
                goto cpy_err;
        return 0;

cpy_err:
        ocrdma_dealloc_ucontext_pd(ctx);
pd_err:
        ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
        dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
                          ctx->ah_tbl.pa);
        return status;
}

void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
        struct ocrdma_mm *mm, *tmp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
        struct pci_dev *pdev = dev->nic_info.pdev;

        ocrdma_dealloc_ucontext_pd(uctx);

        ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
        dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
                          uctx->ah_tbl.pa);

        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                list_del(&mm->entry);
                kfree(mm);
        }
}

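/* Map doorbell or DPP pages into userspace. The range must have been
 * registered in the ucontext mmap list; doorbell pages are mapped
 * uncached and DPP pages write-combined, both write-only.
 */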
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
        struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
        unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
        u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
        unsigned long len = (vma->vm_end - vma->vm_start);
        int status;
        bool found;

        if (vma->vm_start & (PAGE_SIZE - 1))
                return -EINVAL;
        found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
        if (!found)
                return -EINVAL;

        if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
                dev->nic_info.db_total_size)) &&
                (len <= dev->nic_info.db_page_size)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else if (dev->nic_info.dpp_unmapped_len &&
                (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
                (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
                        dev->nic_info.dpp_unmapped_len)) &&
                (len <= dev->nic_info.dpp_unmapped_len)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else {
                status = remap_pfn_range(vma, vma->vm_start,
                                         vma->vm_pgoff, len, vma->vm_page_prot);
        }
        return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
                                struct ib_ucontext *ib_ctx,
                                struct ib_udata *udata)
{
        int status;
        u64 db_page_addr;
        u64 dpp_page_addr = 0;
        u32 db_page_size;
        struct ocrdma_alloc_pd_uresp rsp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

        memset(&rsp, 0, sizeof(rsp));
        rsp.id = pd->id;
        rsp.dpp_enabled = pd->dpp_enabled;
        db_page_addr = ocrdma_get_db_addr(dev, pd->id);
        db_page_size = dev->nic_info.db_page_size;

        status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
        if (status)
                return status;

        if (pd->dpp_enabled) {
                dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
                                (pd->id * PAGE_SIZE);
                status = ocrdma_add_mmap(uctx, dpp_page_addr,
                                 PAGE_SIZE);
                if (status)
                        goto dpp_map_err;
                rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
                rsp.dpp_page_addr_lo = dpp_page_addr;
        }

        status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
        if (status)
                goto ucopy_err;

        pd->uctx = uctx;
        return 0;

ucopy_err:
        /* pd->uctx is not set yet on these paths; use the local uctx */
        if (pd->dpp_enabled)
                ocrdma_del_mmap(uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
        ocrdma_del_mmap(uctx, db_page_addr, db_page_size);
        return status;
}

int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
                    struct ib_udata *udata)
{
        struct ib_device *ibdev = ibpd->device;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_pd *pd;
        struct ocrdma_ucontext *uctx = NULL;
        int status;
        u8 is_uctx_pd = false;

        if (udata && context) {
                uctx = get_ocrdma_ucontext(context);
                pd = ocrdma_get_ucontext_pd(uctx);
                if (pd) {
                        is_uctx_pd = true;
                        goto pd_mapping;
                }
        }

        pd = get_ocrdma_pd(ibpd);
        status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
        if (status)
                goto exit;

pd_mapping:
        if (udata && context) {
                status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
                if (status)
                        goto err;
        }
        return 0;

err:
        if (is_uctx_pd)
                ocrdma_release_ucontext_pd(uctx);
        else
                _ocrdma_dealloc_pd(dev, pd);
exit:
        return status;
}

void ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_ucontext *uctx = NULL;
        u64 usr_db;

        uctx = pd->uctx;
        if (uctx) {
                u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
                        (pd->id * PAGE_SIZE);
                if (pd->dpp_enabled)
                        ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
                usr_db = ocrdma_get_db_addr(dev, pd->id);
                ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

                if (is_ucontext_pd(uctx, pd)) {
                        ocrdma_release_ucontext_pd(uctx);
                        return;
                }
        }
        _ocrdma_dealloc_pd(dev, pd);
}

static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                            u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
        int status;

        mr->hwmr.fr_mr = 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hwmr.num_pbls = num_pbls;

        status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
        if (status)
                return status;

        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
        return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
                pr_err("%s err, invalid access rights\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
                                   OCRDMA_ADDR_CHECK_DISABLE);
        if (status) {
                kfree(mr);
                return ERR_PTR(status);
        }

        return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
                                   struct ocrdma_hw_mr *mr)
{
        struct pci_dev *pdev = dev->nic_info.pdev;
        int i = 0;

        if (mr->pbl_table) {
                for (i = 0; i < mr->num_pbls; i++) {
                        if (!mr->pbl_table[i].va)
                                continue;
                        dma_free_coherent(&pdev->dev, mr->pbl_size,
                                          mr->pbl_table[i].va,
                                          mr->pbl_table[i].pa);
                }
                kfree(mr->pbl_table);
                mr->pbl_table = NULL;
        }
}

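/* Choose the smallest PBL size (power-of-two multiple of the minimum
 * HW page) that fits num_pbes page entries into no more than
 * dev->attr.max_num_mr_pbl PBLs.
 */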
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                              u32 num_pbes)
{
        u32 num_pbls = 0;
        u32 idx = 0;
        int status = 0;
        u32 pbl_size;

        do {
                pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
                if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
                        status = -EFAULT;
                        break;
                }
                num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
                num_pbls = num_pbls / (pbl_size / sizeof(u64));
                idx++;
        } while (num_pbls >= dev->attr.max_num_mr_pbl);

        mr->hwmr.num_pbes = num_pbes;
        mr->hwmr.num_pbls = num_pbls;
        mr->hwmr.pbl_size = pbl_size;
        return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
        int status = 0;
        int i;
        u32 dma_len = mr->pbl_size;
        struct pci_dev *pdev = dev->nic_info.pdev;
        void *va;
        dma_addr_t pa;

        mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
                                GFP_KERNEL);

        if (!mr->pbl_table)
                return -ENOMEM;

        for (i = 0; i < mr->num_pbls; i++) {
                va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
                if (!va) {
                        ocrdma_free_mr_pbl_tbl(dev, mr);
                        status = -ENOMEM;
                        break;
                }
                mr->pbl_table[i].va = va;
                mr->pbl_table[i].pa = pa;
        }
        return status;
}

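/* Walk the umem DMA pages, writing one 64-bit PBE per page into the
 * PBL table and spilling into the next PBL whenever the current one
 * fills up.
 */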
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                            u32 num_pbes)
{
        struct ocrdma_pbe *pbe;
        struct sg_dma_page_iter sg_iter;
        struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
        struct ib_umem *umem = mr->umem;
        int pbe_cnt, total_num_pbes = 0;
        u64 pg_addr;

        if (!mr->hwmr.num_pbes)
                return;

        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        pbe_cnt = 0;

        for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
                /* store the page address in pbe */
                pg_addr = sg_page_iter_dma_address(&sg_iter);
                pbe->pa_lo = cpu_to_le32(pg_addr);
                pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
                pbe_cnt += 1;
                total_num_pbes += 1;
                pbe++;

                /* if done building pbes, return; the registration mbx
                 * cmd is issued by the caller.
                 */
                if (total_num_pbes == num_pbes)
                        return;

                /* if the given pbl is full storing the pbes,
                 * move to next pbl.
                 */
                if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
                        pbl_tbl++;
                        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
                        pbe_cnt = 0;
                }
        }
}

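/* Register a user memory region: pin the pages through ib_umem_get(),
 * size and build the PBL/PBE tables, then issue the mailbox command
 * that creates the HW MR.
 */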
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                                 u64 usr_addr, int acc, struct ib_udata *udata)
{
        int status = -ENOMEM;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd;
        u32 num_pbes;

        pd = get_ocrdma_pd(ibpd);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(status);
        mr->umem = ib_umem_get(udata, start, len, acc, 0);
        if (IS_ERR(mr->umem)) {
                status = -EFAULT;
                goto umem_err;
        }
        num_pbes = ib_umem_page_count(mr->umem);
        status = ocrdma_get_pbl_info(dev, mr, num_pbes);
        if (status)
                goto umem_err;

        mr->hwmr.pbe_size = PAGE_SIZE;
        mr->hwmr.fbo = ib_umem_offset(mr->umem);
        mr->hwmr.va = usr_addr;
        mr->hwmr.len = len;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto umem_err;
        build_user_pbes(dev, mr, num_pbes);
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
        if (status)
                goto mbx_err;
        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;

        return &mr->ibmr;

mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
        kfree(mr);
        return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
        struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

        (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

        kfree(mr->pages);
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

        /* it could be user registered memory. */
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        /* Don't stop cleanup, in case FW is unresponsive */
        if (dev->mqe_ctx.fw_error_state) {
                pr_err("%s(%d) fw not responding.\n",
                       __func__, dev->id);
        }
        return 0;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
                                struct ib_udata *udata,
                                struct ib_ucontext *ib_ctx)
{
        int status;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
        struct ocrdma_create_cq_uresp uresp;

        memset(&uresp, 0, sizeof(uresp));
        uresp.cq_id = cq->id;
        uresp.page_size = PAGE_ALIGN(cq->len);
        uresp.num_pages = 1;
        uresp.max_hw_cqe = cq->max_hw_cqe;
        uresp.page_addr[0] = virt_to_phys(cq->va);
        uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.phase_change = cq->phase_change ? 1 : 0;
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) copy error cqid=0x%x.\n",
                       __func__, dev->id, cq->id);
                goto err;
        }
        status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
        if (status)
                goto err;
        status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
        if (status) {
                ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
                goto err;
        }
        cq->ucontext = uctx;
err:
        return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
                               const struct ib_cq_init_attr *attr,
                               struct ib_ucontext *ib_ctx,
                               struct ib_udata *udata)
{
        int entries = attr->cqe;
        struct ocrdma_cq *cq;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_ucontext *uctx = NULL;
        u16 pd_id = 0;
        int status;
        struct ocrdma_create_cq_ureq ureq;

        if (attr->flags)
                return ERR_PTR(-EINVAL);

        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return ERR_PTR(-EFAULT);
        } else
                ureq.dpp_cq = 0;
        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&cq->cq_lock);
        spin_lock_init(&cq->comp_handler_lock);
        INIT_LIST_HEAD(&cq->sq_head);
        INIT_LIST_HEAD(&cq->rq_head);

        if (ib_ctx) {
                uctx = get_ocrdma_ucontext(ib_ctx);
                pd_id = uctx->cntxt_pd->id;
        }

        status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
        if (status) {
                kfree(cq);
                return ERR_PTR(status);
        }
        if (ib_ctx) {
                status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
                if (status)
                        goto ctx_err;
        }
        cq->phase = OCRDMA_CQE_VALID;
        dev->cq_tbl[cq->id] = cq;
        return &cq->ibcq;

ctx_err:
        ocrdma_mbx_destroy_cq(dev, cq);
        kfree(cq);
        return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
                     struct ib_udata *udata)
{
        int status = 0;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

        if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
                status = -EINVAL;
                return status;
        }
        ibcq->cqe = new_cnt;
        return status;
}

static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
        int cqe_cnt;
        int valid_count = 0;
        unsigned long flags;

        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe = NULL;

        cqe = cq->va;
        cqe_cnt = cq->cqe_cnt;

        /* The last irq might have scheduled a polling thread;
         * sync up with it before hard flushing.
         */
        spin_lock_irqsave(&cq->cq_lock, flags);
        while (cqe_cnt) {
                if (is_cqe_valid(cq, cqe))
                        valid_count++;
                cqe++;
                cqe_cnt--;
        }
        ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
}

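/* Tear down a CQ: unhook it from the CQ table, quiesce its EQ
 * interrupt, return doorbell credits for any still-valid CQEs, then
 * destroy it in firmware and drop the user mappings.
 */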
int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_eq *eq = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int pdid = 0;
        u32 irq, indx;

        dev->cq_tbl[cq->id] = NULL;
        indx = ocrdma_get_eq_table_index(dev, cq->eqn);
        BUG_ON(indx == -EINVAL);

        eq = &dev->eq_tbl[indx];
        irq = ocrdma_get_irq(dev, eq);
        synchronize_irq(irq);
        ocrdma_flush_cq(cq);

        (void)ocrdma_mbx_destroy_cq(dev, cq);
        if (cq->ucontext) {
                pdid = cq->ucontext->cntxt_pd->id;
                ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
                                PAGE_ALIGN(cq->len));
                ocrdma_del_mmap(cq->ucontext,
                                ocrdma_get_db_addr(dev, pdid),
                                dev->nic_info.db_page_size);
        }

        kfree(cq);
        return 0;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        int status = -EINVAL;

        if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
                dev->qp_tbl[qp->id] = qp;
                status = 0;
        }
        return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        dev->qp_tbl[qp->id] = NULL;
}

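/* Validate QP creation parameters against device limits and the
 * special rules around the single GSI QP.
 */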
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
                                  struct ib_qp_init_attr *attrs,
                                  struct ib_udata *udata)
{
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->qp_type != IB_QPT_RC) &&
            (attrs->qp_type != IB_QPT_UC) &&
            (attrs->qp_type != IB_QPT_UD)) {
                pr_err("%s(%d) unsupported qp type=0x%x requested\n",
                       __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* Skip the check for QP1 to support CM size of 128 */
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
                pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_wr);
                pr_err("%s(%d) supported send_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_wqe);
                return -EINVAL;
        }
        if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
                pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_wr);
                pr_err("%s(%d) supported recv_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_rqe);
                return -EINVAL;
        }
        if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
                pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_inline_data);
                pr_err("%s(%d) supported inline data size=0x%x\n",
                       __func__, dev->id, dev->attr.max_inline_data);
                return -EINVAL;
        }
        if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
                pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_sge);
                pr_err("%s(%d) supported send_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_send_sge);
                return -EINVAL;
        }
        if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
                pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_sge);
                pr_err("%s(%d) supported recv_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_recv_sge);
                return -EINVAL;
        }
        /* unprivileged user space cannot create special QP */
        if (udata && attrs->qp_type == IB_QPT_GSI) {
                pr_err("%s(%d) Userspace can't create special QPs of type=0x%x\n",
                       __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* allow creating only one GSI type of QP */
        if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
                pr_err("%s(%d) GSI special QPs already created.\n",
                       __func__, dev->id);
                return -EINVAL;
        }
        /* verify consumer QPs are not trying to use GSI QP's CQ */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
                if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
                        (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
                        pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
                                __func__, dev->id);
                        return -EINVAL;
                }
        }
        return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
                                struct ib_udata *udata, int dpp_offset,
                                int dpp_credit_lmt, int srq)
{
        int status;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
        struct ocrdma_pd *pd = qp->pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        memset(&uresp, 0, sizeof(uresp));
        usr_db = dev->nic_info.unmapped_db +
                        (pd->id * dev->nic_info.db_page_size);
        uresp.qp_id = qp->id;
        uresp.sq_dbid = qp->sq.dbid;
        uresp.num_sq_pages = 1;
        uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
        uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
        uresp.num_wqe_allocated = qp->sq.max_cnt;
        if (!srq) {
                uresp.rq_dbid = qp->rq.dbid;
                uresp.num_rq_pages = 1;
                uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
                uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
                uresp.num_rqe_allocated = qp->rq.max_cnt;
        }
        uresp.db_page_addr = usr_db;
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
        uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
        uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

        if (qp->dpp_enabled) {
                uresp.dpp_credit = dpp_credit_lmt;
                uresp.dpp_offset = dpp_offset;
        }
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) user copy error.\n", __func__, dev->id);
                goto err;
        }
        status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
                                 uresp.sq_page_size);
        if (status)
                goto err;

        if (!srq) {
                status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
                                         uresp.rq_page_size);
                if (status)
                        goto rq_map_err;
        }
        return status;
rq_map_err:
        ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
        return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
                             struct ocrdma_pd *pd)
{
        if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_RQ_OFFSET;
        } else {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_RQ_OFFSET;
        }
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
        qp->wqe_wr_id_tbl =
            kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
                    GFP_KERNEL);
        if (qp->wqe_wr_id_tbl == NULL)
                return -ENOMEM;
        qp->rqe_wr_id_tbl =
            kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
        if (qp->rqe_wr_id_tbl == NULL)
                return -ENOMEM;

        return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
                                      struct ocrdma_pd *pd,
                                      struct ib_qp_init_attr *attrs)
{
        qp->pd = pd;
        spin_lock_init(&qp->q_lock);
        INIT_LIST_HEAD(&qp->sq_entry);
        INIT_LIST_HEAD(&qp->rq_entry);

        qp->qp_type = attrs->qp_type;
        qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
        qp->max_inline_data = attrs->cap.max_inline_data;
        qp->sq.max_sges = attrs->cap.max_send_sge;
        qp->rq.max_sges = attrs->cap.max_recv_sge;
        qp->state = OCRDMA_QPS_RST;
        qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
                                   struct ib_qp_init_attr *attrs)
{
        if (attrs->qp_type == IB_QPT_GSI) {
                dev->gsi_qp_created = 1;
                dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
                dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
        }
}

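/* Create a QP under dev_lock: firmware allocates the queues, then the
 * qpn map, doorbell pointers and (for kernel QPs) wr_id tables are set
 * up, and user QPs get their queue layout copied back through udata.
 */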
struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
                               struct ib_qp_init_attr *attrs,
                               struct ib_udata *udata)
{
        int status;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_create_qp_ureq ureq;
        u16 dpp_credit_lmt, dpp_offset;

        status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
        if (status)
                goto gen_err;

        memset(&ureq, 0, sizeof(ureq));
        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return ERR_PTR(-EFAULT);
        }
        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp) {
                status = -ENOMEM;
                goto gen_err;
        }
        ocrdma_set_qp_init_params(qp, pd, attrs);
        if (udata == NULL)
                qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
                                        OCRDMA_QP_FAST_REG);

        mutex_lock(&dev->dev_lock);
        status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
                                        ureq.dpp_cq_id,
                                        &dpp_offset, &dpp_credit_lmt);
        if (status)
                goto mbx_err;

        /* user-space QPs' wr_id tables are managed by the library */
        if (udata == NULL) {
                status = ocrdma_alloc_wr_id_tbl(qp);
                if (status)
                        goto map_err;
        }

        status = ocrdma_add_qpn_map(dev, qp);
        if (status)
                goto map_err;
        ocrdma_set_qp_db(dev, qp, pd);
        if (udata) {
                status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
                                              dpp_credit_lmt,
                                              (attrs->srq != NULL));
                if (status)
                        goto cpy_err;
        }
        ocrdma_store_gsi_qp_cq(dev, attrs);
        qp->ibqp.qp_num = qp->id;
        mutex_unlock(&dev->dev_lock);
        return &qp->ibqp;

cpy_err:
        ocrdma_del_qpn_map(dev, qp);
map_err:
        ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
        mutex_unlock(&dev->dev_lock);
        kfree(qp->wqe_wr_id_tbl);
        kfree(qp->rqe_wr_id_tbl);
        kfree(qp);
        pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
        return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask)
{
        int status = 0;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        enum ib_qp_state old_qps;

        qp = get_ocrdma_qp(ibqp);
        dev = get_ocrdma_dev(ibqp->device);
        if (attr_mask & IB_QP_STATE)
                status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
        /* if the new and previous states are the same, hw doesn't need
         * to know about it.
         */
        if (status < 0)
                return status;
        return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
}

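/* Verb entry point: validate the requested state transition with
 * ib_modify_qp_is_ok() before applying it via _ocrdma_modify_qp().
 */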
int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_udata *udata)
{
        unsigned long flags;
        int status = -EINVAL;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        enum ib_qp_state old_qps, new_qps;

        qp = get_ocrdma_qp(ibqp);
        dev = get_ocrdma_dev(ibqp->device);

        /* synchronize with multiple contexts trying to change or
         * retrieve the qp state.
         */
        mutex_lock(&dev->dev_lock);
        /* synchronize with wqe, rqe posting and cqe processing contexts */
        spin_lock_irqsave(&qp->q_lock, flags);
        old_qps = get_ibqp_state(qp->state);
        if (attr_mask & IB_QP_STATE)
                new_qps = attr->qp_state;
        else
                new_qps = old_qps;
        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
                pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
                       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
                       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
                       old_qps, new_qps);
                goto param_err;
        }

        status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
        if (status > 0)
                status = 0;
param_err:
        mutex_unlock(&dev->dev_lock);
        return status;
}

1455 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1456 {
1457         switch (mtu) {
1458         case 256:
1459                 return IB_MTU_256;
1460         case 512:
1461                 return IB_MTU_512;
1462         case 1024:
1463                 return IB_MTU_1024;
1464         case 2048:
1465                 return IB_MTU_2048;
1466         case 4096:
1467                 return IB_MTU_4096;
1468         default:
1469                 return IB_MTU_1024;
1470         }
1471 }
1472
1473 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1474 {
1475         int ib_qp_acc_flags = 0;
1476
1477         if (qp_cap_flags & OCRDMA_QP_INB_WR)
1478                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1479         if (qp_cap_flags & OCRDMA_QP_INB_RD)
1480                 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1481         return ib_qp_acc_flags;
1482 }
1483
1484 int ocrdma_query_qp(struct ib_qp *ibqp,
1485                     struct ib_qp_attr *qp_attr,
1486                     int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1487 {
1488         int status;
1489         u32 qp_state;
1490         struct ocrdma_qp_params params;
1491         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1492         struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1493
1494         memset(&params, 0, sizeof(params));
1495         mutex_lock(&dev->dev_lock);
1496         status = ocrdma_mbx_query_qp(dev, qp, &params);
1497         mutex_unlock(&dev->dev_lock);
1498         if (status)
1499                 goto mbx_err;
1500         if (qp->qp_type == IB_QPT_UD)
1501                 qp_attr->qkey = params.qkey;
1502         qp_attr->path_mtu =
1503                 ocrdma_mtu_int_to_enum((params.path_mtu_pkey_indx &
1504                                         OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1505                                         OCRDMA_QP_PARAMS_PATH_MTU_SHIFT);
1506         qp_attr->path_mig_state = IB_MIG_MIGRATED;
1507         qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1508         qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1509         qp_attr->dest_qp_num =
1510             params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1511
1512         qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1513         qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1514         qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1515         qp_attr->cap.max_send_sge = qp->sq.max_sges;
1516         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1517         qp_attr->cap.max_inline_data = qp->max_inline_data;
1518         qp_init_attr->cap = qp_attr->cap;
1519         qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1520
1521         rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
1522                         params.rnt_rc_sl_fl &
1523                           OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
1524                         qp->sgid_idx,
1525                         (params.hop_lmt_rq_psn &
1526                          OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1527                          OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
1528                         (params.tclass_sq_psn &
1529                          OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1530                          OCRDMA_QP_PARAMS_TCLASS_SHIFT);
1531         rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);
1532
1533         rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
1534         rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
1535                                            OCRDMA_QP_PARAMS_SL_MASK) >>
1536                                            OCRDMA_QP_PARAMS_SL_SHIFT);
1537         qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1538                             OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1539                                 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1540         qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1541                               OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1542                                 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1543         qp_attr->retry_cnt =
1544             (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1545                 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1546         qp_attr->min_rnr_timer = 0;
1547         qp_attr->pkey_index = 0;
1548         qp_attr->port_num = 1;
1549         rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
1550         rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
1551         qp_attr->alt_pkey_index = 0;
1552         qp_attr->alt_port_num = 0;
1553         qp_attr->alt_timeout = 0;
1554         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1555         qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1556                     OCRDMA_QP_PARAMS_STATE_SHIFT;
1557         qp_attr->qp_state = get_ibqp_state(qp_state);
1558         qp_attr->cur_qp_state = qp_attr->qp_state;
1559         qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1560         qp_attr->max_dest_rd_atomic =
1561             params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1562         qp_attr->max_rd_atomic =
1563             params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1564         qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1565                                 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1566         /* Sync driver QP state with FW */
1567         ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1568 mbx_err:
1569         return status;
1570 }
1571
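/* srq shadow-table index allocator bitmap: a set bit means the index
 * is free. ocrdma_srq_get_idx() clears a bit when handing out a tag;
 * the completion and discard paths toggle it back. idx / 32 selects
 * the u32 word, idx % 32 the bit within it.
 */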
1572 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1573 {
1574         unsigned int i = idx / 32;
1575         u32 mask = (1U << (idx % 32));
1576
1577         srq->idx_bit_fields[i] ^= mask;
1578 }
1579
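/* Free slots in a circular hw work queue. head is the next slot to
 * post, tail the next slot to be consumed. Since the increment
 * helpers mask with max_wqe_idx, max_cnt is presumably a power of
 * two with max_wqe_idx == max_cnt - 1, so this reduces to
 * (tail - head - 1) mod max_cnt: one slot stays reserved so that
 * head == tail always means empty. E.g. max_cnt = 16, head = 5,
 * tail = 3 gives ((15 - 5) + 3) % 16 = 13 free entries.
 */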
1580 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1581 {
1582         return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1583 }
1584
1585 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1586 {
1587         return (qp->sq.tail == qp->sq.head);
1588 }
1589
1590 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1591 {
1592         return (qp->rq.tail == qp->rq.head);
1593 }
1594
1595 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1596 {
1597         return q->va + (q->head * q->entry_size);
1598 }
1599
1600 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1601                                       u32 idx)
1602 {
1603         return q->va + (idx * q->entry_size);
1604 }
1605
1606 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1607 {
1608         q->head = (q->head + 1) & q->max_wqe_idx;
1609 }
1610
1611 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1612 {
1613         q->tail = (q->tail + 1) & q->max_wqe_idx;
1614 }
1615
1616 /* discard the cqe for a given QP */
1617 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1618 {
1619         unsigned long cq_flags;
1620         unsigned long flags;
1621         int discard_cnt = 0;
1622         u32 cur_getp, stop_getp;
1623         struct ocrdma_cqe *cqe;
1624         u32 qpn = 0, wqe_idx = 0;
1625
1626         spin_lock_irqsave(&cq->cq_lock, cq_flags);
1627
1628         /* Traverse the CQEs in the hw CQ and find the ones that
1629          * match the given qp; mark each match discarded by
1630          * clearing its qpn.
1631          * The doorbell is rung in poll_cq() since we don't
1632          * complete cqes out of order.
1633          */
1634
1635         cur_getp = cq->getp;
1636         /* find up to where we reap the cq. */
1637         stop_getp = cur_getp;
1638         do {
1639                 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1640                         break;
1641
1642                 cqe = cq->va + cur_getp;
1643                 /* exit when (a) the whole hw cq has been reaped,
1644                  * or (b) the qp's sq/rq becomes empty (see the
1645                  * break above and the while condition below).
1646                  */
1647                 qpn = le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK;
1648                 /* skip previously discarded cqes (qpn cleared) and
1649                  * cqes that do not match this qp. */
1650                 if (qpn == 0 || qpn != qp->id)
1651                         goto skip_cqe;
1652
1653                 if (is_cqe_for_sq(cqe)) {
1654                         ocrdma_hwq_inc_tail(&qp->sq);
1655                 } else {
1656                         if (qp->srq) {
1657                                 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1658                                         OCRDMA_CQE_BUFTAG_SHIFT) &
1659                                         qp->srq->rq.max_wqe_idx;
1660                                 BUG_ON(wqe_idx < 1);
1661                                 spin_lock_irqsave(&qp->srq->q_lock, flags);
1662                                 ocrdma_hwq_inc_tail(&qp->srq->rq);
1663                                 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1664                                 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1665
1666                         } else {
1667                                 ocrdma_hwq_inc_tail(&qp->rq);
1668                         }
1669                 }
1670                 /* mark cqe discarded so that it is not picked up later
1671                  * in the poll_cq().
1672                  */
1673                 discard_cnt += 1;
1674                 cqe->cmn.qpn = 0;
1675 skip_cqe:
1676                 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1677         } while (cur_getp != stop_getp);
1678         spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1679 }
1680
1681 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1682 {
1683         bool found = false;
1684         unsigned long flags;
1685         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1686         /* sync with any active CQ poll */
1687
1688         spin_lock_irqsave(&dev->flush_q_lock, flags);
1689         found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1690         if (found)
1691                 list_del(&qp->sq_entry);
1692         if (!qp->srq) {
1693                 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1694                 if (found)
1695                         list_del(&qp->rq_entry);
1696         }
1697         spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1698 }
1699
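/* QP teardown: move the QP to ERR so no new work completes normally,
 * destroy it in FW, unhook it from the qpn map under the CQ lock(s)
 * so a concurrent poll_cq() cannot look it up, then (for kernel QPs)
 * discard any of its CQEs still sitting in the hw CQs; user QPs just
 * drop their mmap entries instead.
 */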
1700 int ocrdma_destroy_qp(struct ib_qp *ibqp)
1701 {
1702         struct ocrdma_pd *pd;
1703         struct ocrdma_qp *qp;
1704         struct ocrdma_dev *dev;
1705         struct ib_qp_attr attrs;
1706         int attr_mask;
1707         unsigned long flags;
1708
1709         qp = get_ocrdma_qp(ibqp);
1710         dev = get_ocrdma_dev(ibqp->device);
1711
1712         pd = qp->pd;
1713
1714         /* change the QP state to ERROR */
1715         if (qp->state != OCRDMA_QPS_RST) {
1716                 attrs.qp_state = IB_QPS_ERR;
1717                 attr_mask = IB_QP_STATE;
1718                 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1719         }
1720         /* ensure that CQEs for a newly created QP (whose id may be
1721          * the same as that of the QP just being destroyed) don't get
1722          * discarded until the old CQEs are discarded.
1723          */
1724         mutex_lock(&dev->dev_lock);
1725         (void) ocrdma_mbx_destroy_qp(dev, qp);
1726
1727         /*
1728          * acquire the CQ lock while destroy is in progress, in order to
1729          * protect against processing in-flight CQEs for this QP.
1730          */
1731         spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1732         if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
1733                 spin_lock(&qp->rq_cq->cq_lock);
1734                 ocrdma_del_qpn_map(dev, qp);
1735                 spin_unlock(&qp->rq_cq->cq_lock);
1736         } else {
1737                 ocrdma_del_qpn_map(dev, qp);
1738         }
1739         spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1740
1741         if (!pd->uctx) {
1742                 ocrdma_discard_cqes(qp, qp->sq_cq);
1743                 ocrdma_discard_cqes(qp, qp->rq_cq);
1744         }
1745         mutex_unlock(&dev->dev_lock);
1746
1747         if (pd->uctx) {
1748                 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1749                                 PAGE_ALIGN(qp->sq.len));
1750                 if (!qp->srq)
1751                         ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1752                                         PAGE_ALIGN(qp->rq.len));
1753         }
1754
1755         ocrdma_del_flush_qp(qp);
1756
1757         kfree(qp->wqe_wr_id_tbl);
1758         kfree(qp->rqe_wr_id_tbl);
1759         kfree(qp);
1760         return 0;
1761 }
1762
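/* Report the SRQ's RQ ring and doorbell layout to user space. The
 * doorbell offset/shift differs by ASIC generation (shift 24 on
 * SKH-R, 16 otherwise); the rq page is also added to the uctx mmap
 * list so the user-space provider can map it.
 */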
1763 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1764                                 struct ib_udata *udata)
1765 {
1766         int status;
1767         struct ocrdma_create_srq_uresp uresp;
1768
1769         memset(&uresp, 0, sizeof(uresp));
1770         uresp.rq_dbid = srq->rq.dbid;
1771         uresp.num_rq_pages = 1;
1772         uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1773         uresp.rq_page_size = srq->rq.len;
1774         uresp.db_page_addr = dev->nic_info.unmapped_db +
1775             (srq->pd->id * dev->nic_info.db_page_size);
1776         uresp.db_page_size = dev->nic_info.db_page_size;
1777         uresp.num_rqe_allocated = srq->rq.max_cnt;
1778         if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1779                 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1780                 uresp.db_shift = 24;
1781         } else {
1782                 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1783                 uresp.db_shift = 16;
1784         }
1785
1786         status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1787         if (status)
1788                 return status;
1789         return ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1790                                uresp.rq_page_size);
1794 }
1795
1796 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1797                                  struct ib_srq_init_attr *init_attr,
1798                                  struct ib_udata *udata)
1799 {
1800         int status = -ENOMEM;
1801         struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1802         struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1803         struct ocrdma_srq *srq;
1804
1805         if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1806                 return ERR_PTR(-EINVAL);
1807         if (init_attr->attr.max_wr > dev->attr.max_rqe)
1808                 return ERR_PTR(-EINVAL);
1809
1810         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1811         if (!srq)
1812                 return ERR_PTR(status);
1813
1814         spin_lock_init(&srq->q_lock);
1815         srq->pd = pd;
1816         srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1817         status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1818         if (status)
1819                 goto err;
1820
1821         if (udata == NULL) {
1822                 status = -ENOMEM;
1823                 srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
1824                                              GFP_KERNEL);
1825                 if (srq->rqe_wr_id_tbl == NULL)
1826                         goto arm_err;
1827
1828                 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1829                     (srq->rq.max_cnt % 32 ? 1 : 0);
1830                 srq->idx_bit_fields =
1831                     kmalloc_array(srq->bit_fields_len, sizeof(u32),
1832                                   GFP_KERNEL);
1833                 if (srq->idx_bit_fields == NULL)
1834                         goto arm_err;
1835                 memset(srq->idx_bit_fields, 0xff,
1836                        srq->bit_fields_len * sizeof(u32));
1837         }
1838
1839         if (init_attr->attr.srq_limit) {
1840                 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1841                 if (status)
1842                         goto arm_err;
1843         }
1844
1845         if (udata) {
1846                 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1847                 if (status)
1848                         goto arm_err;
1849         }
1850
1851         return &srq->ibsrq;
1852
1853 arm_err:
1854         ocrdma_mbx_destroy_srq(dev, srq);
1855 err:
1856         kfree(srq->rqe_wr_id_tbl);
1857         kfree(srq->idx_bit_fields);
1858         kfree(srq);
1859         return ERR_PTR(status);
1860 }
1861
1862 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1863                       struct ib_srq_attr *srq_attr,
1864                       enum ib_srq_attr_mask srq_attr_mask,
1865                       struct ib_udata *udata)
1866 {
1867         int status;
1868         struct ocrdma_srq *srq;
1869
1870         srq = get_ocrdma_srq(ibsrq);
1871         if (srq_attr_mask & IB_SRQ_MAX_WR)
1872                 status = -EINVAL;
1873         else
1874                 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1875         return status;
1876 }
1877
1878 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1879 {
1880         int status;
1881         struct ocrdma_srq *srq;
1882
1883         srq = get_ocrdma_srq(ibsrq);
1884         status = ocrdma_mbx_query_srq(srq, srq_attr);
1885         return status;
1886 }
1887
1888 int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1889 {
1890         int status;
1891         struct ocrdma_srq *srq;
1892         struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1893
1894         srq = get_ocrdma_srq(ibsrq);
1895
1896         status = ocrdma_mbx_destroy_srq(dev, srq);
1897
1898         if (srq->pd->uctx)
1899                 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1900                                 PAGE_ALIGN(srq->rq.len));
1901
1902         kfree(srq->idx_bit_fields);
1903         kfree(srq->rqe_wr_id_tbl);
1904         kfree(srq);
1905         return status;
1906 }
1907
1908 /* unprivileged verbs and their support functions. */
1909 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1910                                 struct ocrdma_hdr_wqe *hdr,
1911                                 const struct ib_send_wr *wr)
1912 {
1913         struct ocrdma_ewqe_ud_hdr *ud_hdr =
1914                 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1915         struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
1916
1917         ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
1918         if (qp->qp_type == IB_QPT_GSI)
1919                 ud_hdr->qkey = qp->qkey;
1920         else
1921                 ud_hdr->qkey = ud_wr(wr)->remote_qkey;
1922         ud_hdr->rsvd_ahid = ah->id;
1923         ud_hdr->hdr_type = ah->hdr_type;
1924         if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1925                 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1926 }
1927
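/* Copy an ib_sge list into wqe sges, splitting each 64-bit address
 * into lo/hi halves and accumulating the total length into the wqe
 * header. For zero-sge requests a single zeroed sge slot is written.
 */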
1928 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1929                               struct ocrdma_sge *sge, int num_sge,
1930                               struct ib_sge *sg_list)
1931 {
1932         int i;
1933
1934         for (i = 0; i < num_sge; i++) {
1935                 sge[i].lrkey = sg_list[i].lkey;
1936                 sge[i].addr_lo = sg_list[i].addr;
1937                 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1938                 sge[i].len = sg_list[i].length;
1939                 hdr->total_len += sg_list[i].length;
1940         }
1941         if (num_sge == 0)
1942                 memset(sge, 0, sizeof(*sge));
1943 }
1944
1945 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1946 {
1947         uint32_t total_len = 0, i;
1948
1949         for (i = 0; i < num_sge; i++)
1950                 total_len += sg_list[i].length;
1951         return total_len;
1952 }
1953
1954
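/* For IB_SEND_INLINE on non-UD QPs the payload is copied straight
 * into the wqe (type INLINE, bounded by max_inline_data); otherwise
 * sges referencing the user buffers are built (type LKEY). Either
 * way the wqe size, in OCRDMA_WQE_STRIDE units, is encoded into
 * hdr->cw.
 */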
1955 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1956                                     struct ocrdma_hdr_wqe *hdr,
1957                                     struct ocrdma_sge *sge,
1958                                     const struct ib_send_wr *wr, u32 wqe_size)
1959 {
1960         int i;
1961         char *dpp_addr;
1962
1963         if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1964                 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1965                 if (unlikely(hdr->total_len > qp->max_inline_data)) {
1966                         pr_err("%s() supported_len=0x%x, unsupported len req=0x%x\n",
1967                                __func__, qp->max_inline_data,
1968                                hdr->total_len);
1969                         return -EINVAL;
1970                 }
1971                 dpp_addr = (char *)sge;
1972                 for (i = 0; i < wr->num_sge; i++) {
1973                         memcpy(dpp_addr,
1974                                (void *)(unsigned long)wr->sg_list[i].addr,
1975                                wr->sg_list[i].length);
1976                         dpp_addr += wr->sg_list[i].length;
1977                 }
1978
1979                 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
1980                 if (hdr->total_len == 0)
1981                         wqe_size += sizeof(struct ocrdma_sge);
1982                 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1983         } else {
1984                 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1985                 if (wr->num_sge)
1986                         wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1987                 else
1988                         wqe_size += sizeof(struct ocrdma_sge);
1989                 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1990         }
1991         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1992         return 0;
1993 }
1994
1995 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1996                              const struct ib_send_wr *wr)
1997 {
1998         int status;
1999         struct ocrdma_sge *sge;
2000         u32 wqe_size = sizeof(*hdr);
2001
2002         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2003                 ocrdma_build_ud_hdr(qp, hdr, wr);
2004                 sge = (struct ocrdma_sge *)(hdr + 2);
2005                 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
2006         } else {
2007                 sge = (struct ocrdma_sge *)(hdr + 1);
2008         }
2009
2010         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2011         return status;
2012 }
2013
2014 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2015                               const struct ib_send_wr *wr)
2016 {
2017         int status;
2018         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2019         struct ocrdma_sge *sge = ext_rw + 1;
2020         u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2021
2022         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2023         if (status)
2024                 return status;
2025         ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2026         ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2027         ext_rw->lrkey = rdma_wr(wr)->rkey;
2028         ext_rw->len = hdr->total_len;
2029         return 0;
2030 }
2031
2032 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2033                               const struct ib_send_wr *wr)
2034 {
2035         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2036         struct ocrdma_sge *sge = ext_rw + 1;
2037         u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2038             sizeof(struct ocrdma_hdr_wqe);
2039
2040         ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2041         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2042         hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2043         hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2044
2045         ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2046         ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2047         ext_rw->lrkey = rdma_wr(wr)->rkey;
2048         ext_rw->len = hdr->total_len;
2049 }
2050
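/* Encode a page size as log2(pg_sz / 4K): 4096 -> 0, 8192 -> 1, ...
 * 256M -> 16. A quick sanity sketch, assuming pg_sz is one of the
 * supported powers of two:
 *   get_encoded_page_size(4096)  == 0
 *   get_encoded_page_size(65536) == 4
 */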
2051 static int get_encoded_page_size(int pg_sz)
2052 {
2053         /* Max supported size is 256M (4096 << 16) */
2054         int i;
2055         for (i = 0; i < 17; i++)
2056                 if (pg_sz == (4096 << i))
2057                         break;
2058         return i;
2059 }
2060
2061 static int ocrdma_build_reg(struct ocrdma_qp *qp,
2062                             struct ocrdma_hdr_wqe *hdr,
2063                             const struct ib_reg_wr *wr)
2064 {
2065         u64 fbo;
2066         struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2067         struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
2068         struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
2069         struct ocrdma_pbe *pbe;
2070         u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2071         int num_pbes = 0, i;
2072
2073         wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2074
2075         hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2076         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2077
2078         if (wr->access & IB_ACCESS_LOCAL_WRITE)
2079                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2080         if (wr->access & IB_ACCESS_REMOTE_WRITE)
2081                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2082         if (wr->access & IB_ACCESS_REMOTE_READ)
2083                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2084         hdr->lkey = wr->key;
2085         hdr->total_len = mr->ibmr.length;
2086
2087         fbo = mr->ibmr.iova - mr->pages[0];
2088
2089         fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
2090         fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
2091         fast_reg->fbo_hi = upper_32_bits(fbo);
2092         fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2093         fast_reg->num_sges = mr->npages;
2094         fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);
2095
2096         pbe = pbl_tbl->va;
2097         for (i = 0; i < mr->npages; i++) {
2098                 u64 buf_addr = mr->pages[i];
2099
2100                 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2101                 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2102                 num_pbes += 1;
2103                 pbe++;
2104
2105                 /* if the current pbl is full of pbes,
2106                  * move to the next pbl.
2107                  */
2108                 if (num_pbes == (mr->hwmr.pbl_size/sizeof(u64))) {
2109                         pbl_tbl++;
2110                         pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2111                 }
2112         }
2113
2114         return 0;
2115 }
2116
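/* SQ doorbell: the low bits carry the queue id and the field at
 * OCRDMA_DB_SQ_SHIFT appears to carry the count of newly posted
 * wqes - always 1 here, since the db is rung once per wqe.
 */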
2117 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2118 {
2119         u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2120
2121         iowrite32(val, qp->sq_db);
2122 }
2123
2124 int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2125                      const struct ib_send_wr **bad_wr)
2126 {
2127         int status = 0;
2128         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2129         struct ocrdma_hdr_wqe *hdr;
2130         unsigned long flags;
2131
2132         spin_lock_irqsave(&qp->q_lock, flags);
2133         if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2134                 spin_unlock_irqrestore(&qp->q_lock, flags);
2135                 *bad_wr = wr;
2136                 return -EINVAL;
2137         }
2138
2139         while (wr) {
2140                 if (qp->qp_type == IB_QPT_UD &&
2141                     (wr->opcode != IB_WR_SEND &&
2142                      wr->opcode != IB_WR_SEND_WITH_IMM)) {
2143                         *bad_wr = wr;
2144                         status = -EINVAL;
2145                         break;
2146                 }
2147                 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2148                     wr->num_sge > qp->sq.max_sges) {
2149                         *bad_wr = wr;
2150                         status = -ENOMEM;
2151                         break;
2152                 }
2153                 hdr = ocrdma_hwq_head(&qp->sq);
2154                 hdr->cw = 0;
2155                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2156                         hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2157                 if (wr->send_flags & IB_SEND_FENCE)
2158                         hdr->cw |=
2159                             (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2160                 if (wr->send_flags & IB_SEND_SOLICITED)
2161                         hdr->cw |=
2162                             (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2163                 hdr->total_len = 0;
2164                 switch (wr->opcode) {
2165                 case IB_WR_SEND_WITH_IMM:
2166                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2167                         hdr->immdt = ntohl(wr->ex.imm_data);
2168                         /* fall through */
2169                 case IB_WR_SEND:
2170                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2171                         status = ocrdma_build_send(qp, hdr, wr);
2172                         break;
2173                 case IB_WR_SEND_WITH_INV:
2174                         hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2175                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2176                         hdr->lkey = wr->ex.invalidate_rkey;
2177                         status = ocrdma_build_send(qp, hdr, wr);
2178                         break;
2179                 case IB_WR_RDMA_WRITE_WITH_IMM:
2180                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2181                         hdr->immdt = ntohl(wr->ex.imm_data);
2182                         /* fall through */
2183                 case IB_WR_RDMA_WRITE:
2184                         hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2185                         status = ocrdma_build_write(qp, hdr, wr);
2186                         break;
2187                 case IB_WR_RDMA_READ:
2188                         ocrdma_build_read(qp, hdr, wr);
2189                         break;
2190                 case IB_WR_LOCAL_INV:
2191                         hdr->cw |=
2192                             (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2193                         hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2194                                         sizeof(struct ocrdma_sge)) /
2195                                 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2196                         hdr->lkey = wr->ex.invalidate_rkey;
2197                         break;
2198                 case IB_WR_REG_MR:
2199                         status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
2200                         break;
2201                 default:
2202                         status = -EINVAL;
2203                         break;
2204                 }
2205                 if (status) {
2206                         *bad_wr = wr;
2207                         break;
2208                 }
2209                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2210                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2211                 else
2212                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2213                 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2214                 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2215                                    OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2216                 /* make sure wqe is written before adapter can access it */
2217                 wmb();
2218                 /* inform hw to start processing it */
2219                 ocrdma_ring_sq_db(qp);
2220
2221                 /* update pointer, counter for next wr */
2222                 ocrdma_hwq_inc_head(&qp->sq);
2223                 wr = wr->next;
2224         }
2225         spin_unlock_irqrestore(&qp->q_lock, flags);
2226         return status;
2227 }
2228
2229 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2230 {
2231         u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2232
2233         iowrite32(val, qp->rq_db);
2234 }
2235
2236 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
2237                              const struct ib_recv_wr *wr, u16 tag)
2238 {
2239         u32 wqe_size = 0;
2240         struct ocrdma_sge *sge;
2241         if (wr->num_sge)
2242                 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2243         else
2244                 wqe_size = sizeof(*sge) + sizeof(*rqe);
2245
2246         rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2247                                 OCRDMA_WQE_SIZE_SHIFT);
2248         rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2249         rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2250         rqe->total_len = 0;
2251         rqe->rsvd_tag = tag;
2252         sge = (struct ocrdma_sge *)(rqe + 1);
2253         ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2254         ocrdma_cpu_to_le32(rqe, wqe_size);
2255 }
2256
2257 int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
2258                      const struct ib_recv_wr **bad_wr)
2259 {
2260         int status = 0;
2261         unsigned long flags;
2262         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2263         struct ocrdma_hdr_wqe *rqe;
2264
2265         spin_lock_irqsave(&qp->q_lock, flags);
2266         if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2267                 spin_unlock_irqrestore(&qp->q_lock, flags);
2268                 *bad_wr = wr;
2269                 return -EINVAL;
2270         }
2271         while (wr) {
2272                 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2273                     wr->num_sge > qp->rq.max_sges) {
2274                         *bad_wr = wr;
2275                         status = -ENOMEM;
2276                         break;
2277                 }
2278                 rqe = ocrdma_hwq_head(&qp->rq);
2279                 ocrdma_build_rqe(rqe, wr, 0);
2280
2281                 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2282                 /* make sure rqe is written before adapter can access it */
2283                 wmb();
2284
2285                 /* inform hw to start processing it */
2286                 ocrdma_ring_rq_db(qp);
2287
2288                 /* update pointer, counter for next wr */
2289                 ocrdma_hwq_inc_head(&qp->rq);
2290                 wr = wr->next;
2291         }
2292         spin_unlock_irqrestore(&qp->q_lock, flags);
2293         return status;
2294 }
2295
2296 /* cqes for an srq's rqes can potentially arrive out of order.
2297  * The index gives the entry in the shadow table where the wr_id
2298  * is stored. The tag/index is returned in the cqe so a given
2299  * rqe can be referenced back.
2300  */
2301 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2302 {
2303         int row = 0;
2304         int indx = 0;
2305
2306         for (row = 0; row < srq->bit_fields_len; row++) {
2307                 if (srq->idx_bit_fields[row]) {
2308                         indx = ffs(srq->idx_bit_fields[row]);
2309                         indx = (row * 32) + (indx - 1);
2310                         BUG_ON(indx >= srq->rq.max_cnt);
2311                         ocrdma_srq_toggle_bit(srq, indx);
2312                         break;
2313                 }
2314         }
2315
2316         BUG_ON(row == srq->bit_fields_len);
2317         return indx + 1; /* Use from index 1 */
2318 }
2319
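
/* Example, assuming a fresh bitmap (all bits set): the first call
 * returns tag 1 (bit 0 cleared), the next returns tag 2; completing
 * the rqe tagged 1 toggles bit 0 back on, making tag 1 available to
 * a later call.
 */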
2320 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2321 {
2322         u32 val = srq->rq.dbid | (1 << 16);
2323
2324         iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2325 }
2326
2327 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2328                          const struct ib_recv_wr **bad_wr)
2329 {
2330         int status = 0;
2331         unsigned long flags;
2332         struct ocrdma_srq *srq;
2333         struct ocrdma_hdr_wqe *rqe;
2334         u16 tag;
2335
2336         srq = get_ocrdma_srq(ibsrq);
2337
2338         spin_lock_irqsave(&srq->q_lock, flags);
2339         while (wr) {
2340                 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2341                     wr->num_sge > srq->rq.max_sges) {
2342                         status = -ENOMEM;
2343                         *bad_wr = wr;
2344                         break;
2345                 }
2346                 tag = ocrdma_srq_get_idx(srq);
2347                 rqe = ocrdma_hwq_head(&srq->rq);
2348                 ocrdma_build_rqe(rqe, wr, tag);
2349
2350                 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2351                 /* make sure rqe is written before adapter can perform DMA */
2352                 wmb();
2353                 /* inform hw to start processing it */
2354                 ocrdma_ring_srq_db(srq);
2355                 /* update pointer, counter for next wr */
2356                 ocrdma_hwq_inc_head(&srq->rq);
2357                 wr = wr->next;
2358         }
2359         spin_unlock_irqrestore(&srq->q_lock, flags);
2360         return status;
2361 }
2362
2363 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2364 {
2365         enum ib_wc_status ibwc_status;
2366
2367         switch (status) {
2368         case OCRDMA_CQE_GENERAL_ERR:
2369                 ibwc_status = IB_WC_GENERAL_ERR;
2370                 break;
2371         case OCRDMA_CQE_LOC_LEN_ERR:
2372                 ibwc_status = IB_WC_LOC_LEN_ERR;
2373                 break;
2374         case OCRDMA_CQE_LOC_QP_OP_ERR:
2375                 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2376                 break;
2377         case OCRDMA_CQE_LOC_EEC_OP_ERR:
2378                 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2379                 break;
2380         case OCRDMA_CQE_LOC_PROT_ERR:
2381                 ibwc_status = IB_WC_LOC_PROT_ERR;
2382                 break;
2383         case OCRDMA_CQE_WR_FLUSH_ERR:
2384                 ibwc_status = IB_WC_WR_FLUSH_ERR;
2385                 break;
2386         case OCRDMA_CQE_MW_BIND_ERR:
2387                 ibwc_status = IB_WC_MW_BIND_ERR;
2388                 break;
2389         case OCRDMA_CQE_BAD_RESP_ERR:
2390                 ibwc_status = IB_WC_BAD_RESP_ERR;
2391                 break;
2392         case OCRDMA_CQE_LOC_ACCESS_ERR:
2393                 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2394                 break;
2395         case OCRDMA_CQE_REM_INV_REQ_ERR:
2396                 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2397                 break;
2398         case OCRDMA_CQE_REM_ACCESS_ERR:
2399                 ibwc_status = IB_WC_REM_ACCESS_ERR;
2400                 break;
2401         case OCRDMA_CQE_REM_OP_ERR:
2402                 ibwc_status = IB_WC_REM_OP_ERR;
2403                 break;
2404         case OCRDMA_CQE_RETRY_EXC_ERR:
2405                 ibwc_status = IB_WC_RETRY_EXC_ERR;
2406                 break;
2407         case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2408                 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2409                 break;
2410         case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2411                 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2412                 break;
2413         case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2414                 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2415                 break;
2416         case OCRDMA_CQE_REM_ABORT_ERR:
2417                 ibwc_status = IB_WC_REM_ABORT_ERR;
2418                 break;
2419         case OCRDMA_CQE_INV_EECN_ERR:
2420                 ibwc_status = IB_WC_INV_EECN_ERR;
2421                 break;
2422         case OCRDMA_CQE_INV_EEC_STATE_ERR:
2423                 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2424                 break;
2425         case OCRDMA_CQE_FATAL_ERR:
2426                 ibwc_status = IB_WC_FATAL_ERR;
2427                 break;
2428         case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2429                 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2430                 break;
2431         default:
2432                 ibwc_status = IB_WC_GENERAL_ERR;
2433                 break;
2434         }
2435         return ibwc_status;
2436 }
2437
2438 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2439                       u32 wqe_idx)
2440 {
2441         struct ocrdma_hdr_wqe *hdr;
2442         struct ocrdma_sge *rw;
2443         int opcode;
2444
2445         hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2446
2447         ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2448         /* Undo the hdr->cw swap */
2449         opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2450         switch (opcode) {
2451         case OCRDMA_WRITE:
2452                 ibwc->opcode = IB_WC_RDMA_WRITE;
2453                 break;
2454         case OCRDMA_READ:
2455                 rw = (struct ocrdma_sge *)(hdr + 1);
2456                 ibwc->opcode = IB_WC_RDMA_READ;
2457                 ibwc->byte_len = rw->len;
2458                 break;
2459         case OCRDMA_SEND:
2460                 ibwc->opcode = IB_WC_SEND;
2461                 break;
2462         case OCRDMA_FR_MR:
2463                 ibwc->opcode = IB_WC_REG_MR;
2464                 break;
2465         case OCRDMA_LKEY_INV:
2466                 ibwc->opcode = IB_WC_LOCAL_INV;
2467                 break;
2468         default:
2469                 ibwc->status = IB_WC_GENERAL_ERR;
2470                 pr_err("%s() invalid opcode received = 0x%x\n",
2471                        __func__, opcode);
2472                 break;
2473         }
2474 }
2475
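/* Overwrite the status field of a cqe already delivered by hw (UD
 * cqes keep the status at a different offset) with WR_FLUSH_ERR, so
 * the same cqe can be re-polled and expanded into flush completions
 * for the remaining pending wqes/rqes.
 */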
2476 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2477                                                 struct ocrdma_cqe *cqe)
2478 {
2479         if (is_cqe_for_sq(cqe)) {
2480                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2481                                 cqe->flags_status_srcqpn) &
2482                                         ~OCRDMA_CQE_STATUS_MASK);
2483                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2484                                 cqe->flags_status_srcqpn) |
2485                                 (OCRDMA_CQE_WR_FLUSH_ERR <<
2486                                         OCRDMA_CQE_STATUS_SHIFT));
2487         } else {
2488                 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2489                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2490                                         cqe->flags_status_srcqpn) &
2491                                                 ~OCRDMA_CQE_UD_STATUS_MASK);
2492                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2493                                         cqe->flags_status_srcqpn) |
2494                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2495                                                 OCRDMA_CQE_UD_STATUS_SHIFT));
2496                 } else {
2497                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2498                                         cqe->flags_status_srcqpn) &
2499                                                 ~OCRDMA_CQE_STATUS_MASK);
2500                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2501                                         cqe->flags_status_srcqpn) |
2502                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2503                                                 OCRDMA_CQE_STATUS_SHIFT));
2504                 }
2505         }
2506 }
2507
2508 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2509                                   struct ocrdma_qp *qp, int status)
2510 {
2511         bool expand = false;
2512
2513         ibwc->byte_len = 0;
2514         ibwc->qp = &qp->ibqp;
2515         ibwc->status = ocrdma_to_ibwc_err(status);
2516
2517         ocrdma_flush_qp(qp);
2518         ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2519
2520         /* if wqes/rqes are pending for which cqes need to be
2521          * returned, trigger expanding this cqe into flush cqes.
2522          */
2523         if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2524                 expand = true;
2525                 ocrdma_set_cqe_status_flushed(qp, cqe);
2526         }
2527         return expand;
2528 }
2529
2530 static bool ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2531                                   struct ocrdma_qp *qp, int status)
2532 {
2533         ibwc->opcode = IB_WC_RECV;
2534         ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2535         ocrdma_hwq_inc_tail(&qp->rq);
2536
2537         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2538 }
2539
2540 static bool ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2541                                   struct ocrdma_qp *qp, int status)
2542 {
2543         ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2544         ocrdma_hwq_inc_tail(&qp->sq);
2545
2546         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2547 }
2548
2549
2550 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2551                                  struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2552                                  bool *polled, bool *stop)
2553 {
2554         bool expand;
2555         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2556         int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2557                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2558         if (status < OCRDMA_MAX_CQE_ERR)
2559                 atomic_inc(&dev->cqe_err_stats[status]);
2560
2561         /* when the hw sq is empty but the rq is not, keep the cqe
2562          * in order to get the cq event again.
2563          */
2564         if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2565                 /* when the rq and sq share the same cq, it is safe to
2566                  * return flush cqes for the RQEs.
2567                  */
2568                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2569                         *polled = true;
2570                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2571                         expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2572                 } else {
2573                         /* stop processing further cqe as this cqe is used for
2574                          * triggering cq event on buddy cq of RQ.
2575                          * When QP is destroyed, this cqe will be removed
2576                          * from the cq's hardware q.
2577                          */
2578                         *polled = false;
2579                         *stop = true;
2580                         expand = false;
2581                 }
2582         } else if (is_hw_sq_empty(qp)) {
2583                 /* Do nothing */
2584                 expand = false;
2585                 *polled = false;
2586                 *stop = false;
2587         } else {
2588                 *polled = true;
2589                 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2590         }
2591         return expand;
2592 }
2593
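/* A success scqe may be coalesced: it completes every wqe up to
 * wqeidx, but only signaled wqes produce a work completion. "expand"
 * keeps the cqe current so the caller re-polls it until sq.tail
 * catches up with wqeidx.
 */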
2594 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2595                                      struct ocrdma_cqe *cqe,
2596                                      struct ib_wc *ibwc, bool *polled)
2597 {
2598         bool expand = false;
2599         int tail = qp->sq.tail;
2600         u32 wqe_idx;
2601
2602         if (!qp->wqe_wr_id_tbl[tail].signaled) {
2603                 *polled = false;    /* WC cannot be consumed yet */
2604         } else {
2605                 ibwc->status = IB_WC_SUCCESS;
2606                 ibwc->wc_flags = 0;
2607                 ibwc->qp = &qp->ibqp;
2608                 ocrdma_update_wc(qp, ibwc, tail);
2609                 *polled = true;
2610         }
2611         wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2612                         OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2613         if (tail != wqe_idx)
2614                 expand = true; /* Coalesced CQE can't be consumed yet */
2615
2616         ocrdma_hwq_inc_tail(&qp->sq);
2617         return expand;
2618 }
2619
2620 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2621                              struct ib_wc *ibwc, bool *polled, bool *stop)
2622 {
2623         int status;
2624         bool expand;
2625
2626         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2627                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2628
2629         if (status == OCRDMA_CQE_SUCCESS)
2630                 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2631         else
2632                 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2633         return expand;
2634 }
2635
2636 static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
2637                                  struct ocrdma_cqe *cqe)
2638 {
2639         int status;
2640         u16 hdr_type = 0;
2641
2642         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2643                 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2644         ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2645                                                 OCRDMA_CQE_SRCQP_MASK;
2646         ibwc->pkey_index = 0;
2647         ibwc->wc_flags = IB_WC_GRH;
2648         ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2649                           OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
2650                           OCRDMA_CQE_UD_XFER_LEN_MASK;
2651
2652         if (ocrdma_is_udp_encap_supported(dev)) {
2653                 hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2654                             OCRDMA_CQE_UD_L3TYPE_SHIFT) &
2655                             OCRDMA_CQE_UD_L3TYPE_MASK;
2656                 ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2657                 ibwc->network_hdr_type = hdr_type;
2658         }
2659
2660         return status;
2661 }
2662
2663 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2664                                        struct ocrdma_cqe *cqe,
2665                                        struct ocrdma_qp *qp)
2666 {
2667         unsigned long flags;
2668         struct ocrdma_srq *srq;
2669         u32 wqe_idx;
2670
2671         srq = get_ocrdma_srq(qp->ibqp.srq);
2672         wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2673                 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2674         BUG_ON(wqe_idx < 1);
2675
2676         ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2677         spin_lock_irqsave(&srq->q_lock, flags);
2678         ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
2679         spin_unlock_irqrestore(&srq->q_lock, flags);
2680         ocrdma_hwq_inc_tail(&srq->rq);
2681 }
2682
2683 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2684                                 struct ib_wc *ibwc, bool *polled, bool *stop,
2685                                 int status)
2686 {
2687         bool expand;
2688         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2689
2690         if (status < OCRDMA_MAX_CQE_ERR)
2691                 atomic_inc(&dev->cqe_err_stats[status]);
2692
2693         /* when the hw rq is empty but the sq is not, keep the cqe
2694          * to get the cq event again.
2695          */
2696         if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2697                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2698                         *polled = true;
2699                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2700                         expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2701                 } else {
2702                         *polled = false;
2703                         *stop = true;
2704                         expand = false;
2705                 }
2706         } else if (is_hw_rq_empty(qp)) {
2707                 /* Do nothing */
2708                 expand = false;
2709                 *polled = false;
2710                 *stop = false;
2711         } else {
2712                 *polled = true;
2713                 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2714         }
2715         return expand;
2716 }
2717
2718 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2719                                      struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2720 {
2721         struct ocrdma_dev *dev;
2722
2723         dev = get_ocrdma_dev(qp->ibqp.device);
2724         ibwc->opcode = IB_WC_RECV;
2725         ibwc->qp = &qp->ibqp;
2726         ibwc->status = IB_WC_SUCCESS;
2727
2728         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2729                 ocrdma_update_ud_rcqe(dev, ibwc, cqe);
2730         else
2731                 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2732
2733         if (is_cqe_imm(cqe)) {
2734                 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2735                 ibwc->wc_flags |= IB_WC_WITH_IMM;
2736         } else if (is_cqe_wr_imm(cqe)) {
2737                 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2738                 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2739                 ibwc->wc_flags |= IB_WC_WITH_IMM;
2740         } else if (is_cqe_invalidated(cqe)) {
2741                 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2742                 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2743         }
2744         if (qp->ibqp.srq) {
2745                 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2746         } else {
2747                 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2748                 ocrdma_hwq_inc_tail(&qp->rq);
2749         }
2750 }
2751
2752 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2753                              struct ib_wc *ibwc, bool *polled, bool *stop)
2754 {
2755         int status;
2756         bool expand = false;
2757
2758         ibwc->wc_flags = 0;
2759         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2760                 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2761                                         OCRDMA_CQE_UD_STATUS_MASK) >>
2762                                         OCRDMA_CQE_UD_STATUS_SHIFT;
2763         } else {
2764                 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2765                              OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2766         }
2767
2768         if (status == OCRDMA_CQE_SUCCESS) {
2769                 *polled = true;
2770                 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2771         } else {
2772                 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2773                                               status);
2774         }
2775         return expand;
2776 }
2777
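/* Two cqe-validity schemes: with phase_change, the sense of the
 * valid bit flips each time the ring wraps (cur_getp back to 0);
 * without it, consumed cqes are invalidated in place by zeroing
 * flags_status_srcqpn.
 */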
2778 static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2779                                    u16 cur_getp)
2780 {
2781         if (cq->phase_change) {
2782                 if (cur_getp == 0)
2783                         cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
2784         } else {
2785                 /* clear valid bit */
2786                 cqe->flags_status_srcqpn = 0;
2787         }
2788 }
2789
2790 static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2791                             struct ib_wc *ibwc)
2792 {
2793         u16 qpn = 0;
2794         int i = 0;
2795         bool expand = false;
2796         int polled_hw_cqes = 0;
2797         struct ocrdma_qp *qp = NULL;
2798         struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
2799         struct ocrdma_cqe *cqe;
2800         u16 cur_getp;
2801         bool polled = false, stop = false;
2801
2802         cur_getp = cq->getp;
2803         while (num_entries) {
2804                 cqe = cq->va + cur_getp;
2805                 /* check whether valid cqe or not */
2806                 if (!is_cqe_valid(cq, cqe))
2807                         break;
2808                 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2809                 /* ignore discarded cqe */
2810                 if (qpn == 0)
2811                         goto skip_cqe;
2812                 qp = dev->qp_tbl[qpn];
2813                 BUG_ON(qp == NULL);
2814
2815                 if (is_cqe_for_sq(cqe)) {
2816                         expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2817                                                   &stop);
2818                 } else {
2819                         expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2820                                                   &stop);
2821                 }
2822                 if (expand)
2823                         goto expand_cqe;
2824                 if (stop)
2825                         goto stop_cqe;
2826                 /* clear qpn to avoid duplicate processing by discard_cqe() */
2827                 cqe->cmn.qpn = 0;
2828 skip_cqe:
2829                 polled_hw_cqes += 1;
2830                 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2831                 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2832 expand_cqe:
2833                 if (polled) {
2834                         num_entries -= 1;
2835                         i += 1;
2836                         ibwc = ibwc + 1;
2837                         polled = false;
2838                 }
2839         }
2840 stop_cqe:
2841         cq->getp = cur_getp;
2842
2843         if (polled_hw_cqes)
2844                 ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
2845
2846         return i;
2847 }
2848
/* Insert flushed-error CQEs for the pending WQEs of a QP whose SQ or RQ
 * completes on the CQ under poll.
 */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
                              struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
        int err_cqes = 0;

        while (num_entries) {
                if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
                        break;
                if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
                        ocrdma_update_wc(qp, ibwc, qp->sq.tail);
                        ocrdma_hwq_inc_tail(&qp->sq);
                } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
                        ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
                        ocrdma_hwq_inc_tail(&qp->rq);
                } else {
                        return err_cqes;
                }
                ibwc->byte_len = 0;
                ibwc->status = IB_WC_WR_FLUSH_ERR;
                ibwc = ibwc + 1;
                err_cqes += 1;
                num_entries -= 1;
        }
        return err_cqes;
}

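/* ib_poll_cq entry point: drain real completions from the adapter CQ
 * first and, if room remains in the caller's wc array, synthesize
 * IB_WC_WR_FLUSH_ERR entries for the QPs queued on this CQ's flush
 * list.
 */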
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        int cqes_to_poll = num_entries;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int num_os_cqe = 0, err_cqes = 0;
        struct ocrdma_qp *qp;
        unsigned long flags;

        /* poll cqes from adapter CQ */
        spin_lock_irqsave(&cq->cq_lock, flags);
        num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
        cqes_to_poll -= num_os_cqe;

        if (cqes_to_poll) {
                wc = wc + num_os_cqe;
                /* The adapter returns a single error CQE when a QP moves
                 * to the error state, so synthesize flush-error CQEs
                 * (IB_WC_WR_FLUSH_ERR) here for the pending WQEs and RQEs
                 * of each QP using this CQ.
                 */
                spin_lock_irqsave(&dev->flush_q_lock, flags);
                list_for_each_entry(qp, &cq->sq_head, sq_entry) {
                        if (cqes_to_poll == 0)
                                break;
                        err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
                        cqes_to_poll -= err_cqes;
                        num_os_cqe += err_cqes;
                        wc = wc + err_cqes;
                }
                spin_unlock_irqrestore(&dev->flush_q_lock, flags);
        }
        return num_os_cqe;
}

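/* ib_req_notify_cq entry point: ring the CQ doorbell with the arm bit
 * set (and the solicited-only bit for IB_CQ_SOLICITED) so that the next
 * matching completion generates a notification event.
 */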
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        u16 cq_id;
        unsigned long flags;
        bool arm_needed = false, sol_needed = false;

        cq_id = cq->id;

        spin_lock_irqsave(&cq->cq_lock, flags);
        if (cq_flags & (IB_CQ_NEXT_COMP | IB_CQ_SOLICITED))
                arm_needed = true;
        if (cq_flags & IB_CQ_SOLICITED)
                sol_needed = true;

        ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return 0;
}

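/* ib_alloc_mr entry point: allocate a fast-registration MR
 * (IB_MR_TYPE_MEM_REG only) backed by a PBL table sized for up to
 * max_num_sg pages. The page addresses themselves are supplied later
 * through ocrdma_map_mr_sg().
 */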
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
                              enum ib_mr_type mr_type,
                              u32 max_num_sg)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        if (max_num_sg > dev->attr.max_pages_per_frmr)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
        if (!mr->pages) {
                status = -ENOMEM;
                goto pl_err;
        }

        status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
        if (status)
                goto pbl_err;
        mr->hwmr.fr_mr = 1;
        mr->hwmr.remote_rd = 0;
        mr->hwmr.remote_wr = 0;
        mr->hwmr.local_rd = 0;
        mr->hwmr.local_wr = 0;
        mr->hwmr.mw_bind = 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto pbl_err;
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
        if (status)
                goto mbx_err;
        mr->ibmr.rkey = mr->hwmr.lkey;
        mr->ibmr.lkey = mr->hwmr.lkey;
        dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
                (unsigned long) mr;
        return &mr->ibmr;
mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
        kfree(mr->pages);
pl_err:
        kfree(mr);
        /* status is set on every error path; propagate the real failure
         * code instead of folding everything into -ENOMEM.
         */
        return ERR_PTR(status);
}

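/* ib_sg_to_pages() callback: record one page address in the MR's page
 * list, failing with -ENOMEM once the PBL capacity is exhausted.
 */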
static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

        if (unlikely(mr->npages == mr->hwmr.num_pbes))
                return -ENOMEM;

        mr->pages[mr->npages++] = addr;

        return 0;
}

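/* ib_map_mr_sg entry point: reset the MR's page list and let the core
 * ib_sg_to_pages() helper split the scatterlist into device pages via
 * the ocrdma_set_page() callback.
 */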
int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                     unsigned int *sg_offset)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

        mr->npages = 0;

        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}