RDMA/ocrdma: Fix memory leak in _ocrdma_alloc_pd()
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"

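/*
 * RoCE has no partition manager; any valid P_Key index simply returns
 * the default full-membership key 0xffff (ocrdma_query_device()
 * likewise reports max_pkeys = 1).
 */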
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        if (index > 1)
                return -EINVAL;

        *pkey = 0xffff;
        return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
                     int index, union ib_gid *sgid)
{
        struct ocrdma_dev *dev;

        dev = get_ocrdma_dev(ibdev);
        memset(sgid, 0, sizeof(*sgid));
        if (index >= OCRDMA_MAX_SGID)
                return -EINVAL;

        memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

        return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

        memset(attr, 0, sizeof(*attr));
        memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
               min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
        ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
        attr->max_mr_size = dev->attr.max_mr_size;
        attr->page_size_cap = 0xffff000;
        attr->vendor_id = dev->nic_info.pdev->vendor;
        attr->vendor_part_id = dev->nic_info.pdev->device;
        attr->hw_ver = dev->asic_id;
        attr->max_qp = dev->attr.max_qp;
        attr->max_ah = OCRDMA_MAX_AH;
        attr->max_qp_wr = dev->attr.max_wqe;

        attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
                                        IB_DEVICE_RC_RNR_NAK_GEN |
                                        IB_DEVICE_SHUTDOWN_PORT |
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY |
                                        IB_DEVICE_MEM_MGT_EXTENSIONS;
        attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
        attr->max_sge_rd = 0;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
        attr->max_mw = dev->attr.max_mw;
        attr->max_pd = dev->attr.max_pd;
        attr->atomic_cap = 0;
        attr->max_fmr = 0;
        attr->max_map_per_fmr = 0;
        attr->max_qp_rd_atom =
            min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
        attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
        attr->max_srq = dev->attr.max_srq;
        attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
        attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
        attr->max_pkeys = 1;
        return 0;
}

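/*
 * Translate the PHY speed reported by firmware into an (IB speed,
 * IB width) pair; the pairings appear chosen so that speed * width
 * approximates the Ethernet wire rate, e.g. 40G -> QDR (10 Gbps) x4.
 * A mailbox failure or an unknown speed falls back to SDR x1.
 */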
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
                                            u8 *ib_speed, u8 *ib_width)
{
        int status;
        u8 speed;

        status = ocrdma_mbx_get_link_speed(dev, &speed);
        if (status)
                speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

        switch (speed) {
        case OCRDMA_PHYS_LINK_SPEED_1GBPS:
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_10GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_1X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_20GBPS:
                *ib_speed = IB_SPEED_DDR;
                *ib_width = IB_WIDTH_4X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_40GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_4X;
                break;

        default:
                /* Unsupported */
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
        }
}

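/*
 * Most port attributes are synthesized: RoCE has no subnet manager, so
 * the LID/SM fields are reported as zero and the port state is derived
 * from the netdev carrier. phys_state uses the IB encoding
 * (5 = LinkUp, 3 = Disabled).
 */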
int ocrdma_query_port(struct ib_device *ibdev,
                      u8 port, struct ib_port_attr *props)
{
        enum ib_port_state port_state;
        struct ocrdma_dev *dev;
        struct net_device *netdev;

        dev = get_ocrdma_dev(ibdev);
        if (port > 1) {
                pr_err("%s(%d) invalid_port=0x%x\n", __func__,
                       dev->id, port);
                return -EINVAL;
        }
        netdev = dev->nic_info.netdev;
        if (netif_running(netdev) && netif_oper_up(netdev)) {
                port_state = IB_PORT_ACTIVE;
                props->phys_state = 5; /* LinkUp */
        } else {
                port_state = IB_PORT_DOWN;
                props->phys_state = 3; /* Disabled */
        }
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(netdev->mtu);
        props->lid = 0;
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
        props->state = port_state;
        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP |
            IB_PORT_VENDOR_CLASS_SUP |
            IB_PORT_IP_BASED_GIDS;
        props->gid_tbl_len = OCRDMA_MAX_SGID;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = 0;
        props->qkey_viol_cntr = 0;
        get_link_speed_and_width(dev, &props->active_speed,
                                 &props->active_width);
        props->max_msg_sz = 0x80000000;
        props->max_vl_num = 4;
        return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
                       struct ib_port_modify *props)
{
        struct ocrdma_dev *dev;

        dev = get_ocrdma_dev(ibdev);
        if (port > 1) {
                pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
                return -EINVAL;
        }
        return 0;
}

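/*
 * Each ucontext tracks a list of (phys_addr, len) keys naming the
 * doorbell/DPP/queue regions that userspace is allowed to map;
 * ocrdma_mmap() consults this list before remapping anything.
 */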
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                           unsigned long len)
{
        struct ocrdma_mm *mm;

        mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (mm == NULL)
                return -ENOMEM;
        mm->key.phy_addr = phy_addr;
        mm->key.len = len;
        INIT_LIST_HEAD(&mm->entry);

        mutex_lock(&uctx->mm_list_lock);
        list_add_tail(&mm->entry, &uctx->mm_head);
        mutex_unlock(&uctx->mm_list_lock);
        return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                            unsigned long len)
{
        struct ocrdma_mm *mm, *tmp;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                if (len != mm->key.len && phy_addr != mm->key.phy_addr)
                        continue;

                list_del(&mm->entry);
                kfree(mm);
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                              unsigned long len)
{
        bool found = false;
        struct ocrdma_mm *mm;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry(mm, &uctx->mm_head, entry) {
                if (len != mm->key.len && phy_addr != mm->key.phy_addr)
                        continue;

                found = true;
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
        return found;
}

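/*
 * Prealloc'd PD manager: when firmware hands the driver a PD range up
 * front (pd_prealloc_valid), PD ids come from per-pool bitmaps (DPP
 * vs. normal) instead of a mailbox round trip per PD. The *_thrsh
 * fields record the high-water mark of each pool.
 */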
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
        u16 pd_bitmap_idx = 0;
        const unsigned long *pd_bitmap;

        if (dpp_pool) {
                pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_dpp_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
                dev->pd_mgr->pd_dpp_count++;
                if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
                        dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
        } else {
                pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_normal_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
                dev->pd_mgr->pd_norm_count++;
                if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
                        dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
        }
        return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
                                        bool dpp_pool)
{
        u16 pd_count;
        u16 pd_bit_index;

        pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
                              dev->pd_mgr->pd_norm_count;
        if (pd_count == 0)
                return -EINVAL;

        if (dpp_pool) {
                pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
                if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
                        return -EINVAL;
                } else {
                        __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
                        dev->pd_mgr->pd_dpp_count--;
                }
        } else {
                pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
                if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
                        return -EINVAL;
                } else {
                        __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
                        dev->pd_mgr->pd_norm_count--;
                }
        }

        return 0;
}

static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
                                   bool dpp_pool)
{
        int status;

        mutex_lock(&dev->dev_lock);
        status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
        mutex_unlock(&dev->dev_lock);
        return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
        u16 pd_idx = 0;
        int status = 0;

        mutex_lock(&dev->dev_lock);
        if (pd->dpp_enabled) {
                /* try allocating DPP PD, if not available then normal PD */
                if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
                        pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
                        pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
                } else if (dev->pd_mgr->pd_norm_count <
                           dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                        pd->dpp_enabled = false;
                } else {
                        status = -EINVAL;
                }
        } else {
                if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                } else {
                        status = -EINVAL;
                }
        }
        mutex_unlock(&dev->dev_lock);
        return status;
}

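/*
 * Allocate a PD, taking it from the prealloc'd pool when one exists
 * and falling back to a mailbox command otherwise. DPP is attempted
 * only for user contexts on the SKH-R ASIC; a failed DPP mailbox
 * allocation is retried once as a plain PD. Every failure path frees
 * 'pd' before returning an ERR_PTR, which is the leak fix this patch
 * title refers to.
 */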
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
                                          struct ocrdma_ucontext *uctx,
                                          struct ib_udata *udata)
{
        struct ocrdma_pd *pd = NULL;
        int status = 0;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        if (udata && uctx) {
                pd->dpp_enabled =
                        ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
                pd->num_dpp_qp =
                        pd->dpp_enabled ? (dev->nic_info.db_page_size /
                                           dev->attr.wqe_size) : 0;
        }

        if (dev->pd_mgr->pd_prealloc_valid) {
                status = ocrdma_get_pd_num(dev, pd);
                if (status == 0) {
                        return pd;
                } else {
                        kfree(pd);
                        return ERR_PTR(status);
                }
        }

retry:
        status = ocrdma_mbx_alloc_pd(dev, pd);
        if (status) {
                if (pd->dpp_enabled) {
                        pd->dpp_enabled = false;
                        pd->num_dpp_qp = 0;
                        goto retry;
                } else {
                        kfree(pd);
                        return ERR_PTR(status);
                }
        }

        return pd;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
                                 struct ocrdma_pd *pd)
{
        return (uctx->cntxt_pd == pd ? true : false);
}

static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
                              struct ocrdma_pd *pd)
{
        int status = 0;

        if (dev->pd_mgr->pd_prealloc_valid)
                status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
        else
                status = ocrdma_mbx_dealloc_pd(dev, pd);

        kfree(pd);
        return status;
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
                                    struct ocrdma_ucontext *uctx,
                                    struct ib_udata *udata)
{
        int status = 0;

        uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
        if (IS_ERR(uctx->cntxt_pd)) {
                status = PTR_ERR(uctx->cntxt_pd);
                uctx->cntxt_pd = NULL;
                goto err;
        }

        uctx->cntxt_pd->uctx = uctx;
        uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
        return status;
}

static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = uctx->cntxt_pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        if (uctx->pd_in_use) {
                pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
                       __func__, dev->id, pd->id);
        }
        uctx->cntxt_pd = NULL;
        (void)_ocrdma_dealloc_pd(dev, pd);
        return 0;
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = NULL;

        mutex_lock(&uctx->mm_list_lock);
        if (!uctx->pd_in_use) {
                uctx->pd_in_use = true;
                pd = uctx->cntxt_pd;
        }
        mutex_unlock(&uctx->mm_list_lock);

        return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        mutex_lock(&uctx->mm_list_lock);
        uctx->pd_in_use = false;
        mutex_unlock(&uctx->mm_list_lock);
}

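/*
 * Create a user context: allocate a DMA-coherent address-handle table,
 * publish it through the mmap list, set up the per-context PD, and
 * copy the device limits back to userspace via udata.
 */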
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
                                          struct ib_udata *udata)
{
        int status;
        struct ocrdma_ucontext *ctx;
        struct ocrdma_alloc_ucontext_resp resp;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

        if (!udata)
                return ERR_PTR(-EFAULT);
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&ctx->mm_head);
        mutex_init(&ctx->mm_list_lock);

        ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
                                            &ctx->ah_tbl.pa, GFP_KERNEL);
        if (!ctx->ah_tbl.va) {
                kfree(ctx);
                return ERR_PTR(-ENOMEM);
        }
        memset(ctx->ah_tbl.va, 0, map_len);
        ctx->ah_tbl.len = map_len;

        memset(&resp, 0, sizeof(resp));
        resp.ah_tbl_len = ctx->ah_tbl.len;
        resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

        status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
        if (status)
                goto map_err;

        status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
        if (status)
                goto pd_err;

        resp.dev_id = dev->id;
        resp.max_inline_data = dev->attr.max_inline_data;
        resp.wqe_size = dev->attr.wqe_size;
        resp.rqe_size = dev->attr.rqe_size;
        resp.dpp_wqe_size = dev->attr.wqe_size;

        memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
        status = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (status)
                goto cpy_err;
        return &ctx->ibucontext;

cpy_err:
pd_err:
        ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
        dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
                          ctx->ah_tbl.pa);
        kfree(ctx);
        return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
        int status = 0;
        struct ocrdma_mm *mm, *tmp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
        struct pci_dev *pdev = dev->nic_info.pdev;

        status = ocrdma_dealloc_ucontext_pd(uctx);

        ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
        dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
                          uctx->ah_tbl.pa);

        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                list_del(&mm->entry);
                kfree(mm);
        }
        kfree(uctx);
        return status;
}

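/*
 * Validate the requested range against the context's mmap list, then
 * apply page protections matching the region: doorbell pages are
 * mapped uncached, DPP pages write-combined, and everything else
 * (e.g. the AH table) as normal cached memory.
 */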
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
        struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
        unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
        u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
        unsigned long len = (vma->vm_end - vma->vm_start);
        int status = 0;
        bool found;

        if (vma->vm_start & (PAGE_SIZE - 1))
                return -EINVAL;
        found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
        if (!found)
                return -EINVAL;

        if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
                dev->nic_info.db_total_size)) &&
                (len <= dev->nic_info.db_page_size)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else if (dev->nic_info.dpp_unmapped_len &&
                (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
                (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
                        dev->nic_info.dpp_unmapped_len)) &&
                (len <= dev->nic_info.dpp_unmapped_len)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else {
                status = remap_pfn_range(vma, vma->vm_start,
                                         vma->vm_pgoff, len, vma->vm_page_prot);
        }
        return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
                                struct ib_ucontext *ib_ctx,
                                struct ib_udata *udata)
{
        int status;
        u64 db_page_addr;
        u64 dpp_page_addr = 0;
        u32 db_page_size;
        struct ocrdma_alloc_pd_uresp rsp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

        memset(&rsp, 0, sizeof(rsp));
        rsp.id = pd->id;
        rsp.dpp_enabled = pd->dpp_enabled;
        db_page_addr = ocrdma_get_db_addr(dev, pd->id);
        db_page_size = dev->nic_info.db_page_size;

        status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
        if (status)
                return status;

        if (pd->dpp_enabled) {
                dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
                                (pd->id * PAGE_SIZE);
                status = ocrdma_add_mmap(uctx, dpp_page_addr,
                                 PAGE_SIZE);
                if (status)
                        goto dpp_map_err;
                rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
                rsp.dpp_page_addr_lo = dpp_page_addr;
        }

        status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
        if (status)
                goto ucopy_err;

        pd->uctx = uctx;
        return 0;

ucopy_err:
        /* use 'uctx' here: pd->uctx is not yet set on the error paths */
        if (pd->dpp_enabled)
                ocrdma_del_mmap(uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
        ocrdma_del_mmap(uctx, db_page_addr, db_page_size);
        return status;
}

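/*
 * ib_pd allocation entry point. A user process first tries to reuse
 * the PD embedded in its ucontext (one consumer at a time, guarded by
 * pd_in_use); a fresh PD is allocated only when that one is taken.
 * Kernel consumers always get a fresh PD.
 */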
struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_pd *pd;
        struct ocrdma_ucontext *uctx = NULL;
        int status;
        bool is_uctx_pd = false;

        if (udata && context) {
                uctx = get_ocrdma_ucontext(context);
                pd = ocrdma_get_ucontext_pd(uctx);
                if (pd) {
                        is_uctx_pd = true;
                        goto pd_mapping;
                }
        }

        pd = _ocrdma_alloc_pd(dev, uctx, udata);
        if (IS_ERR(pd)) {
                status = PTR_ERR(pd);
                goto exit;
        }

pd_mapping:
        if (udata && context) {
                status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
                if (status)
                        goto err;
        }
        return &pd->ibpd;

err:
        if (is_uctx_pd) {
                ocrdma_release_ucontext_pd(uctx);
        } else {
                /* _ocrdma_dealloc_pd() frees 'pd'; freeing it again
                 * here would be a double free.
                 */
                status = _ocrdma_dealloc_pd(dev, pd);
        }
exit:
        return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_ucontext *uctx = NULL;
        int status = 0;
        u64 usr_db;

        uctx = pd->uctx;
        if (uctx) {
                u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
                        (pd->id * PAGE_SIZE);
                if (pd->dpp_enabled)
                        ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
                usr_db = ocrdma_get_db_addr(dev, pd->id);
                ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

                if (is_ucontext_pd(uctx, pd)) {
                        ocrdma_release_ucontext_pd(uctx);
                        return status;
                }
        }
        status = _ocrdma_dealloc_pd(dev, pd);
        return status;
}

static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                            u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
        int status;

        mr->hwmr.fr_mr = 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hwmr.num_pbls = num_pbls;

        status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
        if (status)
                return status;

        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
        return 0;
}

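/*
 * Register a privileged DMA MR (no PBLs, address check disabled). As
 * the verbs spec requires, remote write is refused unless local write
 * is also requested.
 */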
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
                pr_err("%s err, invalid access rights\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
                                   OCRDMA_ADDR_CHECK_DISABLE);
        if (status) {
                kfree(mr);
                return ERR_PTR(status);
        }

        return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
                                   struct ocrdma_hw_mr *mr)
{
        struct pci_dev *pdev = dev->nic_info.pdev;
        int i = 0;

        if (mr->pbl_table) {
                for (i = 0; i < mr->num_pbls; i++) {
                        if (!mr->pbl_table[i].va)
                                continue;
                        dma_free_coherent(&pdev->dev, mr->pbl_size,
                                          mr->pbl_table[i].va,
                                          mr->pbl_table[i].pa);
                }
                kfree(mr->pbl_table);
                mr->pbl_table = NULL;
        }
}

static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                              u32 num_pbes)
{
        u32 num_pbls = 0;
        u32 idx = 0;
        int status = 0;
        u32 pbl_size;

        do {
                pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
                if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
                        status = -EFAULT;
                        break;
                }
                num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
                num_pbls = num_pbls / (pbl_size / sizeof(u64));
                idx++;
        } while (num_pbls >= dev->attr.max_num_mr_pbl);

        mr->hwmr.num_pbes = num_pbes;
        mr->hwmr.num_pbls = num_pbls;
        mr->hwmr.pbl_size = pbl_size;
        return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
        int status = 0;
        int i;
        u32 dma_len = mr->pbl_size;
        struct pci_dev *pdev = dev->nic_info.pdev;
        void *va;
        dma_addr_t pa;

        mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
                                GFP_KERNEL);

        if (!mr->pbl_table)
                return -ENOMEM;

        for (i = 0; i < mr->num_pbls; i++) {
                va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
                if (!va) {
                        ocrdma_free_mr_pbl_tbl(dev, mr);
                        status = -ENOMEM;
                        break;
                }
                memset(va, 0, dma_len);
                mr->pbl_table[i].va = va;
                mr->pbl_table[i].pa = pa;
        }
        return status;
}

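/*
 * Walk the pinned umem scatterlist and emit one little-endian PBE
 * (physical buffer entry) per device page, hopping to the next PBL
 * whenever the current one fills up.
 */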
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                            u32 num_pbes)
{
        struct ocrdma_pbe *pbe;
        struct scatterlist *sg;
        struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
        struct ib_umem *umem = mr->umem;
        int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;

        if (!mr->hwmr.num_pbes)
                return;

        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        pbe_cnt = 0;

        shift = ilog2(umem->page_size);

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                pages = sg_dma_len(sg) >> shift;
                for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
                        /* store the page address in pbe */
                        pbe->pa_lo =
                            cpu_to_le32(sg_dma_address(sg) +
                                        (umem->page_size * pg_cnt));
                        pbe->pa_hi =
                            cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
                                        umem->page_size * pg_cnt));
                        pbe_cnt += 1;
                        total_num_pbes += 1;
                        pbe++;

                        /* if done building pbes, issue the mbx cmd. */
                        if (total_num_pbes == num_pbes)
                                return;

                        /* if the given pbl is full storing the pbes,
                         * move to next pbl.
                         */
                        if (pbe_cnt ==
                                (mr->hwmr.pbl_size / sizeof(u64))) {
                                pbl_tbl++;
                                pbe = (struct ocrdma_pbe *)pbl_tbl->va;
                                pbe_cnt = 0;
                        }

                }
        }
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                                 u64 usr_addr, int acc, struct ib_udata *udata)
{
        int status = -ENOMEM;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd;
        u32 num_pbes;

        pd = get_ocrdma_pd(ibpd);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(status);
        mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
        if (IS_ERR(mr->umem)) {
                status = -EFAULT;
                goto umem_err;
        }
        num_pbes = ib_umem_page_count(mr->umem);
        status = ocrdma_get_pbl_info(dev, mr, num_pbes);
        if (status)
                goto umem_err;

        mr->hwmr.pbe_size = mr->umem->page_size;
        mr->hwmr.fbo = ib_umem_offset(mr->umem);
        mr->hwmr.va = usr_addr;
        mr->hwmr.len = len;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto umem_err;
        build_user_pbes(dev, mr, num_pbes);
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
        if (status)
                goto mbx_err;
        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;

        return &mr->ibmr;

mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
        kfree(mr);
        return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
        struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

        (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

        /* it could be user registered memory. */
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        /* Don't stop cleanup, in case FW is unresponsive */
        if (dev->mqe_ctx.fw_error_state) {
                pr_err("%s(%d) fw not responding.\n",
                       __func__, dev->id);
        }
        return 0;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
                                struct ib_udata *udata,
                                struct ib_ucontext *ib_ctx)
{
        int status;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
        struct ocrdma_create_cq_uresp uresp;

        memset(&uresp, 0, sizeof(uresp));
        uresp.cq_id = cq->id;
        uresp.page_size = PAGE_ALIGN(cq->len);
        uresp.num_pages = 1;
        uresp.max_hw_cqe = cq->max_hw_cqe;
        uresp.page_addr[0] = virt_to_phys(cq->va);
        uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.phase_change = cq->phase_change ? 1 : 0;
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) copy error cqid=0x%x.\n",
                       __func__, dev->id, cq->id);
                goto err;
        }
        status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
        if (status)
                goto err;
        status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
        if (status) {
                ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
                goto err;
        }
        cq->ucontext = uctx;
err:
        return status;
}

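/*
 * CQ creation: the CQE ring itself is allocated by the mailbox
 * command; for user CQs the ring and doorbell pages are then exposed
 * through the mmap list. The new CQ is registered in cq_tbl so the
 * rest of the driver can look it up by id.
 */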
struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
                               struct ib_ucontext *ib_ctx,
                               struct ib_udata *udata)
{
        struct ocrdma_cq *cq;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_ucontext *uctx = NULL;
        u16 pd_id = 0;
        int status;
        struct ocrdma_create_cq_ureq ureq;

        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return ERR_PTR(-EFAULT);
        } else {
                ureq.dpp_cq = 0;
        }
        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&cq->cq_lock);
        spin_lock_init(&cq->comp_handler_lock);
        INIT_LIST_HEAD(&cq->sq_head);
        INIT_LIST_HEAD(&cq->rq_head);
        cq->first_arm = true;

        if (ib_ctx) {
                uctx = get_ocrdma_ucontext(ib_ctx);
                pd_id = uctx->cntxt_pd->id;
        }

        status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
        if (status) {
                kfree(cq);
                return ERR_PTR(status);
        }
        if (ib_ctx) {
                status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
                if (status)
                        goto ctx_err;
        }
        cq->phase = OCRDMA_CQE_VALID;
        dev->cq_tbl[cq->id] = cq;
        return &cq->ibcq;

ctx_err:
        ocrdma_mbx_destroy_cq(dev, cq);
        kfree(cq);
        return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
                     struct ib_udata *udata)
{
        int status = 0;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

        if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
                status = -EINVAL;
                return status;
        }
        ibcq->cqe = new_cnt;
        return status;
}

static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
        int cqe_cnt;
        int valid_count = 0;
        unsigned long flags;

        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe = NULL;

        cqe = cq->va;
        cqe_cnt = cq->cqe_cnt;

        /* Last irq might have scheduled a polling thread
         * sync-up with it before hard flushing.
         */
        spin_lock_irqsave(&cq->cq_lock, flags);
        while (cqe_cnt) {
                if (is_cqe_valid(cq, cqe))
                        valid_count++;
                cqe++;
                cqe_cnt--;
        }
        ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_eq *eq = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int pdid = 0;
        u32 irq, indx;

        dev->cq_tbl[cq->id] = NULL;
        indx = ocrdma_get_eq_table_index(dev, cq->eqn);
        if (indx == -EINVAL)
                BUG();

        eq = &dev->eq_tbl[indx];
        irq = ocrdma_get_irq(dev, eq);
        synchronize_irq(irq);
        ocrdma_flush_cq(cq);

        (void)ocrdma_mbx_destroy_cq(dev, cq);
        if (cq->ucontext) {
                pdid = cq->ucontext->cntxt_pd->id;
                ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
                                PAGE_ALIGN(cq->len));
                ocrdma_del_mmap(cq->ucontext,
                                ocrdma_get_db_addr(dev, pdid),
                                dev->nic_info.db_page_size);
        }

        kfree(cq);
        return 0;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        int status = -EINVAL;

        if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
                dev->qp_tbl[qp->id] = qp;
                status = 0;
        }
        return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        dev->qp_tbl[qp->id] = NULL;
}

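/*
 * Sanity-check QP creation attributes against device limits. GSI (QP1)
 * is special-cased twice: its send-queue depth check is skipped (to
 * allow a CM size of 128), and only a single GSI QP may exist, with
 * its CQs reserved exclusively for it.
 */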
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
                                  struct ib_qp_init_attr *attrs)
{
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->qp_type != IB_QPT_RC) &&
            (attrs->qp_type != IB_QPT_UC) &&
            (attrs->qp_type != IB_QPT_UD)) {
                pr_err("%s(%d) unsupported qp type=0x%x requested\n",
                       __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* Skip the check for QP1 to support CM size of 128 */
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
                pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_wr);
                pr_err("%s(%d) supported send_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_wqe);
                return -EINVAL;
        }
        if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
                pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_wr);
                pr_err("%s(%d) supported recv_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_rqe);
                return -EINVAL;
        }
        if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
                pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_inline_data);
                pr_err("%s(%d) supported inline data size=0x%x\n",
                       __func__, dev->id, dev->attr.max_inline_data);
                return -EINVAL;
        }
        if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
                pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_sge);
                pr_err("%s(%d) supported send_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_send_sge);
                return -EINVAL;
        }
        if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
                pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_sge);
                pr_err("%s(%d) supported recv_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_recv_sge);
                return -EINVAL;
        }
        /* unprivileged user space cannot create special QP */
        if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
                pr_err("%s(%d) Userspace can't create special QPs of type=0x%x\n",
                       __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* allow creating only one GSI type of QP */
        if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
                pr_err("%s(%d) GSI special QPs already created.\n",
                       __func__, dev->id);
                return -EINVAL;
        }
        /* verify consumer QPs are not trying to use GSI QP's CQ */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
                if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
                        (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
                        pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
                                __func__, dev->id);
                        return -EINVAL;
                }
        }
        return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
                                struct ib_udata *udata, int dpp_offset,
                                int dpp_credit_lmt, int srq)
{
        int status = 0;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
        struct ocrdma_pd *pd = qp->pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        memset(&uresp, 0, sizeof(uresp));
        usr_db = dev->nic_info.unmapped_db +
                        (pd->id * dev->nic_info.db_page_size);
        uresp.qp_id = qp->id;
        uresp.sq_dbid = qp->sq.dbid;
        uresp.num_sq_pages = 1;
        uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
        uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
        uresp.num_wqe_allocated = qp->sq.max_cnt;
        if (!srq) {
                uresp.rq_dbid = qp->rq.dbid;
                uresp.num_rq_pages = 1;
                uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
                uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
                uresp.num_rqe_allocated = qp->rq.max_cnt;
        }
        uresp.db_page_addr = usr_db;
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
        uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
        uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

        if (qp->dpp_enabled) {
                uresp.dpp_credit = dpp_credit_lmt;
                uresp.dpp_offset = dpp_offset;
        }
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) user copy error.\n", __func__, dev->id);
                goto err;
        }
        status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
                                 uresp.sq_page_size);
        if (status)
                goto err;

        if (!srq) {
                status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
                                         uresp.rq_page_size);
                if (status)
                        goto rq_map_err;
        }
        return status;
rq_map_err:
        ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
        return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
                             struct ocrdma_pd *pd)
{
        if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_RQ_OFFSET;
        } else {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_RQ_OFFSET;
        }
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
        qp->wqe_wr_id_tbl =
            kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
                    GFP_KERNEL);
        if (qp->wqe_wr_id_tbl == NULL)
                return -ENOMEM;
        qp->rqe_wr_id_tbl =
            kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
        if (qp->rqe_wr_id_tbl == NULL)
                return -ENOMEM;

        return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
                                      struct ocrdma_pd *pd,
                                      struct ib_qp_init_attr *attrs)
{
        qp->pd = pd;
        spin_lock_init(&qp->q_lock);
        INIT_LIST_HEAD(&qp->sq_entry);
        INIT_LIST_HEAD(&qp->rq_entry);

        qp->qp_type = attrs->qp_type;
        qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
        qp->max_inline_data = attrs->cap.max_inline_data;
        qp->sq.max_sges = attrs->cap.max_send_sge;
        qp->rq.max_sges = attrs->cap.max_recv_sge;
        qp->state = OCRDMA_QPS_RST;
        qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
                                   struct ib_qp_init_attr *attrs)
{
        if (attrs->qp_type == IB_QPT_GSI) {
                dev->gsi_qp_created = 1;
                dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
                dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
        }
}

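/*
 * QP creation. Kernel QPs get wr_id tables and extra capability flags
 * here; user QPs do their wr_id bookkeeping in the userspace library
 * and receive queue/doorbell/DPP details via ocrdma_copy_qp_uresp().
 * dev_lock serializes this against other QP create/modify paths.
 */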
struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
                               struct ib_qp_init_attr *attrs,
                               struct ib_udata *udata)
{
        int status;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_create_qp_ureq ureq;
        u16 dpp_credit_lmt, dpp_offset;

        status = ocrdma_check_qp_params(ibpd, dev, attrs);
        if (status)
                goto gen_err;

        memset(&ureq, 0, sizeof(ureq));
        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return ERR_PTR(-EFAULT);
        }
        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp) {
                status = -ENOMEM;
                goto gen_err;
        }
        ocrdma_set_qp_init_params(qp, pd, attrs);
        if (udata == NULL)
                qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
                                        OCRDMA_QP_FAST_REG);

        mutex_lock(&dev->dev_lock);
        status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
                                        ureq.dpp_cq_id,
                                        &dpp_offset, &dpp_credit_lmt);
        if (status)
                goto mbx_err;

        /* user space QP's wr_id table are managed in library */
        if (udata == NULL) {
                status = ocrdma_alloc_wr_id_tbl(qp);
                if (status)
                        goto map_err;
        }

        status = ocrdma_add_qpn_map(dev, qp);
        if (status)
                goto map_err;
        ocrdma_set_qp_db(dev, qp, pd);
        if (udata) {
                status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
                                              dpp_credit_lmt,
                                              (attrs->srq != NULL));
                if (status)
                        goto cpy_err;
        }
        ocrdma_store_gsi_qp_cq(dev, attrs);
        qp->ibqp.qp_num = qp->id;
        mutex_unlock(&dev->dev_lock);
        return &qp->ibqp;

cpy_err:
        ocrdma_del_qpn_map(dev, qp);
map_err:
        ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
        mutex_unlock(&dev->dev_lock);
        kfree(qp->wqe_wr_id_tbl);
        kfree(qp->rqe_wr_id_tbl);
        kfree(qp);
        pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
        return ERR_PTR(status);
}

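/*
 * Low-level modify helper: advance the software QP state machine first
 * (when IB_QP_STATE is set), then push the attribute changes to the
 * hardware through the mailbox. ocrdma_modify_qp() below wraps this
 * with ib_modify_qp_is_ok() validation and locking.
 */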
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask)
{
        int status = 0;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        enum ib_qp_state old_qps;

        qp = get_ocrdma_qp(ibqp);
        dev = get_ocrdma_dev(ibqp->device);
        if (attr_mask & IB_QP_STATE)
                status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
        /* if the new and previous states are the same, the HW doesn't
         * need to know about it.
         */
        if (status < 0)
                return status;
        status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);

        return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_udata *udata)
{
        unsigned long flags;
        int status = -EINVAL;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        enum ib_qp_state old_qps, new_qps;

        qp = get_ocrdma_qp(ibqp);
        dev = get_ocrdma_dev(ibqp->device);

        /* synchronize with multiple contexts trying to change/retrieve qps */
        mutex_lock(&dev->dev_lock);
        /* synchronize with wqe, rqe posting and cqe processing contexts */
        spin_lock_irqsave(&qp->q_lock, flags);
        old_qps = get_ibqp_state(qp->state);
        if (attr_mask & IB_QP_STATE)
                new_qps = attr->qp_state;
        else
                new_qps = old_qps;
        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
                                IB_LINK_LAYER_ETHERNET)) {
                pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
                       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
                       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
                       old_qps, new_qps);
                goto param_err;
        }

        status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
        if (status > 0)
                status = 0;
param_err:
        mutex_unlock(&dev->dev_lock);
        return status;
}

1478 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1479 {
1480         switch (mtu) {
1481         case 256:
1482                 return IB_MTU_256;
1483         case 512:
1484                 return IB_MTU_512;
1485         case 1024:
1486                 return IB_MTU_1024;
1487         case 2048:
1488                 return IB_MTU_2048;
1489         case 4096:
1490                 return IB_MTU_4096;
1491         default:
1492                 return IB_MTU_1024;
1493         }
1494 }
1495
1496 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1497 {
1498         int ib_qp_acc_flags = 0;
1499
1500         if (qp_cap_flags & OCRDMA_QP_INB_WR)
1501                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1502         if (qp_cap_flags & OCRDMA_QP_INB_RD)
1503                 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1504         return ib_qp_acc_flags;
1505 }
1506
1507 int ocrdma_query_qp(struct ib_qp *ibqp,
1508                     struct ib_qp_attr *qp_attr,
1509                     int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1510 {
1511         int status;
1512         u32 qp_state;
1513         struct ocrdma_qp_params params;
1514         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1515         struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1516
1517         memset(&params, 0, sizeof(params));
1518         mutex_lock(&dev->dev_lock);
1519         status = ocrdma_mbx_query_qp(dev, qp, &params);
1520         mutex_unlock(&dev->dev_lock);
1521         if (status)
1522                 goto mbx_err;
1523         if (qp->qp_type == IB_QPT_UD)
1524                 qp_attr->qkey = params.qkey;
1525         qp_attr->path_mtu =
1526                 ocrdma_mtu_int_to_enum((params.path_mtu_pkey_indx &
1527                                 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1528                                 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT);
1529         qp_attr->path_mig_state = IB_MIG_MIGRATED;
1530         qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1531         qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1532         qp_attr->dest_qp_num =
1533             params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1534
1535         qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1536         qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1537         qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1538         qp_attr->cap.max_send_sge = qp->sq.max_sges;
1539         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1540         qp_attr->cap.max_inline_data = qp->max_inline_data;
1541         qp_init_attr->cap = qp_attr->cap;
1542         memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1543                sizeof(params.dgid));
1544         qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1545             OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1546         qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1547         qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1548                                           OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1549                                                 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1550         qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1551                                               OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1552                                                 OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1553
1554         qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1555         qp_attr->ah_attr.port_num = 1;
1556         qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1557                                OCRDMA_QP_PARAMS_SL_MASK) >>
1558                                 OCRDMA_QP_PARAMS_SL_SHIFT;
1559         qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1560                             OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1561                                 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1562         qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1563                               OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1564                                 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1565         qp_attr->retry_cnt =
1566             (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1567                 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1568         qp_attr->min_rnr_timer = 0;
1569         qp_attr->pkey_index = 0;
1570         qp_attr->port_num = 1;
1571         qp_attr->ah_attr.src_path_bits = 0;
1572         qp_attr->ah_attr.static_rate = 0;
1573         qp_attr->alt_pkey_index = 0;
1574         qp_attr->alt_port_num = 0;
1575         qp_attr->alt_timeout = 0;
1576         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1577         qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1578                     OCRDMA_QP_PARAMS_STATE_SHIFT;
1579         qp_attr->qp_state = get_ibqp_state(qp_state);
1580         qp_attr->cur_qp_state = qp_attr->qp_state;
1581         qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1582         qp_attr->max_dest_rd_atomic =
1583             params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1584         qp_attr->max_rd_atomic =
1585             params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1586         qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1587                                 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1588         /* Sync driver QP state with FW */
1589         ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1590 mbx_err:
1591         return status;
1592 }
1593
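     /* SRQ shadow-table indices are tracked in idx_bit_fields, one bit
      * per index (bit set = index free). Toggling flips an index between
      * free and in-use; e.g. idx = 37 lives in word 37 / 32 = 1,
      * bit 37 % 32 = 5.
      */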
1594 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1595 {
1596         unsigned int i = idx / 32;
1597         u32 mask = (1U << (idx % 32));
1598
1599         srq->idx_bit_fields[i] ^= mask;
1600 }
1601
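     /* Free-slot count for the ring, assuming max_wqe_idx == max_cnt - 1
      * (a power-of-two ring, as the mask-based head/tail advance below
      * implies). One slot stays unused to tell full from empty: e.g. with
      * max_cnt = 16, head = 5, tail = 2 this yields ((15 - 5) + 2) % 16 = 12.
      */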
1602 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1603 {
1604         return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1605 }
1606
1607 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1608 {
1609         return (qp->sq.tail == qp->sq.head);
1610 }
1611
1612 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1613 {
1614         return (qp->rq.tail == qp->rq.head);
1615 }
1616
1617 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1618 {
1619         return q->va + (q->head * q->entry_size);
1620 }
1621
1622 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1623                                       u32 idx)
1624 {
1625         return q->va + (idx * q->entry_size);
1626 }
1627
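     /* head/tail advance by masking with max_wqe_idx, so the queue depth
      * is expected to be a power of two.
      */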
1628 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1629 {
1630         q->head = (q->head + 1) & q->max_wqe_idx;
1631 }
1632
1633 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1634 {
1635         q->tail = (q->tail + 1) & q->max_wqe_idx;
1636 }
1637
1638 /* discard the cqe for a given QP */
1639 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1640 {
1641         unsigned long cq_flags;
1642         unsigned long flags;
1643         int discard_cnt = 0;
1644         u32 cur_getp, stop_getp;
1645         struct ocrdma_cqe *cqe;
1646         u32 qpn = 0, wqe_idx = 0;
1647
1648         spin_lock_irqsave(&cq->cq_lock, cq_flags);
1649
1650         /* Walk the CQEs in the hw CQ, find those that match the given
1651          * qp, and mark each match discarded by clearing its qpn.
1652          * The doorbell is rung in poll_cq(), since we do not complete
1653          * cqes out of order.
1654          */
1656
1657         cur_getp = cq->getp;
1658         /* find up to where we reap the cq. */
1659         stop_getp = cur_getp;
1660         do {
1661                 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1662                         break;
1663
1664                 cqe = cq->va + cur_getp;
1665                 /* exit when (a) the whole hw cq has been reaped, or
1666                  *    (b) the qp's sq/rq becomes empty.
1667                  */
1669                 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1670                 /* if previously discarded cqe found, skip that too. */
1671                 /* check for matching qp */
1672                 if (qpn == 0 || qpn != qp->id)
1673                         goto skip_cqe;
1674
1675                 if (is_cqe_for_sq(cqe)) {
1676                         ocrdma_hwq_inc_tail(&qp->sq);
1677                 } else {
1678                         if (qp->srq) {
1679                                 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1680                                         OCRDMA_CQE_BUFTAG_SHIFT) &
1681                                         qp->srq->rq.max_wqe_idx;
1682                                 if (wqe_idx < 1)
1683                                         BUG();
1684                                 spin_lock_irqsave(&qp->srq->q_lock, flags);
1685                                 ocrdma_hwq_inc_tail(&qp->srq->rq);
1686                                 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1687                                 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1688
1689                         } else {
1690                                 ocrdma_hwq_inc_tail(&qp->rq);
1691                         }
1692                 }
1693                 /* mark cqe discarded so that it is not picked up later
1694                  * in the poll_cq().
1695                  */
1696                 discard_cnt += 1;
1697                 cqe->cmn.qpn = 0;
1698 skip_cqe:
1699                 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1700         } while (cur_getp != stop_getp);
1701         spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1702 }
1703
1704 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1705 {
1706         bool found = false;
1707         unsigned long flags;
1708         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1709         /* sync with any active CQ poll */
1710
1711         spin_lock_irqsave(&dev->flush_q_lock, flags);
1712         found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1713         if (found)
1714                 list_del(&qp->sq_entry);
1715         if (!qp->srq) {
1716                 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1717                 if (found)
1718                         list_del(&qp->rq_entry);
1719         }
1720         spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1721 }
1722
1723 int ocrdma_destroy_qp(struct ib_qp *ibqp)
1724 {
1725         struct ocrdma_pd *pd;
1726         struct ocrdma_qp *qp;
1727         struct ocrdma_dev *dev;
1728         struct ib_qp_attr attrs;
1729         int attr_mask = IB_QP_STATE;
1730         unsigned long flags;
1731
1732         qp = get_ocrdma_qp(ibqp);
1733         dev = get_ocrdma_dev(ibqp->device);
1734
1735         attrs.qp_state = IB_QPS_ERR;
1736         pd = qp->pd;
1737
1738         /* change the QP state to ERROR */
1739         _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1740
1741         /* ensure that CQEs for a newly created QP (whose id may be the
1742          * same as that of the QP just being destroyed) don't get
1743          * discarded until the old CQEs are discarded.
1744          */
1745         mutex_lock(&dev->dev_lock);
1746         (void) ocrdma_mbx_destroy_qp(dev, qp);
1747
1748         /*
1749          * acquire CQ lock while destroy is in progress, in order to
1750          * protect against processing in-flight CQEs for this QP.
1751          */
1752         spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1753         if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1754                 spin_lock(&qp->rq_cq->cq_lock);
1755
1756         ocrdma_del_qpn_map(dev, qp);
1757
1758         if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1759                 spin_unlock(&qp->rq_cq->cq_lock);
1760         spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1761
1762         if (!pd->uctx) {
1763                 ocrdma_discard_cqes(qp, qp->sq_cq);
1764                 ocrdma_discard_cqes(qp, qp->rq_cq);
1765         }
1766         mutex_unlock(&dev->dev_lock);
1767
1768         if (pd->uctx) {
1769                 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1770                                 PAGE_ALIGN(qp->sq.len));
1771                 if (!qp->srq)
1772                         ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1773                                         PAGE_ALIGN(qp->rq.len));
1774         }
1775
1776         ocrdma_del_flush_qp(qp);
1777
1778         kfree(qp->wqe_wr_id_tbl);
1779         kfree(qp->rqe_wr_id_tbl);
1780         kfree(qp);
1781         return 0;
1782 }
1783
1784 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1785                                 struct ib_udata *udata)
1786 {
1787         int status;
1788         struct ocrdma_create_srq_uresp uresp;
1789
1790         memset(&uresp, 0, sizeof(uresp));
1791         uresp.rq_dbid = srq->rq.dbid;
1792         uresp.num_rq_pages = 1;
1793         uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1794         uresp.rq_page_size = srq->rq.len;
1795         uresp.db_page_addr = dev->nic_info.unmapped_db +
1796             (srq->pd->id * dev->nic_info.db_page_size);
1797         uresp.db_page_size = dev->nic_info.db_page_size;
1798         uresp.num_rqe_allocated = srq->rq.max_cnt;
1799         if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1800                 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1801                 uresp.db_shift = 24;
1802         } else {
1803                 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1804                 uresp.db_shift = 16;
1805         }
1806
1807         status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1808         if (status)
1809                 return status;
1810         return ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1811                                uresp.rq_page_size);
1815 }
1816
1817 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1818                                  struct ib_srq_init_attr *init_attr,
1819                                  struct ib_udata *udata)
1820 {
1821         int status = -ENOMEM;
1822         struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1823         struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1824         struct ocrdma_srq *srq;
1825
1826         if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1827                 return ERR_PTR(-EINVAL);
1828         if (init_attr->attr.max_wr > dev->attr.max_rqe)
1829                 return ERR_PTR(-EINVAL);
1830
1831         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1832         if (!srq)
1833                 return ERR_PTR(status);
1834
1835         spin_lock_init(&srq->q_lock);
1836         srq->pd = pd;
1837         srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1838         status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1839         if (status)
1840                 goto err;
1841
1842         if (udata == NULL) {
                     status = -ENOMEM;
1843                 srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
1844                             GFP_KERNEL);
1845                 if (srq->rqe_wr_id_tbl == NULL)
1846                         goto arm_err;
1847
1848                 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1849                     (srq->rq.max_cnt % 32 ? 1 : 0);
1850                 srq->idx_bit_fields =
1851                     kmalloc_array(srq->bit_fields_len, sizeof(u32),
                                       GFP_KERNEL);
1852                 if (srq->idx_bit_fields == NULL)
1853                         goto arm_err;
1854                 memset(srq->idx_bit_fields, 0xff,
1855                        srq->bit_fields_len * sizeof(u32));
1856         }
1857
1858         if (init_attr->attr.srq_limit) {
1859                 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1860                 if (status)
1861                         goto arm_err;
1862         }
1863
1864         if (udata) {
1865                 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1866                 if (status)
1867                         goto arm_err;
1868         }
1869
1870         return &srq->ibsrq;
1871
1872 arm_err:
1873         ocrdma_mbx_destroy_srq(dev, srq);
1874 err:
1875         kfree(srq->rqe_wr_id_tbl);
1876         kfree(srq->idx_bit_fields);
1877         kfree(srq);
1878         return ERR_PTR(status);
1879 }
1880
1881 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1882                       struct ib_srq_attr *srq_attr,
1883                       enum ib_srq_attr_mask srq_attr_mask,
1884                       struct ib_udata *udata)
1885 {
1886         int status = 0;
1887         struct ocrdma_srq *srq;
1888
1889         srq = get_ocrdma_srq(ibsrq);
1890         if (srq_attr_mask & IB_SRQ_MAX_WR)
1891                 status = -EINVAL;
1892         else
1893                 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1894         return status;
1895 }
1896
1897 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1898 {
1899         int status;
1900         struct ocrdma_srq *srq;
1901
1902         srq = get_ocrdma_srq(ibsrq);
1903         status = ocrdma_mbx_query_srq(srq, srq_attr);
1904         return status;
1905 }
1906
1907 int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1908 {
1909         int status;
1910         struct ocrdma_srq *srq;
1911         struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1912
1913         srq = get_ocrdma_srq(ibsrq);
1914
1915         status = ocrdma_mbx_destroy_srq(dev, srq);
1916
1917         if (srq->pd->uctx)
1918                 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1919                                 PAGE_ALIGN(srq->rq.len));
1920
1921         kfree(srq->idx_bit_fields);
1922         kfree(srq->rqe_wr_id_tbl);
1923         kfree(srq);
1924         return status;
1925 }
1926
1927 /* unprivileged verbs and their support functions. */
1928 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1929                                 struct ocrdma_hdr_wqe *hdr,
1930                                 struct ib_send_wr *wr)
1931 {
1932         struct ocrdma_ewqe_ud_hdr *ud_hdr =
1933                 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1934         struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1935
1936         ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1937         if (qp->qp_type == IB_QPT_GSI)
1938                 ud_hdr->qkey = qp->qkey;
1939         else
1940                 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1941         ud_hdr->rsvd_ahid = ah->id;
1942         if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1943                 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1944 }
1945
1946 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1947                               struct ocrdma_sge *sge, int num_sge,
1948                               struct ib_sge *sg_list)
1949 {
1950         int i;
1951
1952         for (i = 0; i < num_sge; i++) {
1953                 sge[i].lrkey = sg_list[i].lkey;
1954                 sge[i].addr_lo = sg_list[i].addr;
1955                 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1956                 sge[i].len = sg_list[i].length;
1957                 hdr->total_len += sg_list[i].length;
1958         }
1959         if (num_sge == 0)
1960                 memset(sge, 0, sizeof(*sge));
1961 }
1962
1963 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1964 {
1965         uint32_t total_len = 0, i;
1966
1967         for (i = 0; i < num_sge; i++)
1968                 total_len += sg_list[i].length;
1969         return total_len;
1970 }
1971
1972
1973 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1974                                     struct ocrdma_hdr_wqe *hdr,
1975                                     struct ocrdma_sge *sge,
1976                                     struct ib_send_wr *wr, u32 wqe_size)
1977 {
1978         int i;
1979         char *dpp_addr;
1980
1981         if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1982                 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1983                 if (unlikely(hdr->total_len > qp->max_inline_data)) {
1984                         pr_err("%s() supported_len=0x%x,\n"
1985                                " unsupported len req=0x%x\n", __func__,
1986                                 qp->max_inline_data, hdr->total_len);
1987                         return -EINVAL;
1988                 }
1989                 dpp_addr = (char *)sge;
1990                 for (i = 0; i < wr->num_sge; i++) {
1991                         memcpy(dpp_addr,
1992                                (void *)(unsigned long)wr->sg_list[i].addr,
1993                                wr->sg_list[i].length);
1994                         dpp_addr += wr->sg_list[i].length;
1995                 }
1996
1997                 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
1998                 if (hdr->total_len == 0)
1999                         wqe_size += sizeof(struct ocrdma_sge);
2000                 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
2001         } else {
2002                 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2003                 if (wr->num_sge)
2004                         wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
2005                 else
2006                         wqe_size += sizeof(struct ocrdma_sge);
2007                 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2008         }
2009         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2010         return 0;
2011 }
2012
2013 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2014                              struct ib_send_wr *wr)
2015 {
2016         int status;
2017         struct ocrdma_sge *sge;
2018         u32 wqe_size = sizeof(*hdr);
2019
2020         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2021                 ocrdma_build_ud_hdr(qp, hdr, wr);
2022                 sge = (struct ocrdma_sge *)(hdr + 2);
2023                 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
2024         } else {
2025                 sge = (struct ocrdma_sge *)(hdr + 1);
2026         }
2027
2028         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2029         return status;
2030 }
2031
2032 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2033                               struct ib_send_wr *wr)
2034 {
2035         int status;
2036         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2037         struct ocrdma_sge *sge = ext_rw + 1;
2038         u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2039
2040         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2041         if (status)
2042                 return status;
2043         ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2044         ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2045         ext_rw->lrkey = wr->wr.rdma.rkey;
2046         ext_rw->len = hdr->total_len;
2047         return 0;
2048 }
2049
2050 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2051                               struct ib_send_wr *wr)
2052 {
2053         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2054         struct ocrdma_sge *sge = ext_rw + 1;
2055         u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2056             sizeof(struct ocrdma_hdr_wqe);
2057
2058         ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2059         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2060         hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2061         hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2062
2063         ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2064         ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2065         ext_rw->lrkey = wr->wr.rdma.rkey;
2066         ext_rw->len = hdr->total_len;
2067 }
2068
2069 static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
2070                             struct ocrdma_hw_mr *hwmr)
2071 {
2072         int i;
2073         u64 buf_addr = 0;
2074         int num_pbes;
2075         struct ocrdma_pbe *pbe;
2076
2077         pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2078         num_pbes = 0;
2079
2080         /* go through the OS phy regions & fill hw pbe entries into pbls. */
2081         for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
2082                 /* one OS buffer can need more than one pbe when buffers
2083                  * are of different sizes; split the ib_buf into one or
2084                  * more pbes.
2085                  */
2086                 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
2087                 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2088                 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2089                 num_pbes += 1;
2090                 pbe++;
2091
2092                 /* if the pbl is full storing the pbes,
2093                  * move to next pbl.
2094                 */
2095                 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
2096                         pbl_tbl++;
2097                         pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2098                 }
2099         }
2100         return;
2101 }
2102
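     /* Encode a page size as the hw exponent relative to 4K:
      * 4096 -> 0, 8192 -> 1, ..., 256M (4096 << 16) -> 16. A size that
      * is not a power-of-two multiple of 4K falls out of the loop as 17.
      */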
2103 static int get_encoded_page_size(int pg_sz)
2104 {
2105         /* Max size is 256M: 4096 << 16 */
2106         int i = 0;
2107         for (; i < 17; i++)
2108                 if (pg_sz == (4096 << i))
2109                         break;
2110         return i;
2111 }
2112
2113
2114 static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2115                            struct ib_send_wr *wr)
2116 {
2117         u64 fbo;
2118         struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2119         struct ocrdma_mr *mr;
2120         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2121         u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2122
2123         wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2124
2125         if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
2126                 return -EINVAL;
2127
2128         hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2129         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2130
2131         if (wr->wr.fast_reg.page_list_len == 0)
2132                 BUG();
2133         if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
2134                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2135         if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
2136                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2137         if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
2138                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2139         hdr->lkey = wr->wr.fast_reg.rkey;
2140         hdr->total_len = wr->wr.fast_reg.length;
2141
2142         fbo = wr->wr.fast_reg.iova_start -
2143             (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
2144
2145         fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
2146         fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
2147         fast_reg->fbo_hi = upper_32_bits(fbo);
2148         fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2149         fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
2150         fast_reg->size_sge =
2151                 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
2152         mr = (struct ocrdma_mr *) (unsigned long)
2153                 dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
2154         build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2155         return 0;
2156 }
2157
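     /* Doorbell word: the low bits carry the queue id, and the number of
      * newly posted WQEs (always 1 here) is written at OCRDMA_DB_SQ_SHIFT,
      * matching the RQ and SRQ doorbells below.
      */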
2158 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2159 {
2160         u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2161
2162         iowrite32(val, qp->sq_db);
2163 }
2164
2165 int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2166                      struct ib_send_wr **bad_wr)
2167 {
2168         int status = 0;
2169         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2170         struct ocrdma_hdr_wqe *hdr;
2171         unsigned long flags;
2172
2173         spin_lock_irqsave(&qp->q_lock, flags);
2174         if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2175                 spin_unlock_irqrestore(&qp->q_lock, flags);
2176                 *bad_wr = wr;
2177                 return -EINVAL;
2178         }
2179
2180         while (wr) {
2181                 if (qp->qp_type == IB_QPT_UD &&
2182                     (wr->opcode != IB_WR_SEND &&
2183                      wr->opcode != IB_WR_SEND_WITH_IMM)) {
2184                         *bad_wr = wr;
2185                         status = -EINVAL;
2186                         break;
2187                 }
2188                 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2189                     wr->num_sge > qp->sq.max_sges) {
2190                         *bad_wr = wr;
2191                         status = -ENOMEM;
2192                         break;
2193                 }
2194                 hdr = ocrdma_hwq_head(&qp->sq);
2195                 hdr->cw = 0;
2196                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2197                         hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2198                 if (wr->send_flags & IB_SEND_FENCE)
2199                         hdr->cw |=
2200                             (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2201                 if (wr->send_flags & IB_SEND_SOLICITED)
2202                         hdr->cw |=
2203                             (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2204                 hdr->total_len = 0;
2205                 switch (wr->opcode) {
2206                 case IB_WR_SEND_WITH_IMM:
2207                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2208                         hdr->immdt = ntohl(wr->ex.imm_data);
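                             /* fall through */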
2209                 case IB_WR_SEND:
2210                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2211                         status = ocrdma_build_send(qp, hdr, wr);
2212                         break;
2213                 case IB_WR_SEND_WITH_INV:
2214                         hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2215                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2216                         hdr->lkey = wr->ex.invalidate_rkey;
2217                         status = ocrdma_build_send(qp, hdr, wr);
2218                         break;
2219                 case IB_WR_RDMA_WRITE_WITH_IMM:
2220                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2221                         hdr->immdt = ntohl(wr->ex.imm_data);
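                             /* fall through */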
2222                 case IB_WR_RDMA_WRITE:
2223                         hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2224                         status = ocrdma_build_write(qp, hdr, wr);
2225                         break;
2226                 case IB_WR_RDMA_READ:
2227                         ocrdma_build_read(qp, hdr, wr);
2228                         break;
2229                 case IB_WR_LOCAL_INV:
2230                         hdr->cw |=
2231                             (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2232                         hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2233                                         sizeof(struct ocrdma_sge)) /
2234                                 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2235                         hdr->lkey = wr->ex.invalidate_rkey;
2236                         break;
2237                 case IB_WR_FAST_REG_MR:
2238                         status = ocrdma_build_fr(qp, hdr, wr);
2239                         break;
2240                 default:
2241                         status = -EINVAL;
2242                         break;
2243                 }
2244                 if (status) {
2245                         *bad_wr = wr;
2246                         break;
2247                 }
2248                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2249                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2250                 else
2251                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2252                 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2253                 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2254                                    OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2255                 /* make sure wqe is written before adapter can access it */
2256                 wmb();
2257                 /* inform hw to start processing it */
2258                 ocrdma_ring_sq_db(qp);
2259
2260                 /* update pointer, counter for next wr */
2261                 ocrdma_hwq_inc_head(&qp->sq);
2262                 wr = wr->next;
2263         }
2264         spin_unlock_irqrestore(&qp->q_lock, flags);
2265         return status;
2266 }
2267
2268 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2269 {
2270         u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2271
2272         iowrite32(val, qp->rq_db);
2273 }
2274
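     /* Build a receive WQE. @tag is 0 when posting to a QP's RQ (there the
      * wr_id shadow table is indexed by rq.head), and a shadow-table index
      * when posting to an SRQ, so out-of-order SRQ completions can be
      * matched back to their wr_id.
      */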
2275 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2276                              u16 tag)
2277 {
2278         u32 wqe_size = 0;
2279         struct ocrdma_sge *sge;
2280         if (wr->num_sge)
2281                 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2282         else
2283                 wqe_size = sizeof(*sge) + sizeof(*rqe);
2284
2285         rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2286                                 OCRDMA_WQE_SIZE_SHIFT);
2287         rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2288         rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2289         rqe->total_len = 0;
2290         rqe->rsvd_tag = tag;
2291         sge = (struct ocrdma_sge *)(rqe + 1);
2292         ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2293         ocrdma_cpu_to_le32(rqe, wqe_size);
2294 }
2295
2296 int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2297                      struct ib_recv_wr **bad_wr)
2298 {
2299         int status = 0;
2300         unsigned long flags;
2301         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2302         struct ocrdma_hdr_wqe *rqe;
2303
2304         spin_lock_irqsave(&qp->q_lock, flags);
2305         if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2306                 spin_unlock_irqrestore(&qp->q_lock, flags);
2307                 *bad_wr = wr;
2308                 return -EINVAL;
2309         }
2310         while (wr) {
2311                 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2312                     wr->num_sge > qp->rq.max_sges) {
2313                         *bad_wr = wr;
2314                         status = -ENOMEM;
2315                         break;
2316                 }
2317                 rqe = ocrdma_hwq_head(&qp->rq);
2318                 ocrdma_build_rqe(rqe, wr, 0);
2319
2320                 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2321                 /* make sure rqe is written before adapter can access it */
2322                 wmb();
2323
2324                 /* inform hw to start processing it */
2325                 ocrdma_ring_rq_db(qp);
2326
2327                 /* update pointer, counter for next wr */
2328                 ocrdma_hwq_inc_head(&qp->rq);
2329                 wr = wr->next;
2330         }
2331         spin_unlock_irqrestore(&qp->q_lock, flags);
2332         return status;
2333 }
2334
2335 /* cqes for an srq's rqes can arrive out of order. The index gives
2336  * the entry in the shadow table where the wr_id is stored; the
2337  * tag/index is returned in the cqe to refer back to a given rqe.
2338  */
2340 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2341 {
2342         int row = 0;
2343         int indx = 0;
2344
2345         for (row = 0; row < srq->bit_fields_len; row++) {
2346                 if (srq->idx_bit_fields[row]) {
2347                         indx = ffs(srq->idx_bit_fields[row]);
2348                         indx = (row * 32) + (indx - 1);
2349                         if (indx >= srq->rq.max_cnt)
2350                                 BUG();
2351                         ocrdma_srq_toggle_bit(srq, indx);
2352                         break;
2353                 }
2354         }
2355
2356         if (row == srq->bit_fields_len)
2357                 BUG();
2358         return indx + 1; /* Use from index 1 */
2359 }
2360
2361 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2362 {
2363         u32 val = srq->rq.dbid | (1 << 16);
2364
2365         iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2366 }
2367
2368 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2369                          struct ib_recv_wr **bad_wr)
2370 {
2371         int status = 0;
2372         unsigned long flags;
2373         struct ocrdma_srq *srq;
2374         struct ocrdma_hdr_wqe *rqe;
2375         u16 tag;
2376
2377         srq = get_ocrdma_srq(ibsrq);
2378
2379         spin_lock_irqsave(&srq->q_lock, flags);
2380         while (wr) {
2381                 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2382                     wr->num_sge > srq->rq.max_sges) {
2383                         status = -ENOMEM;
2384                         *bad_wr = wr;
2385                         break;
2386                 }
2387                 tag = ocrdma_srq_get_idx(srq);
2388                 rqe = ocrdma_hwq_head(&srq->rq);
2389                 ocrdma_build_rqe(rqe, wr, tag);
2390
2391                 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2392                 /* make sure rqe is written before adapter can perform DMA */
2393                 wmb();
2394                 /* inform hw to start processing it */
2395                 ocrdma_ring_srq_db(srq);
2396                 /* update pointer, counter for next wr */
2397                 ocrdma_hwq_inc_head(&srq->rq);
2398                 wr = wr->next;
2399         }
2400         spin_unlock_irqrestore(&srq->q_lock, flags);
2401         return status;
2402 }
2403
2404 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2405 {
2406         enum ib_wc_status ibwc_status;
2407
2408         switch (status) {
2409         case OCRDMA_CQE_GENERAL_ERR:
2410                 ibwc_status = IB_WC_GENERAL_ERR;
2411                 break;
2412         case OCRDMA_CQE_LOC_LEN_ERR:
2413                 ibwc_status = IB_WC_LOC_LEN_ERR;
2414                 break;
2415         case OCRDMA_CQE_LOC_QP_OP_ERR:
2416                 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2417                 break;
2418         case OCRDMA_CQE_LOC_EEC_OP_ERR:
2419                 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2420                 break;
2421         case OCRDMA_CQE_LOC_PROT_ERR:
2422                 ibwc_status = IB_WC_LOC_PROT_ERR;
2423                 break;
2424         case OCRDMA_CQE_WR_FLUSH_ERR:
2425                 ibwc_status = IB_WC_WR_FLUSH_ERR;
2426                 break;
2427         case OCRDMA_CQE_MW_BIND_ERR:
2428                 ibwc_status = IB_WC_MW_BIND_ERR;
2429                 break;
2430         case OCRDMA_CQE_BAD_RESP_ERR:
2431                 ibwc_status = IB_WC_BAD_RESP_ERR;
2432                 break;
2433         case OCRDMA_CQE_LOC_ACCESS_ERR:
2434                 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2435                 break;
2436         case OCRDMA_CQE_REM_INV_REQ_ERR:
2437                 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2438                 break;
2439         case OCRDMA_CQE_REM_ACCESS_ERR:
2440                 ibwc_status = IB_WC_REM_ACCESS_ERR;
2441                 break;
2442         case OCRDMA_CQE_REM_OP_ERR:
2443                 ibwc_status = IB_WC_REM_OP_ERR;
2444                 break;
2445         case OCRDMA_CQE_RETRY_EXC_ERR:
2446                 ibwc_status = IB_WC_RETRY_EXC_ERR;
2447                 break;
2448         case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2449                 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2450                 break;
2451         case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2452                 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2453                 break;
2454         case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2455                 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2456                 break;
2457         case OCRDMA_CQE_REM_ABORT_ERR:
2458                 ibwc_status = IB_WC_REM_ABORT_ERR;
2459                 break;
2460         case OCRDMA_CQE_INV_EECN_ERR:
2461                 ibwc_status = IB_WC_INV_EECN_ERR;
2462                 break;
2463         case OCRDMA_CQE_INV_EEC_STATE_ERR:
2464                 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2465                 break;
2466         case OCRDMA_CQE_FATAL_ERR:
2467                 ibwc_status = IB_WC_FATAL_ERR;
2468                 break;
2469         case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2470                 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2471                 break;
2472         default:
2473                 ibwc_status = IB_WC_GENERAL_ERR;
2474                 break;
2475         }
2476         return ibwc_status;
2477 }
2478
2479 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2480                       u32 wqe_idx)
2481 {
2482         struct ocrdma_hdr_wqe *hdr;
2483         struct ocrdma_sge *rw;
2484         int opcode;
2485
2486         hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2487
2488         ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2489         /* Undo the hdr->cw swap */
2490         opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2491         switch (opcode) {
2492         case OCRDMA_WRITE:
2493                 ibwc->opcode = IB_WC_RDMA_WRITE;
2494                 break;
2495         case OCRDMA_READ:
2496                 rw = (struct ocrdma_sge *)(hdr + 1);
2497                 ibwc->opcode = IB_WC_RDMA_READ;
2498                 ibwc->byte_len = rw->len;
2499                 break;
2500         case OCRDMA_SEND:
2501                 ibwc->opcode = IB_WC_SEND;
2502                 break;
2503         case OCRDMA_FR_MR:
2504                 ibwc->opcode = IB_WC_FAST_REG_MR;
2505                 break;
2506         case OCRDMA_LKEY_INV:
2507                 ibwc->opcode = IB_WC_LOCAL_INV;
2508                 break;
2509         default:
2510                 ibwc->status = IB_WC_GENERAL_ERR;
2511                 pr_err("%s() invalid opcode received = 0x%x\n",
2512                        __func__, opcode);
2513                 break;
2514         }
2515 }
2516
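     /* Rewrite the status field of a CQE in place to WR_FLUSH_ERR, so a
      * coalesced CQE can be replayed once per pending WQE/RQE while the
      * QP is being flushed. UD-type QPs keep their status in a different
      * field, hence the separate mask/shift pair.
      */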
2517 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2518                                                 struct ocrdma_cqe *cqe)
2519 {
2520         if (is_cqe_for_sq(cqe)) {
2521                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2522                                 cqe->flags_status_srcqpn) &
2523                                         ~OCRDMA_CQE_STATUS_MASK);
2524                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2525                                 cqe->flags_status_srcqpn) |
2526                                 (OCRDMA_CQE_WR_FLUSH_ERR <<
2527                                         OCRDMA_CQE_STATUS_SHIFT));
2528         } else {
2529                 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2530                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2531                                         cqe->flags_status_srcqpn) &
2532                                                 ~OCRDMA_CQE_UD_STATUS_MASK);
2533                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2534                                         cqe->flags_status_srcqpn) |
2535                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2536                                                 OCRDMA_CQE_UD_STATUS_SHIFT));
2537                 } else {
2538                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2539                                         cqe->flags_status_srcqpn) &
2540                                                 ~OCRDMA_CQE_STATUS_MASK);
2541                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2542                                         cqe->flags_status_srcqpn) |
2543                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2544                                                 OCRDMA_CQE_STATUS_SHIFT));
2545                 }
2546         }
2547 }
2548
2549 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2550                                   struct ocrdma_qp *qp, int status)
2551 {
2552         bool expand = false;
2553
2554         ibwc->byte_len = 0;
2555         ibwc->qp = &qp->ibqp;
2556         ibwc->status = ocrdma_to_ibwc_err(status);
2557
2558         ocrdma_flush_qp(qp);
2559         ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2560
2561         /* if a wqe/rqe is pending for which a cqe needs to be returned,
2562          * trigger expanding it.
2563          */
2564         if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2565                 expand = true;
2566                 ocrdma_set_cqe_status_flushed(qp, cqe);
2567         }
2568         return expand;
2569 }
2570
2571 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2572                                   struct ocrdma_qp *qp, int status)
2573 {
2574         ibwc->opcode = IB_WC_RECV;
2575         ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2576         ocrdma_hwq_inc_tail(&qp->rq);
2577
2578         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2579 }
2580
2581 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2582                                   struct ocrdma_qp *qp, int status)
2583 {
2584         ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2585         ocrdma_hwq_inc_tail(&qp->sq);
2586
2587         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2588 }
2589
2590
2591 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2592                                  struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2593                                  bool *polled, bool *stop)
2594 {
2595         bool expand;
2596         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2597         int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2598                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2599         if (status < OCRDMA_MAX_CQE_ERR)
2600                 atomic_inc(&dev->cqe_err_stats[status]);
2601
2602         /* when the hw sq is empty but the rq is not, keep the cqe
2603          * so that the cq event is raised again.
2604          */
2605         if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2606                 /* when cq for rq and sq is same, it is safe to return
2607                  * flush cqe for RQEs.
2608                  */
2609                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2610                         *polled = true;
2611                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2612                         expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2613                 } else {
2614                         /* stop processing further cqes, as this cqe is
2615                          * used for triggering the cq event on the buddy
2616                          * cq of the RQ. When the QP is destroyed, this
2617                          * cqe is removed from the cq's hardware q.
2618                          */
2619                         *polled = false;
2620                         *stop = true;
2621                         expand = false;
2622                 }
2623         } else if (is_hw_sq_empty(qp)) {
2624                 /* Do nothing */
2625                 expand = false;
2626                 *polled = false;
2627                 *stop = false;
2628         } else {
2629                 *polled = true;
2630                 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2631         }
2632         return expand;
2633 }
2634
2635 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2636                                      struct ocrdma_cqe *cqe,
2637                                      struct ib_wc *ibwc, bool *polled)
2638 {
2639         bool expand = false;
2640         int tail = qp->sq.tail;
2641         u32 wqe_idx;
2642
2643         if (!qp->wqe_wr_id_tbl[tail].signaled) {
2644                 *polled = false;    /* WC cannot be consumed yet */
2645         } else {
2646                 ibwc->status = IB_WC_SUCCESS;
2647                 ibwc->wc_flags = 0;
2648                 ibwc->qp = &qp->ibqp;
2649                 ocrdma_update_wc(qp, ibwc, tail);
2650                 *polled = true;
2651         }
2652         wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2653                         OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2654         if (tail != wqe_idx)
2655                 expand = true; /* Coalesced CQE can't be consumed yet */
2656
2657         ocrdma_hwq_inc_tail(&qp->sq);
2658         return expand;
2659 }
2660
2661 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2662                              struct ib_wc *ibwc, bool *polled, bool *stop)
2663 {
2664         int status;
2665         bool expand;
2666
2667         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2668                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2669
2670         if (status == OCRDMA_CQE_SUCCESS)
2671                 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2672         else
2673                 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2674         return expand;
2675 }
2676
2677 static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2678 {
2679         int status;
2680
2681         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2682                 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2683         ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2684                                                 OCRDMA_CQE_SRCQP_MASK;
2685         ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2686                                                 OCRDMA_CQE_PKEY_MASK;
2687         ibwc->wc_flags = IB_WC_GRH;
2688         ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2689                                         OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2690         return status;
2691 }
2692
2693 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2694                                        struct ocrdma_cqe *cqe,
2695                                        struct ocrdma_qp *qp)
2696 {
2697         unsigned long flags;
2698         struct ocrdma_srq *srq;
2699         u32 wqe_idx;
2700
2701         srq = get_ocrdma_srq(qp->ibqp.srq);
2702         wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2703                 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2704         if (wqe_idx < 1)
2705                 BUG();
2706
2707         ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2708         spin_lock_irqsave(&srq->q_lock, flags);
2709         ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
2710         spin_unlock_irqrestore(&srq->q_lock, flags);
2711         ocrdma_hwq_inc_tail(&srq->rq);
2712 }
2713
2714 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2715                                 struct ib_wc *ibwc, bool *polled, bool *stop,
2716                                 int status)
2717 {
2718         bool expand;
2719         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2720
2721         if (status < OCRDMA_MAX_CQE_ERR)
2722                 atomic_inc(&dev->cqe_err_stats[status]);
2723
2724         /* when the hw rq is empty but the sq is not, keep the cqe
2725          * so that the cq event is raised again.
2726          */
2727         if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2728                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2729                         *polled = true;
2730                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2731                         expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2732                 } else {
2733                         *polled = false;
2734                         *stop = true;
2735                         expand = false;
2736                 }
2737         } else if (is_hw_rq_empty(qp)) {
2738                 /* Do nothing */
2739                 expand = false;
2740                 *polled = false;
2741                 *stop = false;
2742         } else {
2743                 *polled = true;
2744                 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2745         }
2746         return expand;
2747 }
2748
2749 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2750                                      struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2751 {
2752         ibwc->opcode = IB_WC_RECV;
2753         ibwc->qp = &qp->ibqp;
2754         ibwc->status = IB_WC_SUCCESS;
2755
2756         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2757                 ocrdma_update_ud_rcqe(ibwc, cqe);
2758         else
2759                 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2760
2761         if (is_cqe_imm(cqe)) {
2762                 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2763                 ibwc->wc_flags |= IB_WC_WITH_IMM;
2764         } else if (is_cqe_wr_imm(cqe)) {
2765                 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2766                 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2767                 ibwc->wc_flags |= IB_WC_WITH_IMM;
2768         } else if (is_cqe_invalidated(cqe)) {
2769                 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2770                 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2771         }
2772         if (qp->ibqp.srq) {
2773                 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2774         } else {
2775                 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2776                 ocrdma_hwq_inc_tail(&qp->rq);
2777         }
2778 }
2779
2780 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2781                              struct ib_wc *ibwc, bool *polled, bool *stop)
2782 {
2783         int status;
2784         bool expand = false;
2785
2786         ibwc->wc_flags = 0;
2787         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2788                 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2789                                         OCRDMA_CQE_UD_STATUS_MASK) >>
2790                                         OCRDMA_CQE_UD_STATUS_SHIFT;
2791         } else {
2792                 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2793                              OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2794         }
2795
2796         if (status == OCRDMA_CQE_SUCCESS) {
2797                 *polled = true;
2798                 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2799         } else {
2800                 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2801                                               status);
2802         }
2803         return expand;
2804 }
2805
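     /* CQE validity is tracked either by a phase bit that flips on every
      * wrap of the CQ (when phase_change is set) or, failing that, by
      * clearing the valid bit of each consumed CQE.
      */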
static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
                                   u16 cur_getp)
{
        if (cq->phase_change) {
                if (cur_getp == 0)
                        cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
        } else {
                /* clear valid bit */
                cqe->flags_status_srcqpn = 0;
        }
}

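/*
 * Drain up to num_entries CQEs from the hardware CQ into the ibwc array.
 * "expand" means the same CQE yields another work completion on the next
 * iteration (used while flushing pending WQEs), so the get pointer is
 * not advanced.  "stop" aborts the poll with the current CQE left in
 * place for a later poll.  Returns the number of ib_wc entries filled
 * in; the CQ doorbell is rung once at the end to release the consumed
 * CQEs and, if an arm request was deferred, to arm the CQ.
 */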
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
                            struct ib_wc *ibwc)
{
        u16 qpn = 0;
        int i = 0;
        bool expand = false;
        int polled_hw_cqes = 0;
        struct ocrdma_qp *qp = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe;
        u16 cur_getp;
        bool polled = false;
        bool stop = false;

        cur_getp = cq->getp;
        while (num_entries) {
                cqe = cq->va + cur_getp;
                /* check whether this is a valid cqe */
                if (!is_cqe_valid(cq, cqe))
                        break;
                qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
                /* ignore discarded cqe */
                if (qpn == 0)
                        goto skip_cqe;
                qp = dev->qp_tbl[qpn];
                BUG_ON(qp == NULL);

                if (is_cqe_for_sq(cqe)) {
                        expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
                                                  &stop);
                } else {
                        expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
                                                  &stop);
                }
                if (expand)
                        goto expand_cqe;
                if (stop)
                        goto stop_cqe;
                /* clear qpn to avoid duplicate processing by discard_cqe() */
                cqe->cmn.qpn = 0;
skip_cqe:
                polled_hw_cqes += 1;
                cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
                ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
                if (polled) {
                        num_entries -= 1;
                        i += 1;
                        ibwc = ibwc + 1;
                        polled = false;
                }
        }
stop_cqe:
        cq->getp = cur_getp;
        if (cq->deferred_arm) {
                ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
                                  polled_hw_cqes);
                cq->deferred_arm = false;
                cq->deferred_sol = false;
        } else {
                /* We need to pop the CQE. No need to arm */
                ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
                                  polled_hw_cqes);
                cq->deferred_sol = false;
        }

        return i;
}

/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
                              struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
        int err_cqes = 0;

        while (num_entries) {
                if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
                        break;
                if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
                        ocrdma_update_wc(qp, ibwc, qp->sq.tail);
                        ocrdma_hwq_inc_tail(&qp->sq);
                } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
                        ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
                        ocrdma_hwq_inc_tail(&qp->rq);
                } else {
                        return err_cqes;
                }
                ibwc->byte_len = 0;
                ibwc->status = IB_WC_WR_FLUSH_ERR;
                ibwc = ibwc + 1;
                err_cqes += 1;
                num_entries -= 1;
        }
        return err_cqes;
}

int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        int cqes_to_poll = num_entries;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int num_os_cqe = 0, err_cqes = 0;
        struct ocrdma_qp *qp;
        unsigned long flags;

        /* poll cqes from adapter CQ */
        spin_lock_irqsave(&cq->cq_lock, flags);
        num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
        cqes_to_poll -= num_os_cqe;

        if (cqes_to_poll) {
                wc = wc + num_os_cqe;
                /* The adapter returns a single error cqe when a QP moves
                 * to the error state.  So, for every QP using this CQ,
                 * insert software-generated error cqes with wc_status set
                 * to FLUSHED for the pending WQEs of its SQ and the
                 * pending RQEs of its RQ.
                 */
                spin_lock_irqsave(&dev->flush_q_lock, flags);
                list_for_each_entry(qp, &cq->sq_head, sq_entry) {
                        if (cqes_to_poll == 0)
                                break;
                        err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
                        cqes_to_poll -= err_cqes;
                        num_os_cqe += err_cqes;
                        wc = wc + err_cqes;
                }
                spin_unlock_irqrestore(&dev->flush_q_lock, flags);
        }
        return num_os_cqe;
}

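/*
 * Request a completion notification.  Only the very first arm after CQ
 * creation rings the doorbell directly; afterwards the arm request is
 * recorded in deferred_arm/deferred_sol and issued from
 * ocrdma_poll_hwcq() together with the doorbell write that releases the
 * consumed CQEs.
 */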
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        u16 cq_id;
        unsigned long flags;
        bool arm_needed = false, sol_needed = false;

        cq_id = cq->id;

        spin_lock_irqsave(&cq->cq_lock, flags);
        if (cq_flags & (IB_CQ_NEXT_COMP | IB_CQ_SOLICITED))
                arm_needed = true;
        if (cq_flags & IB_CQ_SOLICITED)
                sol_needed = true;

        if (cq->first_arm) {
                ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
                cq->first_arm = false;
        }

        cq->deferred_arm = true;
        cq->deferred_sol = sol_needed;
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return 0;
}

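/*
 * Allocate a fast-register MR: reserve PBL space for up to
 * max_page_list_len pages, register the MR with the device with no
 * access rights (rights are supplied later by the fast-register work
 * request), and remember the MR in stag_arr so a CQE's stag/lkey can be
 * mapped back to it.
 */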
struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (max_page_list_len > dev->attr.max_pages_per_frmr)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
        if (status)
                goto pbl_err;
        mr->hwmr.fr_mr = 1;
        mr->hwmr.remote_rd = 0;
        mr->hwmr.remote_wr = 0;
        mr->hwmr.local_rd = 0;
        mr->hwmr.local_wr = 0;
        mr->hwmr.mw_bind = 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto pbl_err;
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
        if (status)
                goto mbx_err;
        mr->ibmr.rkey = mr->hwmr.lkey;
        mr->ibmr.lkey = mr->hwmr.lkey;
        dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
                (unsigned long) mr;
        return &mr->ibmr;
mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
        kfree(mr);
        /* propagate the real error instead of clobbering it with -ENOMEM */
        return ERR_PTR(status);
}

struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
                                                          *ibdev,
                                                          int page_list_len)
{
        struct ib_fast_reg_page_list *frmr_list;
        int size;

        size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
        frmr_list = kzalloc(size, GFP_KERNEL);
        if (!frmr_list)
                return ERR_PTR(-ENOMEM);
        frmr_list->page_list = (u64 *)(frmr_list + 1);
        return frmr_list;
}

void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
{
        kfree(page_list);
}

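/*
 * PBE sizing for ocrdma_reg_kernel_mr(): pick the largest power-of-two
 * chunk, capped at MAX_KERNEL_PBE_SIZE, that evenly covers every buffer
 * in the list.  Worked example with illustrative values and 4K pages:
 * for buffers of 12K and 8K the sizes round up to 16K and 8K, the
 * common PBE size becomes 8K, and (16K + 8K) / 8K = 3 PBEs are
 * reported.  A return of 0 signals an unusable layout (a buffer other
 * than the first with a non page-aligned address or length).
 */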
#define MAX_KERNEL_PBE_SIZE 65536
static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
                                    int buf_cnt, u32 *pbe_size)
{
        u64 total_size = 0;
        u64 buf_size = 0;
        int i;
        *pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
        *pbe_size = roundup_pow_of_two(*pbe_size);

        /* find the smallest PBE size that we can have */
        for (i = 0; i < buf_cnt; i++) {
                /* first addr may not be page aligned, so skip the check */
                if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
                                 (buf_list[i].size & ~PAGE_MASK))) {
                        return 0;
                }

                /* if the chosen PBE size is greater than this buffer's,
                 * reduce the PBE size.
                 */
                buf_size = roundup(buf_list[i].size, PAGE_SIZE);
                /* pbe_size must be a power-of-two number of pages:
                 * 1, 2, 4, 8, ...
                 */
                buf_size = roundup_pow_of_two(buf_size);
                if (*pbe_size > buf_size)
                        *pbe_size = buf_size;

                total_size += buf_size;
        }
        *pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
            (MAX_KERNEL_PBE_SIZE) : (*pbe_size);

        /* num_pbes = total_size / *pbe_size, computed via shift since
         * *pbe_size is a power of two.
         */
        return total_size >> ilog2(*pbe_size);
}

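/*
 * Walk the physical buffer list and write one PBE per pbe_size chunk
 * into the PBL pages, advancing to the next PBL page whenever the
 * current one is full (pbl_size / sizeof(u64) entries, since each PBE
 * is a 64-bit address split into pa_lo/pa_hi).
 */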
static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
                              u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
                              struct ocrdma_hw_mr *hwmr)
{
        int i;
        int idx;
        int pbes_per_buf = 0;
        u64 buf_addr = 0;
        int num_pbes;
        struct ocrdma_pbe *pbe;
        int total_num_pbes = 0;

        if (!hwmr->num_pbes)
                return;

        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        num_pbes = 0;

        /* go through the OS physical regions & fill hw pbe entries into
         * pbls.
         */
        for (i = 0; i < ib_buf_cnt; i++) {
                buf_addr = buf_list[i].addr;
                pbes_per_buf =
                    roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
                    pbe_size;
                hwmr->len += buf_list[i].size;
                /* one OS buffer can need more than one pbe when the
                 * buffers have different sizes; split each ib_buf into
                 * one or more pbes.
                 */
                for (idx = 0; idx < pbes_per_buf; idx++) {
                        /* we always program page-aligned addresses; any
                         * initial misalignment is covered by the fbo.
                         */
                        if (i == 0) {
                                /* for a non-zero fbo, program the
                                 * start of the page.
                                 */
                                pbe->pa_lo =
                                    cpu_to_le32((u32) (buf_addr & PAGE_MASK));
                                pbe->pa_hi =
                                    cpu_to_le32((u32) upper_32_bits(buf_addr));
                        } else {
                                pbe->pa_lo =
                                    cpu_to_le32((u32) (buf_addr & 0xffffffff));
                                pbe->pa_hi =
                                    cpu_to_le32((u32) upper_32_bits(buf_addr));
                        }
                        buf_addr += pbe_size;
                        num_pbes += 1;
                        total_num_pbes += 1;
                        pbe++;

                        if (total_num_pbes == hwmr->num_pbes)
                                goto mr_tbl_done;
                        /* if the pbl is full of pbes, move to the
                         * next pbl.
                         */
                        if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
                                pbl_tbl++;
                                pbe = (struct ocrdma_pbe *)pbl_tbl->va;
                                num_pbes = 0;
                        }
                }
        }
mr_tbl_done:
        return;
}

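/*
 * Register a kernel physical MR: reject remote-write without local-write
 * (remote write access requires local write enabled), size the PBEs from
 * the buffer list, build the PBL table, and register with the requested
 * access rights; an rkey is only exposed when a remote permission was
 * asked for.
 */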
struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
                                   struct ib_phys_buf *buf_list,
                                   int buf_cnt, int acc, u64 *iova_start)
{
        int status = -ENOMEM;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        u32 num_pbes;
        u32 pbe_size = 0;

        if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(status);

        num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
        if (num_pbes == 0) {
                status = -EINVAL;
                goto pbl_err;
        }
        status = ocrdma_get_pbl_info(dev, mr, num_pbes);
        if (status)
                goto pbl_err;

        mr->hwmr.pbe_size = pbe_size;
        mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
        mr->hwmr.va = *iova_start;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;

        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto pbl_err;
        build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
                          &mr->hwmr);
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
        if (status)
                goto mbx_err;

        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
        return &mr->ibmr;

mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
        kfree(mr);
        return ERR_PTR(status);
}